mirror of https://github.com/pjreddie/darknet.git, synced 2023-08-10 21:13:14 +03:00
Fixed im2col mistake >< face#palm
This commit is contained in:
parent e92f7d301c
commit d7d7da2653
Makefile (2 changed lines)
@@ -1,6 +1,6 @@
 GPU=1
 DEBUG=0
-ARCH= -arch=sm_35
+ARCH= -arch=sm_50
 
 VPATH=./src/
 EXEC=darknet
@@ -37,9 +37,9 @@ __global__ void col2im_gpu_kernel(const int n, const float* data_col,
     }
 }
 
-void col2im_ongpu(float *im,
+void col2im_ongpu(float *data_col,
         int channels, int height, int width,
-        int ksize, int stride, int pad, float *data_col){
+        int ksize, int stride, int pad, float *data_im){
     // We are going to launch channels * height_col * width_col kernels, each
     // kernel responsible for copying a single-channel grid.
     pad = pad ? ksize/2 : 0;
@@ -50,7 +50,7 @@ void col2im_ongpu(float *im,
                 BLOCK>>>(
                 num_kernels, data_col, height, width, ksize, pad,
                 stride, height_col,
-                width_col, im);
+                width_col, data_im);
 }
 
 /*
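For context, the renamed arguments line up with what col2im actually computes: it consumes the unrolled column buffer produced for the convolution GEMM and accumulates it back into the image tensor. The following is a minimal, generic CPU reference sketch of that data flow; it is an illustration only, not the kernel from this commit.

#include <string.h>

/* Reference col2im on the CPU: scatter-add each column-buffer entry back
 * into its (channel, row, col) position in the image. Illustrative only. */
void col2im_cpu_sketch(float *data_col, int channels, int height, int width,
        int ksize, int stride, int pad, float *data_im)
{
    int c, h, w;
    int height_col = (height + 2*pad - ksize) / stride + 1;
    int width_col  = (width  + 2*pad - ksize) / stride + 1;
    int channels_col = channels * ksize * ksize;

    memset(data_im, 0, sizeof(float) * channels * height * width);
    for (c = 0; c < channels_col; ++c) {
        int w_offset = c % ksize;
        int h_offset = (c / ksize) % ksize;
        int c_im = c / ksize / ksize;
        for (h = 0; h < height_col; ++h) {
            for (w = 0; w < width_col; ++w) {
                int im_row = h_offset + h*stride - pad;
                int im_col = w_offset + w*stride - pad;
                if (im_row < 0 || im_col < 0 || im_row >= height || im_col >= width) continue;
                data_im[(c_im*height + im_row)*width + im_col] +=
                    data_col[(c*height_col + h)*width_col + w];
            }
        }
    }
}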
@@ -26,7 +26,7 @@ void bias_output_gpu(float *output, float *biases, int batch, int n, int size)
     check_error(cudaPeekAtLastError());
 }
 
-__global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size, float scale)
+__global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size)
 {
     __shared__ float part[BLOCK];
     int i,b;
@@ -42,13 +42,13 @@ __global__ void backward_bias_kernel(float *bias_updates, float *delta, int batc
     part[p] = sum;
     __syncthreads();
     if(p == 0){
-        for(i = 0; i < BLOCK; ++i) bias_updates[filter] += scale * part[i];
+        for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i];
     }
 }
 
 void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size)
 {
-    backward_bias_kernel<<<n, BLOCK>>>(bias_updates, delta, batch, n, size, 1);
+    backward_bias_kernel<<<n, BLOCK>>>(bias_updates, delta, batch, n, size);
     check_error(cudaPeekAtLastError());
 }
 
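The dropped scale argument was only ever passed as 1, so the kernel now simply sums each filter's deltas. Below is a CPU sketch of the reduction the kernel parallelizes, assuming a batch-major, filter-then-spatial delta layout; it is illustrative, not the CUDA code.

/* CPU sketch of the per-filter bias-gradient reduction: each filter's bias
 * update accumulates the sum of its deltas over the batch and over the
 * spatial positions of its output map. Assumes delta laid out as
 * [batch][filter][spatial]. Illustrative only. */
void backward_bias_cpu_sketch(float *bias_updates, float *delta,
        int batch, int n, int size)
{
    int f, b, s;
    for (f = 0; f < n; ++f) {
        float sum = 0;
        for (b = 0; b < batch; ++b) {
            for (s = 0; s < size; ++s) {
                sum += delta[(b*n + f)*size + s];
            }
        }
        bias_updates[f] += sum;
    }
}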
@@ -45,7 +45,7 @@ void train_detection(char *cfgfile, char *weightfile)
 {
     char *base = basecfg(cfgfile);
     printf("%s\n", base);
-    float avg_loss = 1;
+    float avg_loss = -1;
     network net = parse_network_cfg(cfgfile);
     if(weightfile){
         load_weights(&net, weightfile);
@@ -84,6 +84,7 @@ void train_detection(char *cfgfile, char *weightfile)
         time=clock();
         float loss = train_network(net, train);
         net.seen += imgs;
+        if (avg_loss < 0) avg_loss = loss;
         avg_loss = avg_loss*.9 + loss*.1;
         printf("%d: %f, %f avg, %lf seconds, %d images\n", i, loss, avg_loss, sec(clock()-time), i*imgs);
         if(i%100==0){
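These two hunks work together: avg_loss starts at the sentinel -1 and is seeded from the first real loss instead of from 1, then follows the usual exponential moving average. A short sketch of the resulting update rule, with a hypothetical helper name but the same arithmetic as the diff:

/* Running-loss update after the fix: seed from the first observed loss,
 * then keep a 90/10 exponential moving average. Helper name is hypothetical. */
float update_avg_loss(float avg_loss, float loss)
{
    if (avg_loss < 0) avg_loss = loss;   /* first iteration: sentinel -1 */
    return avg_loss*.9 + loss*.1;
}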
@@ -109,8 +110,8 @@ void validate_detection(char *cfgfile, char *weightfile)
     char **paths = (char **)list_to_array(plist);
     int im_size = 448;
     int classes = 20;
-    int background = 1;
-    int nuisance = 0;
+    int background = 0;
+    int nuisance = 1;
     int num_output = 7*7*(4+classes+background+nuisance);
 
     int m = plist->size;
@@ -137,7 +138,7 @@ void validate_detection(char *cfgfile, char *weightfile)
     for(j = 0; j < pred.rows; ++j){
         for(k = 0; k < pred.cols; k += classes+4+background+nuisance){
             float scale = 1.;
-            if(nuisance) scale = pred.vals[j][k];
+            if(nuisance) scale = 1.-pred.vals[j][k];
             for(class = 0; class < classes; ++class){
                 int index = (k)/(classes+4+background+nuisance);
                 int r = index/7;
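The validation path now appears to read the nuisance output as the probability that a grid cell contains nothing, so per-class confidences are weighted by one minus that value rather than by the raw prediction. A minimal sketch of just that weighting, with a hypothetical helper name and the cell layout abstracted away:

/* Confidence weight for a grid cell after this change: with a nuisance
 * output, scores are scaled by 1 - p(nuisance); without one, the cell
 * counts fully. Hypothetical helper, for illustration only. */
float detection_confidence_scale(float nuisance_pred, int nuisance)
{
    return nuisance ? 1.f - nuisance_pred : 1.f;
}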
@@ -93,6 +93,19 @@ void forward_detection_layer(const detection_layer layer, network_state state)
         }
     }
     /*
+    int count = 0;
+    for(i = 0; i < layer.batch*locations; ++i){
+        for(j = 0; j < layer.classes+layer.background; ++j){
+            printf("%f, ", layer.output[count++]);
+        }
+        printf("\n");
+        for(j = 0; j < layer.coords; ++j){
+            printf("%f, ", layer.output[count++]);
+        }
+        printf("\n");
+    }
+    */
+    /*
     if(layer.background || 1){
         for(i = 0; i < layer.batch*locations; ++i){
             int index = i*(layer.classes+layer.coords+layer.background);
@@ -123,8 +136,9 @@ void backward_detection_layer(const detection_layer layer, network_state state)
             state.delta[in_i++] = scale*layer.delta[out_i++];
         }
 
-        if (layer.nuisance) ;
-        else if (layer.background) gradient_array(layer.output + out_i, layer.coords, LOGISTIC, layer.delta + out_i);
+        if (layer.nuisance) {
+        }else if (layer.background) gradient_array(layer.output + out_i, layer.coords, LOGISTIC, layer.delta + out_i);
         for(j = 0; j < layer.coords; ++j){
             state.delta[in_i++] = layer.delta[out_i++];
         }
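For reference, gradient_array with LOGISTIC multiplies each delta by the logistic derivative y*(1-y) evaluated at the stored output. A sketch of that elementwise step (illustrative helper; darknet's real gradient_array dispatches on an activation enum):

/* Elementwise logistic backward pass: delta[i] *= y*(1-y) with y the stored
 * activation. Illustrative stand-in for gradient_array(..., LOGISTIC, ...). */
void logistic_gradient_sketch(const float *output, int n, float *delta)
{
    int i;
    for (i = 0; i < n; ++i) {
        delta[i] *= output[i] * (1 - output[i]);
    }
}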
@@ -16,6 +16,11 @@ void forward_dropout_layer_gpu(dropout_layer layer, network_state state)
     if (!state.train) return;
     int size = layer.inputs*layer.batch;
     cuda_random(layer.rand_gpu, size);
+    int i;
+    for(i = 0; i < size; ++i){
+        layer.rand[i] = rand_uniform();
+    }
+    cuda_push_array(layer.rand_gpu, layer.rand, size);
 
     yoloswag420blazeit360noscope<<<cuda_gridsize(size), BLOCK>>>(state.input, size, layer.rand_gpu, layer.probability, layer.scale);
     check_error(cudaPeekAtLastError());
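The added lines generate the dropout mask on the host with rand_uniform() and push it into the GPU buffer the kernel reads. For context, the masking that buffer drives typically looks like the CPU sketch below: draws under the dropout probability zero the activation and survivors are rescaled, with scale normally 1/(1-probability). Illustrative only, not the CUDA kernel.

/* CPU sketch of inverted dropout using a precomputed random buffer:
 * zero inputs whose draw falls below the dropout probability, rescale
 * the rest so the expected magnitude is unchanged at train time. */
void dropout_mask_sketch(float *input, int size, const float *rand,
        float probability, float scale)
{
    int i;
    for (i = 0; i < size; ++i) {
        input[i] = (rand[i] < probability) ? 0 : input[i] * scale;
    }
}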
@@ -71,6 +71,7 @@ void backward_network_gpu(network net, network_state state)
             state.input = get_network_output_gpu_layer(net, i-1);
             state.delta = get_network_delta_gpu_layer(net, i-1);
         }
+
         if(net.types[i] == CONVOLUTIONAL){
             backward_convolutional_layer_gpu(*(convolutional_layer *)net.layers[i], state);
         }