From e7d43fd65ddc476469ee8d24140835c1e0159fa6 Mon Sep 17 00:00:00 2001
From: Joseph Redmon
Date: Mon, 30 Nov 2015 15:04:09 -0800
Subject: [PATCH] rolling avg demo

---
 cfg/darknet.cfg                 |  16 ++-
 data/labels/.make_labels.py.swp | Bin 12288 -> 0 bytes
 src/coco.c                      |  21 +--
 src/coco_kernels.cu             |  33 ++++-
 src/local_kernels.cu            | 230 --------------------------------
 src/utils.c                     |  15 +++
 src/utils.h                     |   1 +
 7 files changed, 66 insertions(+), 250 deletions(-)
 delete mode 100644 data/labels/.make_labels.py.swp
 delete mode 100644 src/local_kernels.cu

diff --git a/cfg/darknet.cfg b/cfg/darknet.cfg
index 00e9c366..53d1ec98 100644
--- a/cfg/darknet.cfg
+++ b/cfg/darknet.cfg
@@ -7,11 +7,10 @@ channels=3
 momentum=0.9
 decay=0.0005
 
-learning_rate=0.01
-policy=sigmoid
-gamma=.00002
-step=400000
-max_batches=800000
+learning_rate=0.1
+policy=poly
+power=4
+max_batches=500000
 
 [crop]
 crop_height=224
@@ -22,6 +21,7 @@ saturation=1
 exposure=1
 
 [convolutional]
+batch_normalize=1
 filters=16
 size=3
 stride=1
@@ -33,6 +33,7 @@ size=2
 stride=2
 
 [convolutional]
+batch_normalize=1
 filters=32
 size=3
 stride=1
@@ -44,6 +45,7 @@ size=2
 stride=2
 
 [convolutional]
+batch_normalize=1
 filters=64
 size=3
 stride=1
@@ -55,6 +57,7 @@ size=2
 stride=2
 
 [convolutional]
+batch_normalize=1
 filters=128
 size=3
 stride=1
@@ -66,6 +69,7 @@ size=2
 stride=2
 
 [convolutional]
+batch_normalize=1
 filters=256
 size=3
 stride=1
@@ -77,6 +81,7 @@ size=2
 stride=2
 
 [convolutional]
+batch_normalize=1
 filters=512
 size=3
 stride=1
@@ -88,6 +93,7 @@ size=2
 stride=2
 
 [convolutional]
+batch_normalize=1
 filters=1024
 size=3
 stride=1
diff --git a/data/labels/.make_labels.py.swp b/data/labels/.make_labels.py.swp
deleted file mode 100644
index 2dbb50ee51837fccd560433dee95e01136fe7539..0000000000000000000000000000000000000000
Binary files a/data/labels/.make_labels.py.swp and /dev/null differ
diff --git a/src/coco.c b/src/coco.c
index 17d06540..b532d623 100644
--- a/src/coco.c
+++ b/src/coco.c
@@ -385,11 +385,15 @@ void test_coco(char *cfgfile, char *weightfile, char *filename, float thresh)
     }
 }
 
-#ifdef OPENCV
-#ifdef GPU
-void demo_coco(char *cfgfile, char *weightfile, float thresh, int cam_index);
-#endif
-#endif
+void demo_coco(char *cfgfile, char *weightfile, float thresh, int cam_index, char *filename);
+static void demo(char *cfgfile, char *weightfile, float thresh, int cam_index, char* filename)
+{
+    #if defined(OPENCV) && defined(GPU)
+    demo_coco(cfgfile, weightfile, thresh, cam_index, filename);
+    #else
+    fprintf(stderr, "Need to compile with GPU and OpenCV for demo.\n");
+    #endif
+}
 
 void run_coco(int argc, char **argv)
 {
@@ -401,6 +405,7 @@ void run_coco(int argc, char **argv)
     }
     float thresh = find_float_arg(argc, argv, "-thresh", .2);
    int cam_index = find_int_arg(argc, argv, "-c", 0);
+    char *file = find_char_arg(argc, argv, "-file", 0);
 
     if(argc < 4){
         fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]);
@@ -414,9 +419,5 @@ void run_coco(int argc, char **argv)
     else if(0==strcmp(argv[2], "train")) train_coco(cfg, weights);
     else if(0==strcmp(argv[2], "valid")) validate_coco(cfg, weights);
     else if(0==strcmp(argv[2], "recall")) validate_coco_recall(cfg, weights);
-#ifdef OPENCV
-#ifdef GPU
-    else if(0==strcmp(argv[2], "demo")) demo_coco(cfg, weights, thresh, cam_index);
-#endif
-#endif
+    else if(0==strcmp(argv[2], "demo")) demo(cfg, weights, thresh, cam_index, file);
 }
diff --git a/src/coco_kernels.cu b/src/coco_kernels.cu
index 298bc34a..0a5f8407 100644
--- a/src/coco_kernels.cu
+++ b/src/coco_kernels.cu
@@ -34,6 +34,12 @@ static cv::VideoCapture cap;
 static float fps = 0;
 static float demo_thresh = 0;
 
+static const int frames = 3;
+static float *predictions[frames];
+static int demo_index = 0;
+static image images[frames];
+static float *avg;
+
 void *fetch_in_thread_coco(void *ptr)
 {
     cv::Mat frame_m;
@@ -51,19 +57,28 @@ void *detect_in_thread_coco(void *ptr)
 
     detection_layer l = net.layers[net.n-1];
     float *X = det_s.data;
-    float *predictions = network_predict(net, X);
+    float *prediction = network_predict(net, X);
+
+    memcpy(predictions[demo_index], prediction, l.outputs*sizeof(float));
+    mean_arrays(predictions, frames, l.outputs, avg);
+
     free_image(det_s);
-    convert_coco_detections(predictions, l.classes, l.n, l.sqrt, l.side, 1, 1, demo_thresh, probs, boxes, 0);
+    convert_coco_detections(avg, l.classes, l.n, l.sqrt, l.side, 1, 1, demo_thresh, probs, boxes, 0);
     if (nms > 0) do_nms(boxes, probs, l.side*l.side*l.n, l.classes, nms);
     printf("\033[2J");
     printf("\033[1;1H");
     printf("\nFPS:%.0f\n",fps);
     printf("Objects:\n\n");
+
+    images[demo_index] = det;
+    det = images[(demo_index + frames/2 + 1)%frames];
+    demo_index = (demo_index + 1)%frames;
+
     draw_detections(det, l.side*l.side*l.n, demo_thresh, boxes, probs, coco_classes, coco_labels, 80);
     return 0;
 }
 
-extern "C" void demo_coco(char *cfgfile, char *weightfile, float thresh, int cam_index)
+extern "C" void demo_coco(char *cfgfile, char *weightfile, float thresh, int cam_index, char *filename)
 {
     demo_thresh = thresh;
     printf("YOLO demo\n");
@@ -75,13 +90,21 @@ extern "C" void demo_coco(char *cfgfile, char *weightfile, float thresh, int cam
 
     srand(2222222);
 
-    cv::VideoCapture cam(cam_index);
-    cap = cam;
+    if(filename){
+        cap.open(filename);
+    }else{
+        cap.open(cam_index);
+    }
+
     if(!cap.isOpened()) error("Couldn't connect to webcam.\n");
 
     detection_layer l = net.layers[net.n-1];
     int j;
 
+    avg = (float *) calloc(l.outputs, sizeof(float));
+    for(j = 0; j < frames; ++j) predictions[j] = (float *) calloc(l.outputs, sizeof(float));
+    for(j = 0; j < frames; ++j) images[j] = make_image(1,1,3);
+
     boxes = (box *)calloc(l.side*l.side*l.n, sizeof(box));
     probs = (float **)calloc(l.side*l.side*l.n, sizeof(float *));
     for(j = 0; j < l.side*l.side*l.n; ++j) probs[j] = (float *)calloc(l.classes, sizeof(float *));
diff --git a/src/local_kernels.cu b/src/local_kernels.cu
deleted file mode 100644
index 14e5a0ed..00000000
--- a/src/local_kernels.cu
+++ /dev/null
@@ -1,230 +0,0 @@
-#include "cuda_runtime.h"
-#include "curand.h"
-#include "cublas_v2.h"
-
-extern "C" {
-#include "local_layer.h"
-#include "gemm.h"
-#include "blas.h"
-#include "im2col.h"
-#include "col2im.h"
-#include "utils.h"
-#include "cuda.h"
-}
-
-__global__ void scale_bias_kernel(float *output, float *biases, int n, int size)
-{
-    int offset = blockIdx.x * blockDim.x + threadIdx.x;
-    int filter = blockIdx.y;
-    int batch = blockIdx.z;
-
-    if(offset < size) output[(batch*n+filter)*size + offset] *= biases[filter];
-}
-
-void scale_bias_gpu(float *output, float *biases, int batch, int n, int size)
-{
-    dim3 dimGrid((size-1)/BLOCK + 1, n, batch);
-    dim3 dimBlock(BLOCK, 1, 1);
-
-    scale_bias_kernel<<<dimGrid, dimBlock>>>(output, biases, n, size);
-    check_error(cudaPeekAtLastError());
-}
-
-__global__ void backward_scale_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
-{
-    __shared__ float part[BLOCK];
-    int i,b;
-    int filter = blockIdx.x;
-    int p = threadIdx.x;
-    float sum = 0;
-    for(b = 0; b < batch; ++b){
-        for(i = 0; i < size; i += BLOCK){
-            int index = p + i + size*(filter + n*b);
-            sum += (p+i < size) ? delta[index]*x_norm[index] : 0;
-        }
-    }
-    part[p] = sum;
-    __syncthreads();
-    if (p == 0) {
-        for(i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i];
-    }
-}
-
-void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
-{
-    backward_scale_kernel<<<n, BLOCK>>>(x_norm, delta, batch, n, size, scale_updates);
-    check_error(cudaPeekAtLastError());
-}
-
-__global__ void add_bias_kernel(float *output, float *biases, int n, int size)
-{
-    int offset = blockIdx.x * blockDim.x + threadIdx.x;
-    int filter = blockIdx.y;
-    int batch = blockIdx.z;
-
-    if(offset < size) output[(batch*n+filter)*size + offset] += biases[filter];
-}
-
-void add_bias_gpu(float *output, float *biases, int batch, int n, int size)
-{
-    dim3 dimGrid((size-1)/BLOCK + 1, n, batch);
-    dim3 dimBlock(BLOCK, 1, 1);
-
-    add_bias_kernel<<<dimGrid, dimBlock>>>(output, biases, n, size);
-    check_error(cudaPeekAtLastError());
-}
-
-__global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size)
-{
-    __shared__ float part[BLOCK];
-    int i,b;
-    int filter = blockIdx.x;
-    int p = threadIdx.x;
-    float sum = 0;
-    for(b = 0; b < batch; ++b){
-        for(i = 0; i < size; i += BLOCK){
-            int index = p + i + size*(filter + n*b);
-            sum += (p+i < size) ? delta[index] : 0;
-        }
-    }
-    part[p] = sum;
-    __syncthreads();
-    if (p == 0) {
-        for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i];
-    }
-}
-
-void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size)
-{
-    backward_bias_kernel<<<n, BLOCK>>>(bias_updates, delta, batch, n, size);
-    check_error(cudaPeekAtLastError());
-}
-
-void forward_local_layer_gpu(local_layer l, network_state state)
-{
-    int i;
-    int m = l.n;
-    int k = l.size*l.size*l.c;
-    int n = local_out_height(l)*
-        local_out_width(l);
-
-    fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
-    for(i = 0; i < l.batch; ++i){
-        im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, l.col_image_gpu);
-        float * a = l.filters_gpu;
-        float * b = l.col_image_gpu;
-        float * c = l.output_gpu;
-        gemm_ongpu(0,0,m,n,k,1.,a,k,b,n,1.,c+i*m*n,n);
-    }
-
-    if(l.batch_normalize){
-        if(state.train){
-            fast_mean_gpu(l.output_gpu, l.batch, l.n, l.out_h*l.out_w, l.spatial_mean_gpu, l.mean_gpu);
-            fast_variance_gpu(l.output_gpu, l.mean_gpu, l.batch, l.n, l.out_h*l.out_w, l.spatial_variance_gpu, l.variance_gpu);
-
-            scal_ongpu(l.n, .95, l.rolling_mean_gpu, 1);
-            axpy_ongpu(l.n, .05, l.mean_gpu, 1, l.rolling_mean_gpu, 1);
-            scal_ongpu(l.n, .95, l.rolling_variance_gpu, 1);
-            axpy_ongpu(l.n, .05, l.variance_gpu, 1, l.rolling_variance_gpu, 1);
-
-            // cuda_pull_array(l.variance_gpu, l.mean, l.n);
-            // printf("%f\n", l.mean[0]);
-
-            copy_ongpu(l.outputs*l.batch, l.output_gpu, 1, l.x_gpu, 1);
-            normalize_gpu(l.output_gpu, l.mean_gpu, l.variance_gpu, l.batch, l.n, l.out_h*l.out_w);
-            copy_ongpu(l.outputs*l.batch, l.output_gpu, 1, l.x_norm_gpu, 1);
-        } else {
-            normalize_gpu(l.output_gpu, l.rolling_mean_gpu, l.rolling_variance_gpu, l.batch, l.n, l.out_h*l.out_w);
-        }
-
-        scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.n, l.out_h*l.out_w);
-    }
-    add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, n);
-
-    activate_array_ongpu(l.output_gpu, m*n*l.batch, l.activation);
-}
-
-void backward_local_layer_gpu(local_layer l, network_state state)
-{
-    int i;
-    int m = l.n;
-    int n = l.size*l.size*l.c;
-    int k = local_out_height(l)*
-        local_out_width(l);
-
-    gradient_array_ongpu(l.output_gpu, m*k*l.batch, l.activation, l.delta_gpu);
-
-    backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, k);
-
-    if(l.batch_normalize){
-        backward_scale_gpu(l.x_norm_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h, l.scale_updates_gpu);
-
-        scale_bias_gpu(l.delta_gpu, l.scales_gpu, l.batch, l.n, l.out_h*l.out_w);
-
-        fast_mean_delta_gpu(l.delta_gpu, l.variance_gpu, l.batch, l.n, l.out_w*l.out_h, l.spatial_mean_delta_gpu, l.mean_delta_gpu);
-        fast_variance_delta_gpu(l.x_gpu, l.delta_gpu, l.mean_gpu, l.variance_gpu, l.batch, l.n, l.out_w*l.out_h, l.spatial_variance_delta_gpu, l.variance_delta_gpu);
-        normalize_delta_gpu(l.x_gpu, l.mean_gpu, l.variance_gpu, l.mean_delta_gpu, l.variance_delta_gpu, l.batch, l.n, l.out_w*l.out_h, l.delta_gpu);
-    }
-
-    for(i = 0; i < l.batch; ++i){
-        float * a = l.delta_gpu;
-        float * b = l.col_image_gpu;
-        float * c = l.filter_updates_gpu;
-
-        im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, l.col_image_gpu);
-        gemm_ongpu(0,1,m,n,k,1,a + i*m*k,k,b,k,1,c,n);
-
-        if(state.delta){
-            float * a = l.filters_gpu;
-            float * b = l.delta_gpu;
-            float * c = l.col_image_gpu;
-
-            gemm_ongpu(1,0,n,k,m,1,a,n,b + i*k*m,k,0,c,k);
-
-            col2im_ongpu(l.col_image_gpu, l.c, l.h, l.w, l.size, l.stride, l.pad, state.delta + i*l.c*l.h*l.w);
-        }
-    }
-}
-
-void pull_local_layer(local_layer layer)
-{
-    cuda_pull_array(layer.filters_gpu, layer.filters, layer.c*layer.n*layer.size*layer.size);
-    cuda_pull_array(layer.biases_gpu, layer.biases, layer.n);
-    cuda_pull_array(layer.filter_updates_gpu, layer.filter_updates, layer.c*layer.n*layer.size*layer.size);
-    cuda_pull_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
-    if (layer.batch_normalize){
-        cuda_pull_array(layer.scales_gpu, layer.scales, layer.n);
-        cuda_pull_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
-        cuda_pull_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
-    }
-}
-
-void push_local_layer(local_layer layer)
-{
-    cuda_push_array(layer.filters_gpu, layer.filters, layer.c*layer.n*layer.size*layer.size);
-    cuda_push_array(layer.biases_gpu, layer.biases, layer.n);
-    cuda_push_array(layer.filter_updates_gpu, layer.filter_updates, layer.c*layer.n*layer.size*layer.size);
-    cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
-    if (layer.batch_normalize){
-        cuda_push_array(layer.scales_gpu, layer.scales, layer.n);
-        cuda_push_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
-        cuda_push_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
-    }
-}
-
-void update_local_layer_gpu(local_layer layer, int batch, float learning_rate, float momentum, float decay)
-{
-    int size = layer.size*layer.size*layer.c*layer.n;
-
-    axpy_ongpu(layer.n, learning_rate/batch, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
-    scal_ongpu(layer.n, momentum, layer.bias_updates_gpu, 1);
-
-    axpy_ongpu(layer.n, learning_rate/batch, layer.scale_updates_gpu, 1, layer.scales_gpu, 1);
-    scal_ongpu(layer.n, momentum, layer.scale_updates_gpu, 1);
-
-    axpy_ongpu(size, -decay*batch, layer.filters_gpu, 1, layer.filter_updates_gpu, 1);
-    axpy_ongpu(size, learning_rate/batch, layer.filter_updates_gpu, 1, layer.filters_gpu, 1);
-    scal_ongpu(size, momentum, layer.filter_updates_gpu, 1);
-}
-
-
diff --git a/src/utils.c b/src/utils.c
index 3121ef6f..3ad09329 100644
--- a/src/utils.c
+++ b/src/utils.c
@@ -359,6 +359,21 @@ float mean_array(float *a, int n)
     return sum_array(a,n)/n;
 }
 
+void mean_arrays(float **a, int n, int els, float *avg)
+{
+    int i;
+    int j;
+    memset(avg, 0, els*sizeof(float));
+    for(j = 0; j < n; ++j){
+        for(i = 0; i < els; ++i){
+            avg[i] += a[j][i];
+        }
+    }
+    for(i = 0; i < els; ++i){
+        avg[i] /= n;
+    }
+}
+
 float variance_array(float *a, int n)
 {
     int i;
diff --git a/src/utils.h b/src/utils.h
index 1b9ba08c..7e13e86f 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -37,6 +37,7 @@ float rand_normal();
 float rand_uniform();
 float sum_array(float *a, int n);
 float mean_array(float *a, int n);
+void mean_arrays(float **a, int n, int els, float *avg);
 float variance_array(float *a, int n);
 float mag_array(float *a, int n);
 float **one_hot_encode(float *a, int n, int k);
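
Note on the smoothing this patch introduces: detect_in_thread_coco() now copies each raw network output into a ring of frames (3) prediction buffers, the new mean_arrays() helper in src/utils.c averages them element-wise into avg, and convert_coco_detections() decodes boxes from avg instead of from a single frame's output, which damps flicker from one-frame spurious detections; the displayed image is likewise pulled from the middle of the ring so the averaged boxes roughly line up with what is on screen. Below is a minimal, self-contained C sketch of that idea, not part of the patch: the averaging helper mirrors mean_arrays() from src/utils.c, while the window length, buffer size, and score values are made-up placeholders rather than real network output.

/* rolling_avg_sketch.c - standalone illustration of the rolling average.
 * The helper below mirrors mean_arrays() from src/utils.c; the three
 * "prediction" buffers hold made-up scores, not real YOLO output. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void mean_arrays(float **a, int n, int els, float *avg)
{
    int i, j;
    memset(avg, 0, els*sizeof(float));
    for(j = 0; j < n; ++j){
        for(i = 0; i < els; ++i) avg[i] += a[j][i];
    }
    for(i = 0; i < els; ++i) avg[i] /= n;
}

int main()
{
    enum { FRAMES = 3, OUTPUTS = 4 };   /* FRAMES matches the demo's window; OUTPUTS stands in for l.outputs */
    float *predictions[FRAMES];
    float *avg = calloc(OUTPUTS, sizeof(float));
    float fake[FRAMES][OUTPUTS] = {
        {.60f, .10f, .05f, .25f},
        {.70f, .12f, .04f, .20f},
        {.65f, .08f, .06f, .22f}
    };
    int demo_index = 0, f, i;

    for(f = 0; f < FRAMES; ++f) predictions[f] = calloc(OUTPUTS, sizeof(float));

    /* One iteration per "frame": store the newest output in the ring,
     * then average the whole ring, as detect_in_thread_coco now does. */
    for(f = 0; f < FRAMES; ++f){
        memcpy(predictions[demo_index], fake[f], OUTPUTS*sizeof(float));
        mean_arrays(predictions, FRAMES, OUTPUTS, avg);
        demo_index = (demo_index + 1) % FRAMES;
    }

    for(i = 0; i < OUTPUTS; ++i) printf("%.3f ", avg[i]);
    printf("\n");

    for(f = 0; f < FRAMES; ++f) free(predictions[f]);
    free(avg);
    return 0;
}

As in the demo itself, the earliest averages still include the zero-initialized buffers, so scores ramp up until the window has seen a full set of outputs; only the final average printed here reflects a full window.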