Mirror of https://github.com/pjreddie/darknet.git, synced 2023-08-10 21:13:14 +03:00
Minor output fixes
@@ -1,5 +1,9 @@
 #!/bin/bash
 
+#wget http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_bbox_val_v3.tgz
+#other downloads: http://www.image-net.org/challenges/LSVRC/2012/nonpub-downloads
+#read: https://pjreddie.com/darknet/imagenet/
+
 mkdir -p labelled
 wd=`pwd`
 
@@ -4,7 +4,7 @@
 
 avgpool_layer make_avgpool_layer(int batch, int w, int h, int c)
 {
     fprintf(stderr, "avg %4d x%4d x%4d -> %4d\n", w, h, c, c);
     avgpool_layer l = { (LAYER_TYPE)0 };
     l.type = AVGPOOL;
     l.batch = batch;
@@ -552,13 +552,13 @@ convolutional_layer make_convolutional_layer(int batch, int steps, int h, int w,
     else if (l.xnor) fprintf(stderr, "convX ");
     else fprintf(stderr, "conv ");
 
-    if(groups > 1) fprintf(stderr, "%5d/%3d ", n, groups);
+    if(groups > 1) fprintf(stderr, "%5d/%4d ", n, groups);
     else fprintf(stderr, "%5d ", n);
 
     if(dilation > 1) fprintf(stderr, "%2d x%2d/%2d(%1d)", size, size, stride, dilation);
     else fprintf(stderr, "%2d x%2d/%2d ", size, size, stride);
 
     fprintf(stderr, "%4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", w, h, c, l.out_w, l.out_h, l.out_c, l.bflops);
 
     //fprintf(stderr, "%5d/%2d %2d x%2d /%2d(%d)%4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", n, groups, size, size, stride, dilation, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops);
 
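The one substantive change visible in this hunk is the group-count field width in the layer summary: %3d is widened to %4d, so a four-digit group count no longer shifts the rest of the line. A minimal standalone sketch of the effect, using made-up filter and group counts rather than values from the commit:

```c
#include <stdio.h>

int main(void)
{
    int n = 1024;            /* hypothetical number of filters              */
    int few_groups = 32;     /* fits in the old %3d width                   */
    int many_groups = 1024;  /* four digits: overflows %3d and breaks align */

    /* old format: the column shifts once groups needs four digits */
    fprintf(stderr, "%5d/%3d |\n", n, few_groups);
    fprintf(stderr, "%5d/%3d |\n", n, many_groups);

    /* new format: %4d keeps the trailing columns aligned */
    fprintf(stderr, "%5d/%4d |\n", n, few_groups);
    fprintf(stderr, "%5d/%4d |\n", n, many_groups);
    return 0;
}
```

The trailing "|" only marks where the next column of the summary line would begin.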
@@ -21,7 +21,7 @@ dropout_layer make_dropout_layer(int batch, int inputs, float probability)
     l.backward_gpu = backward_dropout_layer_gpu;
     l.rand_gpu = cuda_make_array(l.rand, inputs*batch);
 #endif
     fprintf(stderr, "dropout p = %.2f %4d -> %4d\n", probability, inputs, inputs);
     return l;
 }
 
@@ -78,7 +78,7 @@ maxpool_layer make_maxpool_layer(int batch, int h, int w, int c, int size, int s
 
 #endif // GPU
     l.bflops = (l.size*l.size*l.c * l.out_h*l.out_w) / 1000000000.;
     fprintf(stderr, "max %d x %d/%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops);
     return l;
 }
 
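The BF value printed by this maxpool summary is the l.bflops estimate computed just above it: window size squared, times channels, times output cells, scaled to billions. A quick standalone check with hypothetical layer dimensions (not values taken from the diff):

```c
#include <stdio.h>

int main(void)
{
    /* hypothetical maxpool layer: 2x2 window over a 128-channel 104x104 output */
    int size = 2, c = 128, out_h = 104, out_w = 104;

    /* same arithmetic as the l.bflops line in make_maxpool_layer */
    float bflops = (size * size * c * out_h * out_w) / 1000000000.f;

    printf("%5.3f BF\n", bflops);   /* prints "0.006 BF" */
    return 0;
}
```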
@@ -23,7 +23,7 @@ layer make_reorg_layer(int batch, int w, int h, int c, int stride, int reverse)
         l.out_c = c*(stride*stride);
     }
     l.reverse = reverse;
     fprintf(stderr, "reorg /%2d %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
     l.outputs = l.out_h * l.out_w * l.out_c;
     l.inputs = h*w*c;
     int output_size = l.out_h * l.out_w * l.out_c * batch;
@@ -36,8 +36,8 @@ layer make_upsample_layer(int batch, int w, int h, int c, int stride)
     l.delta_gpu = cuda_make_array(l.delta, l.outputs*batch);
     l.output_gpu = cuda_make_array(l.output, l.outputs*batch);
 #endif
     if(l.reverse) fprintf(stderr, "downsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
     else fprintf(stderr, "upsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
     return l;
 }
 