no mean on input binarization

Joseph Redmon 2016-06-19 14:28:15 -07:00
parent 8322a58cf6
commit 08c7cf9c88
5 changed files with 35 additions and 8 deletions

View File

@@ -82,9 +82,7 @@ void forward_convolutional_layer_gpu(convolutional_layer l, network_state state)
     if(l.xnor){
         binarize_filters_gpu(l.filters_gpu, l.n, l.c*l.size*l.size, l.binary_filters_gpu);
         swap_binary(&l);
-        for(i = 0; i < l.batch; ++i){
-            binarize_input_gpu(state.input + i*l.inputs, l.c, l.h*l.w, l.binary_input_gpu + i*l.inputs);
-        }
+        binarize_gpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input_gpu);
         state.input = l.binary_input_gpu;
     }
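The body of binarize_gpu is not part of this commit's hunks. Based on the new binarize_cpu below, which keeps only the sign of each input, a minimal sketch of what a sign-only GPU binarization could look like (kernel name, block size, and launch wrapper are assumptions, not darknet's actual code):

__global__ void binarize_sign_kernel(float *input, int n, float *binary)
{
    // Each thread maps one element to +1/-1; no mean scaling is applied.
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if(i < n) binary[i] = (input[i] > 0) ? 1.0f : -1.0f;
}

void binarize_gpu_sketch(float *input, int n, float *binary)
{
    // Whole-batch launch: n = l.c*l.h*l.w*l.batch, matching the call above.
    int threads = 512;
    binarize_sign_kernel<<<(n + threads - 1)/threads, threads>>>(input, n, binary);
}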

View File

@@ -45,6 +45,14 @@ void binarize_filters(float *filters, int n, int size, float *binary)
     }
 }
+
+void binarize_cpu(float *input, int n, float *binary)
+{
+    int i;
+    for(i = 0; i < n; ++i){
+        binary[i] = (input[i] > 0) ? 1 : -1;
+    }
+}
 
 void binarize_input(float *input, int n, int size, float *binary)
 {
     int i, s;
@@ -426,12 +434,10 @@ void forward_convolutional_layer(convolutional_layer l, network_state state)
     }
     */
-    if(l.xnor && (l.c%32 != 0 || !AI2)){
+    if(l.xnor ){
         binarize_filters(l.filters, l.n, l.c*l.size*l.size, l.binary_filters);
         swap_binary(&l);
-        for(i = 0; i < l.batch; ++i){
-            binarize_input(state.input + i*l.inputs, l.c, l.h*l.w, l.binary_input + i*l.inputs);
-        }
+        binarize_cpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input);
         state.input = l.binary_input;
     }
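The commit title, "no mean on input binarization", describes the difference between the two calls: binarize_input (whose body is not shown in this diff) works one image at a time and, in the XNOR-Net style, scales each binarized value by the mean absolute value across the n = l.c channels at that spatial position, while the new binarize_cpu keeps only the sign over the whole batch. A rough sketch of the mean-scaled variant, assuming that behavior:

#include <math.h>

void binarize_input_mean_sketch(float *input, int n, int size, float *binary)
{
    // For each spatial position s, average |input| over the n channels,
    // then emit +mean/-mean instead of +1/-1.
    int i, s;
    for(s = 0; s < size; ++s){
        float mean = 0;
        for(i = 0; i < n; ++i) mean += fabs(input[i*size + s]);
        mean /= n;
        for(i = 0; i < n; ++i){
            binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean;
        }
    }
}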

View File

@@ -88,6 +88,23 @@ void average(int argc, char *argv[])
     save_weights(sum, outfile);
 }
+
+void operations(char *cfgfile)
+{
+    gpu_index = -1;
+    network net = parse_network_cfg(cfgfile);
+    int i;
+    long ops = 0;
+    for(i = 0; i < net.n; ++i){
+        layer l = net.layers[i];
+        if(l.type == CONVOLUTIONAL){
+            ops += 2 * l.n * l.size*l.size*l.c * l.out_h*l.out_w;
+        } else if(l.type == CONNECTED){
+            ops += 2 * l.inputs * l.outputs;
+        }
+    }
+    printf("Floating Point Operations: %ld\n", ops);
+}
 
 void partial(char *cfgfile, char *weightfile, char *outfile, int max)
 {
     gpu_index = -1;
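The count charges two operations (one multiply, one add) per weight application: 2 * l.n * l.size*l.size*l.c per output pixel times l.out_h*l.out_w pixels for a convolutional layer, and 2 * inputs * outputs for a connected layer; other layer types are not counted. As a worked example (hypothetical sizes, not from this diff), a 3x3 convolution with 32 input channels, 64 filters, and a 112x112 output map contributes 2 * 64 * 3*3 * 32 * 112 * 112 ≈ 4.6e8 operations.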
@@ -288,8 +305,12 @@ int main(int argc, char **argv)
         normalize_net(argv[2], argv[3], argv[4]);
     } else if (0 == strcmp(argv[1], "rescale")){
         rescale_net(argv[2], argv[3], argv[4]);
+    } else if (0 == strcmp(argv[1], "ops")){
+        operations(argv[2]);
     } else if (0 == strcmp(argv[1], "partial")){
         partial(argv[2], argv[3], argv[4], atoi(argv[5]));
+    } else if (0 == strcmp(argv[1], "average")){
+        average(argc, argv);
     } else if (0 == strcmp(argv[1], "stacked")){
         stacked(argv[2], argv[3], argv[4]);
     } else if (0 == strcmp(argv[1], "visualize")){
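Given the dispatch above, the new subcommands would presumably be run as "./darknet ops <cfgfile>", which prints the network's operation count, and "./darknet average ...", which forwards the full argc/argv to average(); the binary name and exact argument lists are assumptions here, since neither the build target nor average()'s argument parsing appears in this diff.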

View File

@@ -133,9 +133,11 @@ void forward_detection_layer(const detection_layer l, network_state state)
                     best_index = 0;
                 }
             }
+            /*
             if(1 && *(state.net.seen) < 100000){
                 best_index = rand()%l.n;
             }
+            */
 
             int box_index = index + locations*(l.classes + l.n) + (i*l.n + best_index) * l.coords;
             int tbox_index = truth_index + 1 + l.classes;

View File

@@ -66,7 +66,7 @@ void forward_xnor_layer(const layer l, network_state state)
     ai2_bin_conv_layer al = ai2_make_bin_conv_layer(b, c, ix, iy, wx, wy, s, pad);
 
     // OPTIONAL: You need to set the real-valued input like:
-    ai2_setFltInput(&al, state.input);
+    ai2_setFltInput_unpadded(&al, state.input);
     // The above function will automatically binarize the input for the layer (channel wise).
     // If commented: using the default 0-valued input.