/* darknet/src/region_layer.c */

#include "region_layer.h"
#include "activations.h"
#include "blas.h"
#include "box.h"
#include "cuda.h"
#include "utils.h"
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>   /* exp(), log(), pow() used below */
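
/*
 * make_region_layer: constructs the region layer (the YOLOv2-style detection
 * head). Each of the n anchors at every w*h cell predicts `coords` box values,
 * one objectness score, and `classes` class scores, so the layer holds
 * h*w*n*(classes + coords + 1) outputs per image. Anchor biases default to
 * 0.5; the parser typically overwrites them with the anchors from the .cfg.
 */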
layer make_region_layer(int batch, int w, int h, int n, int classes, int coords)
{
    layer l = {0};
    l.type = REGION;

    l.n = n;
    l.batch = batch;
    l.h = h;
    l.w = w;
    l.c = n*(classes + coords + 1);
    l.out_w = l.w;
    l.out_h = l.h;
    l.out_c = l.c;
    l.classes = classes;
    l.coords = coords;
    l.cost = calloc(1, sizeof(float));
    l.biases = calloc(n*2, sizeof(float));
    l.bias_updates = calloc(n*2, sizeof(float));
    l.outputs = h*w*n*(classes + coords + 1);
    l.inputs = l.outputs;
    l.truths = 30*(l.coords + 1);
    l.delta = calloc(batch*l.outputs, sizeof(float));
    l.output = calloc(batch*l.outputs, sizeof(float));
    int i;
    for(i = 0; i < n*2; ++i){
        l.biases[i] = .5;
    }

    l.forward = forward_region_layer;
    l.backward = backward_region_layer;
#ifdef GPU
    l.forward_gpu = forward_region_layer_gpu;
    l.backward_gpu = backward_region_layer_gpu;
    l.output_gpu = cuda_make_array(l.output, batch*l.outputs);
    l.delta_gpu = cuda_make_array(l.delta, batch*l.outputs);
#endif

    fprintf(stderr, "detection\n");
    srand(0);

    return l;
}
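
/*
 * resize_region_layer: adjusts the layer to a new input resolution (e.g. for
 * multi-scale training). Only the spatial dimensions change; the per-cell
 * layout stays the same, so the CPU buffers are realloc'd and the GPU buffers
 * are re-created at the new size.
 */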
void resize_region_layer(layer *l, int w, int h)
{
    l->w = w;
    l->h = h;

    l->outputs = h*w*l->n*(l->classes + l->coords + 1);
    l->inputs = l->outputs;

    l->output = realloc(l->output, l->batch*l->outputs*sizeof(float));
    l->delta = realloc(l->delta, l->batch*l->outputs*sizeof(float));

#ifdef GPU
    cuda_free(l->delta_gpu);
    cuda_free(l->output_gpu);

    l->delta_gpu = cuda_make_array(l->delta, l->batch*l->outputs);
    l->output_gpu = cuda_make_array(l->output, l->batch*l->outputs);
#endif
}
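
/*
 * get_region_box: decodes one predicted box from the raw layer output.
 * x/y are offsets within cell (i, j) (already logistic-activated by the
 * forward pass) and w/h are log-space scales of the anchor biases:
 *     b.x = (i + tx) / w      b.w = exp(tw) * bias_w / w
 *     b.y = (j + ty) / h      b.h = exp(th) * bias_h / h
 * The result is normalized relative to the w x h feature map.
 */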
box get_region_box(float *x, float *biases, int n, int index, int i, int j, int w, int h, int stride)
{
    box b;
    b.x = (i + x[index + 0*stride]) / w;
    b.y = (j + x[index + 1*stride]) / h;
    b.w = exp(x[index + 2*stride]) * biases[2*n] / w;
    b.h = exp(x[index + 3*stride]) * biases[2*n+1] / h;
    return b;
}
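
/*
 * delta_region_box: writes the coordinate gradient for one box into `delta`.
 * The ground-truth box is converted into the same (tx, ty, tw, th)
 * parameterization that get_region_box decodes, the (target - prediction)
 * error scaled by `scale` is stored for each of the four coordinates, and the
 * IOU between the current prediction and the truth is returned for logging.
 */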
float delta_region_box(box truth, float *x, float *biases, int n, int index, int i, int j, int w, int h, float *delta, float scale, int stride)
{
    box pred = get_region_box(x, biases, n, index, i, j, w, h, stride);
    float iou = box_iou(pred, truth);

    float tx = (truth.x*w - i);
    float ty = (truth.y*h - j);
    float tw = log(truth.w*w / biases[2*n]);
    float th = log(truth.h*h / biases[2*n + 1]);

    delta[index + 0*stride] = scale * (tx - x[index + 0*stride]);
    delta[index + 1*stride] = scale * (ty - x[index + 1*stride]);
    delta[index + 2*stride] = scale * (tw - x[index + 2*stride]);
    delta[index + 3*stride] = scale * (th - x[index + 3*stride]);
    return iou;
}
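
/*
 * delta_region_class: writes the classification gradient for one box.
 * With a softmax tree (hierarchical classes) it walks from the assigned class
 * up through its parents, pushing each group toward a one-hot target; without
 * a tree it is a plain one-vs-all target over `classes` outputs. avg_cat
 * accumulates the probability assigned to the correct class for the stats
 * printout.
 */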
void delta_region_class(float *output, float *delta, int index, int class, int classes, tree *hier, float scale, int stride, float *avg_cat)
{
    int i, n;
    if(hier){
        float pred = 1;
        while(class >= 0){
            pred *= output[index + stride*class];
            int g = hier->group[class];
            int offset = hier->group_offset[g];
            for(i = 0; i < hier->group_size[g]; ++i){
                delta[index + stride*(offset + i)] = scale * (0 - output[index + stride*(offset + i)]);
            }
            delta[index + stride*class] = scale * (1 - output[index + stride*class]);

            class = hier->parent[class];
        }
        *avg_cat += pred;
    } else {
        for(n = 0; n < classes; ++n){
            delta[index + stride*n] = scale * (((n == class)?1 : 0) - output[index + stride*n]);
            if(n == class) *avg_cat += output[index + stride*n];
        }
    }
}

float logit(float x)
{
    return log(x/(1.-x));
}

float tisnan(float x)
{
    return (x != x);
}
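
/*
 * entry_index: maps (batch, anchor/spatial location, entry) to a flat offset
 * into the layer output. Per image the output is laid out anchor-major, and
 * inside each anchor as planes of size w*h:
 *     [x][y][w][h][objectness][class 0]...[class classes-1]
 * e.g. the objectness of anchor n at cell (i, j) lives at
 *     entry_index(l, b, n*l.w*l.h + j*l.w + i, l.coords)
 */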
int entry_index(layer l, int batch, int location, int entry)
{
    int n = location / (l.w*l.h);
    int loc = location % (l.w*l.h);
    return batch*l.outputs + n*l.w*l.h*(l.coords+l.classes+1) + entry*l.w*l.h + loc;
}
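
/*
 * forward_region_layer: applies the activations (logistic on x/y and
 * objectness, softmax or softmax tree on the classes) and, when training,
 * fills l.delta in three stages per image:
 *   1. softmax-tree truths with enormous x/y coordinates (apparently marking
 *      classification-only samples) get a class update on the best-scoring box;
 *   2. every predicted box gets a no-object gradient unless its best IOU with
 *      any truth exceeds l.thresh (plus a weak pull toward its anchor shape
 *      for the first 12800 seen images);
 *   3. each ground truth is matched to the best anchor in its cell, which
 *      gets coordinate, objectness and class gradients.
 * The cost is the squared magnitude of l.delta.
 */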
void forward_region_layer(const layer l, network net)
{
    int i,j,b,t,n;
    memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));

#ifndef GPU
    for (b = 0; b < l.batch; ++b){
        for(n = 0; n < l.n; ++n){
            int index = entry_index(l, b, n*l.w*l.h, 0);
            activate_array(l.output + index, 2*l.w*l.h, LOGISTIC);
            index = entry_index(l, b, n*l.w*l.h, 4);
            if(!l.background) activate_array(l.output + index, l.w*l.h, LOGISTIC);
        }
    }
    if (l.softmax_tree){
        int i;
        int count = 5;
        for (i = 0; i < l.softmax_tree->groups; ++i) {
            int group_size = l.softmax_tree->group_size[i];
            softmax_cpu(net.input + count, group_size, l.batch, l.inputs, l.n*l.w*l.h, 1, l.n*l.w*l.h, l.temperature, l.output + count);
            count += group_size;
        }
    } else if (l.softmax){
        int index = entry_index(l, 0, 0, l.coords + !l.background);
        softmax_cpu(net.input + index, l.classes + l.background, l.batch*l.n, l.inputs/l.n, l.w*l.h, 1, l.w*l.h, 1, l.output + index);
    }
#endif

    memset(l.delta, 0, l.outputs * l.batch * sizeof(float));
    if(!net.train) return;
    float avg_iou = 0;
    float recall = 0;
    float avg_cat = 0;
    float avg_obj = 0;
    float avg_anyobj = 0;
    int count = 0;
    int class_count = 0;
    *(l.cost) = 0;
    for (b = 0; b < l.batch; ++b) {
        if(l.softmax_tree){
            int onlyclass = 0;
            for(t = 0; t < 30; ++t){
                box truth = float_to_box(net.truth + t*(l.coords + 1) + b*l.truths, 1);
                if(!truth.x) break;
                int class = net.truth[t*(l.coords + 1) + b*l.truths + 4];
                float maxp = 0;
                int maxi = 0;
                if(truth.x > 100000 && truth.y > 100000){
                    for(n = 0; n < l.n*l.w*l.h; ++n){
                        int class_index = entry_index(l, b, n, 5);
                        int obj_index = entry_index(l, b, n, 4);
                        float scale = l.output[obj_index];
                        l.delta[obj_index] = l.noobject_scale * (0 - l.output[obj_index]);
                        float p = scale*get_hierarchy_probability(l.output + class_index, l.softmax_tree, class, l.w*l.h);
                        if(p > maxp){
                            maxp = p;
                            maxi = n;
                        }
                    }
                    int class_index = entry_index(l, b, maxi, 5);
                    int obj_index = entry_index(l, b, maxi, 4);
                    delta_region_class(l.output, l.delta, class_index, class, l.classes, l.softmax_tree, l.class_scale, l.w*l.h, &avg_cat);
                    if(l.output[obj_index] < .3) l.delta[obj_index] = l.object_scale * (.3 - l.output[obj_index]);
                    else l.delta[obj_index] = 0;
                    ++class_count;
                    onlyclass = 1;
                    break;
                }
            }
            if(onlyclass) continue;
        }
        for (j = 0; j < l.h; ++j) {
            for (i = 0; i < l.w; ++i) {
                for (n = 0; n < l.n; ++n) {
                    int box_index = entry_index(l, b, n*l.w*l.h + j*l.w + i, 0);
                    box pred = get_region_box(l.output, l.biases, n, box_index, i, j, l.w, l.h, l.w*l.h);
                    float best_iou = 0;
                    for(t = 0; t < 30; ++t){
                        box truth = float_to_box(net.truth + t*5 + b*l.truths, 1);
                        if(!truth.x) break;
                        float iou = box_iou(pred, truth);
                        if (iou > best_iou) {
                            best_iou = iou;
                        }
                    }
                    int obj_index = entry_index(l, b, n*l.w*l.h + j*l.w + i, 4);
                    avg_anyobj += l.output[obj_index];
                    l.delta[obj_index] = l.noobject_scale * (0 - l.output[obj_index]);
                    if(l.background) l.delta[obj_index] = l.noobject_scale * (1 - l.output[obj_index]);
                    if (best_iou > l.thresh) {
                        l.delta[obj_index] = 0;
                    }

                    if(*(net.seen) < 12800){
                        box truth = {0};
                        truth.x = (i + .5)/l.w;
                        truth.y = (j + .5)/l.h;
                        truth.w = l.biases[2*n]/l.w;
                        truth.h = l.biases[2*n+1]/l.h;
                        delta_region_box(truth, l.output, l.biases, n, box_index, i, j, l.w, l.h, l.delta, .01, l.w*l.h);
                    }
                }
            }
        }
        for(t = 0; t < 30; ++t){
            box truth = float_to_box(net.truth + t*5 + b*l.truths, 1);

            if(!truth.x) break;
            float best_iou = 0;
            int best_n = 0;
            i = (truth.x * l.w);
            j = (truth.y * l.h);
            //printf("%d %f %d %f\n", i, truth.x*l.w, j, truth.y*l.h);
            box truth_shift = truth;
            truth_shift.x = 0;
            truth_shift.y = 0;
            //printf("index %d %d\n",i, j);
            for(n = 0; n < l.n; ++n){
                int box_index = entry_index(l, b, n*l.w*l.h + j*l.w + i, 0);
                box pred = get_region_box(l.output, l.biases, n, box_index, i, j, l.w, l.h, l.w*l.h);
                if(l.bias_match){
                    pred.w = l.biases[2*n]/l.w;
                    pred.h = l.biases[2*n+1]/l.h;
                }
                //printf("pred: (%f, %f) %f x %f\n", pred.x, pred.y, pred.w, pred.h);
                pred.x = 0;
                pred.y = 0;
                float iou = box_iou(pred, truth_shift);
                if (iou > best_iou){
                    best_iou = iou;
                    best_n = n;
                }
            }
            //printf("%d %f (%f, %f) %f x %f\n", best_n, best_iou, truth.x, truth.y, truth.w, truth.h);

            int box_index = entry_index(l, b, best_n*l.w*l.h + j*l.w + i, 0);
            float iou = delta_region_box(truth, l.output, l.biases, best_n, box_index, i, j, l.w, l.h, l.delta, l.coord_scale * (2 - truth.w*truth.h), l.w*l.h);
            if(iou > .5) recall += 1;
            avg_iou += iou;

            //l.delta[best_index + 4] = iou - l.output[best_index + 4];
            int obj_index = entry_index(l, b, best_n*l.w*l.h + j*l.w + i, l.coords);
            avg_obj += l.output[obj_index];
            l.delta[obj_index] = l.object_scale * (1 - l.output[obj_index]);
            if (l.rescore) {
                l.delta[obj_index] = l.object_scale * (iou - l.output[obj_index]);
            }
            if(l.background){
                l.delta[obj_index] = l.object_scale * (0 - l.output[obj_index]);
            }

            int class = net.truth[t*(l.coords + 1) + b*l.truths + l.coords];
            if (l.map) class = l.map[class];
            int class_index = entry_index(l, b, best_n*l.w*l.h + j*l.w + i, l.coords + 1);
            delta_region_class(l.output, l.delta, class_index, class, l.classes, l.softmax_tree, l.class_scale, l.w*l.h, &avg_cat);
            ++count;
            ++class_count;
        }
    }
    //printf("\n");
    *(l.cost) = pow(mag_array(l.delta, l.outputs * l.batch), 2);
    printf("Region Avg IOU: %f, Class: %f, Obj: %f, No Obj: %f, Avg Recall: %f, count: %d\n", avg_iou/count, avg_cat/class_count, avg_obj/count, avg_anyobj/(l.w*l.h*l.n*l.batch), recall/count, count);
}
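
/*
 * backward_region_layer: a no-op in this snapshot; the commented-out block is
 * an older CPU variant kept for reference. On the GPU path the logistic
 * gradients and the copy into net.delta are handled by
 * backward_region_layer_gpu below.
 */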
void backward_region_layer(const layer l, network net)
{
    /*
       int b;
       int size = l.coords + l.classes + 1;
       for (b = 0; b < l.batch*l.n; ++b){
           int index = (b*size + 4)*l.w*l.h;
           gradient_array(l.output + index, l.w*l.h, LOGISTIC, l.delta + index);
       }
       axpy_cpu(l.batch*l.inputs, 1, l.delta, 1, net.delta, 1);
     */
}
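
/*
 * correct_region_boxes: undoes the letterbox resize used at input time.
 * new_w/new_h is the area the image occupies inside the netw x neth network
 * input when its aspect ratio is preserved; boxes are shifted and rescaled
 * from network coordinates back to image coordinates (relative, or absolute
 * pixels when `relative` is 0).
 */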
void correct_region_boxes(box *boxes, int n, int w, int h, int netw, int neth, int relative)
{
    int i;
    int new_w=0;
    int new_h=0;
    if (((float)netw/w) < ((float)neth/h)) {
        new_w = netw;
        new_h = (h * netw)/w;
    } else {
        new_h = neth;
        new_w = (w * neth)/h;
    }
    for (i = 0; i < n; ++i){
        box b = boxes[i];
        b.x = (b.x - (netw - new_w)/2./netw) / ((float)new_w/netw);
        b.y = (b.y - (neth - new_h)/2./neth) / ((float)new_h/neth);
        b.w *= (float)netw/new_w;
        b.h *= (float)neth/new_h;
        if(!relative){
            b.x *= w;
            b.w *= w;
            b.y *= h;
            b.h *= h;
        }
        boxes[i] = b;
    }
}
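
/*
 * get_region_boxes: converts the raw layer output into detection candidates.
 * When l.batch == 2 the second image is assumed to be a horizontal flip of
 * the first and the two predictions are averaged. For every anchor at every
 * cell it decodes the box, scales the class scores by objectness, thresholds
 * them into `probs`, and finally maps the boxes back onto the original image
 * with correct_region_boxes().
 */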
void get_region_boxes(layer l, int w, int h, int netw, int neth, float thresh, float **probs, box *boxes, int only_objectness, int *map, float tree_thresh, int relative)
{
    int i,j,n,z;
    float *predictions = l.output;
    if (l.batch == 2) {
        float *flip = l.output + l.outputs;
        for (j = 0; j < l.h; ++j) {
            for (i = 0; i < l.w/2; ++i) {
                for (n = 0; n < l.n; ++n) {
                    for(z = 0; z < l.classes + 5; ++z){
                        int i1 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + i;
                        int i2 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + (l.w - i - 1);
                        float swap = flip[i1];
                        flip[i1] = flip[i2];
                        flip[i2] = swap;
                        if(z == 0){
                            flip[i1] = -flip[i1];
                            flip[i2] = -flip[i2];
                        }
                    }
                }
            }
        }
        for(i = 0; i < l.outputs; ++i){
            l.output[i] = (l.output[i] + flip[i])/2.;
        }
    }
    for (i = 0; i < l.w*l.h; ++i){
        int row = i / l.w;
        int col = i % l.w;
        for(n = 0; n < l.n; ++n){
            int index = n*l.w*l.h + i;
            for(j = 0; j < l.classes; ++j){
                probs[index][j] = 0;
            }
            int obj_index = entry_index(l, 0, n*l.w*l.h + i, 4);
            int box_index = entry_index(l, 0, n*l.w*l.h + i, 0);
            float scale = l.background ? 1 : predictions[obj_index];
            boxes[index] = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h, l.w*l.h);

            int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + !l.background);
            if(l.softmax_tree){
                hierarchy_predictions(predictions + class_index, l.classes, l.softmax_tree, 0, l.w*l.h);
                if(map){
                    for(j = 0; j < 200; ++j){
                        int class_index = entry_index(l, 0, n*l.w*l.h + i, 5 + map[j]);
                        float prob = scale*predictions[class_index];
                        probs[index][j] = (prob > thresh) ? prob : 0;
                    }
                } else {
                    int j = hierarchy_top_prediction(predictions + class_index, l.softmax_tree, tree_thresh, l.w*l.h);
                    probs[index][j] = (scale > thresh) ? scale : 0;
                    probs[index][l.classes] = scale;
                }
            } else {
                float max = 0;
                for(j = 0; j < l.classes; ++j){
                    int class_index = entry_index(l, 0, n*l.w*l.h + i, 5 + j);
                    float prob = scale*predictions[class_index];
                    probs[index][j] = (prob > thresh) ? prob : 0;
                    if(prob > max) max = prob;
                    // TODO REMOVE
                    // if (j == 56 ) probs[index][j] = 0;
                    /*
                       if (j != 0) probs[index][j] = 0;
                       int blacklist[] = {121, 497, 482, 504, 122, 518, 481, 418, 542, 491, 914, 478, 120, 510, 500};
                       int bb;
                       for (bb = 0; bb < sizeof(blacklist)/sizeof(int); ++bb){
                           if(index == blacklist[bb]) probs[index][j] = 0;
                       }
                     */
                }
                probs[index][l.classes] = max;
            }
            if(only_objectness){
                probs[index][0] = scale;
            }
        }
    }
    correct_region_boxes(boxes, l.w*l.h*l.n, w, h, netw, neth, relative);
}

#ifdef GPU
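/*
 * forward_region_layer_gpu: GPU mirror of the CPU forward pass. Activations
 * and the (tree) softmax run on the device; for training, the activated
 * output is pulled back into net.input and the delta is computed on the host
 * by forward_region_layer(), then pushed up to l.delta_gpu.
 */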
void forward_region_layer_gpu(const layer l, network net)
{
    copy_ongpu(l.batch*l.inputs, net.input_gpu, 1, l.output_gpu, 1);
    int b, n;
    for (b = 0; b < l.batch; ++b){
        for(n = 0; n < l.n; ++n){
            int index = entry_index(l, b, n*l.w*l.h, 0);
            activate_array_ongpu(l.output_gpu + index, 2*l.w*l.h, LOGISTIC);
            index = entry_index(l, b, n*l.w*l.h, l.coords);
            if(!l.background) activate_array_ongpu(l.output_gpu + index, l.w*l.h, LOGISTIC);
        }
    }
    if (l.softmax_tree){
        int index = entry_index(l, 0, 0, 5);
        softmax_tree(net.input_gpu + index, l.w*l.h, l.batch*l.n, l.inputs/l.n, 1, l.output_gpu + index, *l.softmax_tree);
        /*
           int i;
           int count = 5;
           for (i = 0; i < l.softmax_tree->groups; ++i) {
               int group_size = l.softmax_tree->group_size[i];
               int index = entry_index(l, 0, 0, count);
               softmax_gpu(net.input_gpu + index, group_size, l.batch*l.n, l.inputs/l.n, l.w*l.h, 1, l.w*l.h, 1, l.output_gpu + index);
               count += group_size;
           }
         */
    } else if (l.softmax) {
        int index = entry_index(l, 0, 0, l.coords + !l.background);
        //printf("%d\n", index);
        softmax_gpu(net.input_gpu + index, l.classes + l.background, l.batch*l.n, l.inputs/l.n, l.w*l.h, 1, l.w*l.h, 1, l.output_gpu + index);
    }
    if(!net.train || l.onlyforward){
        cuda_pull_array(l.output_gpu, l.output, l.batch*l.outputs);
        return;
    }

    float *truth_cpu = 0;
    if(net.truth_gpu){
        int num_truth = l.batch*l.truths;
        truth_cpu = calloc(num_truth, sizeof(float));
        cuda_pull_array(net.truth_gpu, truth_cpu, num_truth);
    }
    cuda_pull_array(l.output_gpu, net.input, l.batch*l.inputs);
    forward_region_layer(l, net);
    //cuda_push_array(l.output_gpu, l.output, l.batch*l.outputs);
    if(!net.train) return;
    cuda_push_array(l.delta_gpu, l.delta, l.batch*l.outputs);
}
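
/*
 * backward_region_layer_gpu: applies the logistic gradient to the x/y (and,
 * unless l.background is set, the objectness) slices of l.delta_gpu, then
 * accumulates l.delta_gpu into the previous layer's net.delta_gpu.
 */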
void backward_region_layer_gpu(const layer l, network net)
{
    int b, n;
    for (b = 0; b < l.batch; ++b){
        for(n = 0; n < l.n; ++n){
            int index = entry_index(l, b, n*l.w*l.h, 0);
            gradient_array_ongpu(l.output_gpu + index, 2*l.w*l.h, LOGISTIC, l.delta_gpu + index);
            index = entry_index(l, b, n*l.w*l.h, l.coords);
            if(!l.background) gradient_array_ongpu(l.output_gpu + index, l.w*l.h, LOGISTIC, l.delta_gpu + index);
        }
    }
    axpy_ongpu(l.batch*l.inputs, 1, l.delta_gpu, 1, net.delta_gpu, 1);
}
#endif
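
/*
 * zero_objectness: clears the objectness entry of every predicted box in the
 * first image of the batch; a small utility, presumably for debugging or
 * ablation experiments.
 */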
void zero_objectness(layer l)
{
    int i, n;
    for (i = 0; i < l.w*l.h; ++i){
        for(n = 0; n < l.n; ++n){
            int obj_index = entry_index(l, 0, n*l.w*l.h + i, 4);
            l.output[obj_index] = 0;
        }
    }
}