darknet/src/network.c

#include <stdio.h>
#include <time.h>
#include <assert.h>
#include "network.h"
#include "image.h"
#include "data.h"
#include "utils.h"
#include "blas.h"

#include "crop_layer.h"
#include "connected_layer.h"
#include "gru_layer.h"
#include "rnn_layer.h"
#include "crnn_layer.h"
#include "local_layer.h"
#include "convolutional_layer.h"
#include "activation_layer.h"
#include "detection_layer.h"
#include "region_layer.h"
#include "normalization_layer.h"
#include "batchnorm_layer.h"
#include "maxpool_layer.h"
#include "reorg_layer.h"
#include "avgpool_layer.h"
#include "cost_layer.h"
#include "softmax_layer.h"
#include "dropout_layer.h"
#include "route_layer.h"
#include "shortcut_layer.h"
#include "parser.h"
load_args get_base_args(network net)
{
    load_args args = {0};
    args.w = net.w;
    args.h = net.h;
    args.size = net.w;

    args.min = net.min_crop;
    args.max = net.max_crop;
    args.angle = net.angle;
    args.aspect = net.aspect;
    args.exposure = net.exposure;
    args.center = net.center;
    args.saturation = net.saturation;
    args.hue = net.hue;
    return args;
}
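
/* Build a network from a cfg file and optionally load pretrained
 * weights. If clear is nonzero, the batch counter (*net.seen) is
 * reset so learning-rate scheduling starts over from batch zero.
 *
 * A minimal usage sketch (file paths are illustrative only):
 *
 *     network net = load_network("cfg/yolo.cfg", "yolo.weights", 0);
 *     set_batch_network(&net, 1);
 *     float *predictions = network_predict(net, image_data);
 */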
network load_network(char *cfg, char *weights, int clear)
{
    network net = parse_network_cfg(cfg);
    if(weights && weights[0] != 0){
        load_weights(&net, weights);
    }
    if(clear) *net.seen = 0;
    return net;
}
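
/* Number of full training batches processed so far, where one
 * batch is net.batch*net.subdivisions images. */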
int get_current_batch(network net)
{
    int batch_num = (*net.seen)/(net.batch*net.subdivisions);
    return batch_num;
}

void reset_momentum(network net)
{
    if (net.momentum == 0) return;
    net.learning_rate = 0;
    net.momentum = 0;
    net.decay = 0;
#ifdef GPU
    //if(net.gpu_index >= 0) update_network_gpu(net);
#endif
}
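
/* Learning rate for the current batch under the configured policy
 * (CONSTANT, STEP, STEPS, EXP, POLY, RANDOM, SIG), including the
 * warm-up ramp while batch_num < net.burn_in. */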
float get_current_rate(network net)
{
    int batch_num = get_current_batch(net);
    int i;
    float rate;
    if (batch_num < net.burn_in) return net.learning_rate * pow((float)batch_num / net.burn_in, net.power);
    switch (net.policy) {
        case CONSTANT:
            return net.learning_rate;
        case STEP:
            return net.learning_rate * pow(net.scale, batch_num/net.step);
        case STEPS:
            rate = net.learning_rate;
            for(i = 0; i < net.num_steps; ++i){
                if(net.steps[i] > batch_num) return rate;
                rate *= net.scales[i];
                //if(net.steps[i] > batch_num - 1 && net.scales[i] > 1) reset_momentum(net);
            }
            return rate;
        case EXP:
            return net.learning_rate * pow(net.gamma, batch_num);
        case POLY:
            return net.learning_rate * pow(1 - (float)batch_num / net.max_batches, net.power);
        case RANDOM:
            return net.learning_rate * pow(rand_uniform(0,1), net.power);
        case SIG:
            return net.learning_rate * (1./(1.+exp(net.gamma*(batch_num - net.step))));
        default:
            fprintf(stderr, "Policy is weird!\n");
            return net.learning_rate;
    }
}

char *get_layer_string(LAYER_TYPE a)
{
    switch(a){
        case CONVOLUTIONAL:
            return "convolutional";
        case ACTIVE:
            return "activation";
        case LOCAL:
            return "local";
        case DECONVOLUTIONAL:
            return "deconvolutional";
        case CONNECTED:
            return "connected";
        case RNN:
            return "rnn";
        case GRU:
            return "gru";
        case LSTM:
            return "lstm";
        case CRNN:
            return "crnn";
        case MAXPOOL:
            return "maxpool";
        case REORG:
            return "reorg";
        case AVGPOOL:
            return "avgpool";
        case SOFTMAX:
            return "softmax";
        case DETECTION:
            return "detection";
        case REGION:
            return "region";
        case DROPOUT:
            return "dropout";
        case CROP:
            return "crop";
        case COST:
            return "cost";
        case ROUTE:
            return "route";
        case SHORTCUT:
            return "shortcut";
        case NORMALIZATION:
            return "normalization";
        case BATCHNORM:
            return "batchnorm";
        default:
            break;
    }
    return "none";
}
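
/* Allocate an empty network with room for n layers; the layers
 * themselves are filled in by the cfg parser. */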
network make_network(int n)
{
    network net = {0};
    net.n = n;
    net.layers = calloc(net.n, sizeof(layer));
    net.seen = calloc(1, sizeof(int));
    net.cost = calloc(1, sizeof(float));
    return net;
}
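
/* Run one forward pass: zero each layer's delta, call its forward
 * function, and chain each layer's output into the next layer's
 * input. Also records the network cost. */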
void forward_network(network net)
{
    int i;
    for(i = 0; i < net.n; ++i){
        net.index = i;
        layer l = net.layers[i];
        if(l.delta){
            fill_cpu(l.outputs * l.batch, 0, l.delta, 1);
        }
        l.forward(l, net);
        net.input = l.output;
        if(l.truth) {
            net.truth = l.output;
        }
    }
    calc_network_cost(net);
}
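
/* Apply each layer's weight update using the current learning rate
 * scaled by the layer's own learning_rate_scale. */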
void update_network(network net)
{
    int i;
    int update_batch = net.batch*net.subdivisions;
    float rate = get_current_rate(net);
    for(i = 0; i < net.n; ++i){
        layer l = net.layers[i];
        if(l.update){
            l.update(l, update_batch, rate*l.learning_rate_scale, net.momentum, net.decay);
        }
    }
}
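
/* Average the cost over every layer that reports one into *net.cost. */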
void calc_network_cost(network net)
{
    int i;
    float sum = 0;
    int count = 0;
    for(i = 0; i < net.n; ++i){
        if(net.layers[i].cost){
            sum += net.layers[i].cost[0];
            ++count;
        }
    }
    *net.cost = sum/count;
}

int get_predicted_class_network(network net)
{
    return max_index(net.output, net.outputs);
}
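
/* Run one backward pass: walk the layers in reverse, pointing
 * net.input and net.delta at the previous layer's buffers so each
 * layer accumulates gradients into its predecessor. */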
void backward_network(network net)
{
    int i;
    network orig = net;
    for(i = net.n-1; i >= 0; --i){
        layer l = net.layers[i];
        if(l.stopbackward) break;
        if(i == 0){
            net = orig;
        }else{
            layer prev = net.layers[i-1];
            net.input = prev.output;
            net.delta = prev.delta;
        }
        net.index = i;
        l.backward(l, net);
    }
}
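
/* One training step on the batch already loaded into net.input and
 * net.truth: forward, backward, and a weight update once every
 * net.subdivisions batches. Returns the batch cost. */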
float train_network_datum(network net)
{
#ifdef GPU
    if(gpu_index >= 0) return train_network_datum_gpu(net);
#endif
    *net.seen += net.batch;
    net.train = 1;
    forward_network(net);
    backward_network(net);
    float error = *net.cost;
    if(((*net.seen)/net.batch)%net.subdivisions == 0) update_network(net);
    return error;
}

float train_network_sgd(network net, data d, int n)
{
    int batch = net.batch;
    int i;
    float sum = 0;
    for(i = 0; i < n; ++i){
        get_random_batch(d, batch, net.input, net.truth);
        float err = train_network_datum(net);
        sum += err;
    }
    return (float)sum/(n*batch);
}
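
/* Train on every row of d in order; d.X.rows must be a multiple of
 * net.batch. Returns the average cost per image. */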
float train_network(network net, data d)
{
    assert(d.X.rows % net.batch == 0);
    int batch = net.batch;
    int n = d.X.rows / batch;
    int i;
    float sum = 0;
    for(i = 0; i < n; ++i){
        get_next_batch(d, batch, i*batch, net.input, net.truth);
        float err = train_network_datum(net);
        sum += err;
    }
    return (float)sum/(n*batch);
}
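
/* Change the batch size of the network and every layer in place;
 * with cuDNN, convolutional descriptors must be rebuilt to match. */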
void set_batch_network(network *net, int b)
{
    net->batch = b;
    int i;
    for(i = 0; i < net->n; ++i){
        net->layers[i].batch = b;
#ifdef CUDNN
        if(net->layers[i].type == CONVOLUTIONAL){
            cudnn_convolutional_setup(net->layers + i);
        }
#endif
    }
}
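
/* Resize the network to a new input resolution by resizing each
 * layer that supports it, then reallocating the input, truth, and
 * workspace buffers to match the new sizes. */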
int resize_network(network *net, int w, int h)
{
#ifdef GPU
    cuda_set_device(net->gpu_index);
    cuda_free(net->workspace);
#endif
    int i;
    //if(w == net->w && h == net->h) return 0;
    net->w = w;
    net->h = h;
    int inputs = 0;
    size_t workspace_size = 0;
    //fprintf(stderr, "Resizing to %d x %d...\n", w, h);
    //fflush(stderr);
    for (i = 0; i < net->n; ++i){
        layer l = net->layers[i];
        if(l.type == CONVOLUTIONAL){
            resize_convolutional_layer(&l, w, h);
        }else if(l.type == CROP){
            resize_crop_layer(&l, w, h);
        }else if(l.type == MAXPOOL){
            resize_maxpool_layer(&l, w, h);
        }else if(l.type == REGION){
            resize_region_layer(&l, w, h);
        }else if(l.type == ROUTE){
            resize_route_layer(&l, net);
        }else if(l.type == REORG){
            resize_reorg_layer(&l, w, h);
        }else if(l.type == AVGPOOL){
            resize_avgpool_layer(&l, w, h);
        }else if(l.type == NORMALIZATION){
            resize_normalization_layer(&l, w, h);
        }else if(l.type == COST){
            resize_cost_layer(&l, inputs);
        }else{
            error("Cannot resize this type of layer");
        }
        if(l.workspace_size > workspace_size) workspace_size = l.workspace_size;
        inputs = l.outputs;
        net->layers[i] = l;
        w = l.out_w;
        h = l.out_h;
        if(l.type == AVGPOOL) break;
    }
    layer out = get_network_output_layer(*net);
    net->inputs = net->layers[0].inputs;
    net->outputs = out.outputs;
    net->truths = out.outputs;
    if(net->layers[net->n-1].truths) net->truths = net->layers[net->n-1].truths;
    net->output = out.output;
    free(net->input);
    free(net->truth);
    net->input = calloc(net->inputs*net->batch, sizeof(float));
    net->truth = calloc(net->truths*net->batch, sizeof(float));
#ifdef GPU
    if(gpu_index >= 0){
        cuda_free(net->input_gpu);
        cuda_free(net->truth_gpu);
        net->input_gpu = cuda_make_array(net->input, net->inputs*net->batch);
        net->truth_gpu = cuda_make_array(net->truth, net->truths*net->batch);
        net->workspace = cuda_make_array(0, (workspace_size-1)/sizeof(float)+1);
    }else {
        free(net->workspace);
        net->workspace = calloc(1, workspace_size);
    }
#else
    free(net->workspace);
    net->workspace = calloc(1, workspace_size);
#endif
    //fprintf(stderr, " Done!\n");
    return 0;
}

detection_layer get_network_detection_layer(network net)
{
    int i;
    for(i = 0; i < net.n; ++i){
        if(net.layers[i].type == DETECTION){
            return net.layers[i];
        }
    }
    fprintf(stderr, "Detection layer not found!!\n");
    detection_layer l = {0};
    return l;
}

image get_network_image_layer(network net, int i)
{
    layer l = net.layers[i];
#ifdef GPU
    //cuda_pull_array(l.output_gpu, l.output, l.outputs);
#endif
    if (l.out_w && l.out_h && l.out_c){
        return float_to_image(l.out_w, l.out_h, l.out_c, l.output);
    }
    image def = {0};
    return def;
}

image get_network_image(network net)
{
    int i;
    for(i = net.n-1; i >= 0; --i){
        image m = get_network_image_layer(net, i);
        if(m.h != 0) return m;
    }
    image def = {0};
    return def;
}

void visualize_network(network net)
{
    image *prev = 0;
    int i;
    char buff[256];
    for(i = 0; i < net.n; ++i){
        sprintf(buff, "Layer %d", i);
        layer l = net.layers[i];
        if(l.type == CONVOLUTIONAL){
            prev = visualize_convolutional_layer(l, buff, prev);
        }
    }
}

void top_predictions(network net, int k, int *index)
{
    top_k(net.output, net.outputs, k, index);
}
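
/* Run inference on a single input buffer (CPU, or GPU when one is
 * available) and return a pointer to the network's output layer. */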
float *network_predict(network net, float *input)
{
#ifdef GPU
    if(gpu_index >= 0) return network_predict_gpu(net, input);
#endif
    net.input = input;
    net.truth = 0;
    net.train = 0;
    net.delta = 0;
    forward_network(net);
    return net.output;
}

/* Average the outputs of n forward passes for each batch into pred. */
matrix network_predict_data_multi(network net, data test, int n)
{
    int i,j,b,m;
    int k = net.outputs;
    matrix pred = make_matrix(test.X.rows, k);
    float *X = calloc(net.batch*test.X.cols, sizeof(float));
    for(i = 0; i < test.X.rows; i += net.batch){
        for(b = 0; b < net.batch; ++b){
            if(i+b == test.X.rows) break;
            memcpy(X+b*test.X.cols, test.X.vals[i+b], test.X.cols*sizeof(float));
        }
        for(m = 0; m < n; ++m){
            float *out = network_predict(net, X);
            for(b = 0; b < net.batch; ++b){
                if(i+b == test.X.rows) break;
                for(j = 0; j < k; ++j){
                    pred.vals[i+b][j] += out[j+b*k]/n;
                }
            }
        }
    }
    free(X);
    return pred;
}
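
/* Predict every row of test.X, batching inputs through the network
 * and collecting the outputs into a rows x outputs matrix. */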
matrix network_predict_data(network net, data test)
{
    int i,j,b;
    int k = net.outputs;
    matrix pred = make_matrix(test.X.rows, k);
    float *X = calloc(net.batch*test.X.cols, sizeof(float));
    for(i = 0; i < test.X.rows; i += net.batch){
        for(b = 0; b < net.batch; ++b){
            if(i+b == test.X.rows) break;
            memcpy(X+b*test.X.cols, test.X.vals[i+b], test.X.cols*sizeof(float));
        }
        float *out = network_predict(net, X);
        for(b = 0; b < net.batch; ++b){
            if(i+b == test.X.rows) break;
            for(j = 0; j < k; ++j){
                pred.vals[i+b][j] = out[j+b*k];
            }
        }
    }
    free(X);
    return pred;
}

void print_network(network net)
{
    int i,j;
    for(i = 0; i < net.n; ++i){
        layer l = net.layers[i];
        float *output = l.output;
        int n = l.outputs;
        float mean = mean_array(output, n);
        float vari = variance_array(output, n);
        fprintf(stderr, "Layer %d - Mean: %f, Variance: %f\n", i, mean, vari);
        if(n > 100) n = 100;
        for(j = 0; j < n; ++j) fprintf(stderr, "%f, ", output[j]);
        if(n == 100) fprintf(stderr, ".....\n");
        fprintf(stderr, "\n");
    }
}
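
/* Compare two networks on the same test data with a 2x2 contingency
 * table (a: both wrong, b: only n2 right, c: only n1 right, d: both
 * right) and print McNemar's chi-squared statistic, (|b-c|-1)^2/(b+c). */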
void compare_networks(network n1, network n2, data test)
{
    matrix g1 = network_predict_data(n1, test);
    matrix g2 = network_predict_data(n2, test);
    int i;
    int a,b,c,d;
    a = b = c = d = 0;
    for(i = 0; i < g1.rows; ++i){
        int truth = max_index(test.y.vals[i], test.y.cols);
        int p1 = max_index(g1.vals[i], g1.cols);
        int p2 = max_index(g2.vals[i], g2.cols);
        if(p1 == truth){
            if(p2 == truth) ++d;
            else ++c;
        }else{
            if(p2 == truth) ++b;
            else ++a;
        }
    }
    printf("%5d %5d\n%5d %5d\n", a, b, c, d);
    float num = pow((abs(b - c) - 1.), 2.);
    float den = b + c;
    printf("%f\n", num/den);
}

float network_accuracy(network net, data d)
{
    matrix guess = network_predict_data(net, d);
    float acc = matrix_topk_accuracy(d.y, guess, 1);
    free_matrix(guess);
    return acc;
}
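
/* Top-1 and top-n accuracy in one pass. Note the static buffer: the
 * returned pointer is overwritten by the next call and is not
 * thread-safe. */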
float *network_accuracies(network net, data d, int n)
{
    static float acc[2];
    matrix guess = network_predict_data(net, d);
    acc[0] = matrix_topk_accuracy(d.y, guess, 1);
    acc[1] = matrix_topk_accuracy(d.y, guess, n);
    free_matrix(guess);
    return acc;
}

layer get_network_output_layer(network net)
{
    int i;
    for(i = net.n - 1; i >= 0; --i){
        if(net.layers[i].type != COST) break;
    }
    return net.layers[i];
}

float network_accuracy_multi(network net, data d, int n)
{
    matrix guess = network_predict_data_multi(net, d, n);
    float acc = matrix_topk_accuracy(d.y, guess, 1);
    free_matrix(guess);
    return acc;
}
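
/* Free every layer plus the network's own input/truth buffers,
 * including their GPU mirrors when built with GPU support. */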
void free_network(network net)
{
    int i;
    for(i = 0; i < net.n; ++i){
        free_layer(net.layers[i]);
    }
    free(net.layers);
    if(net.input) free(net.input);
    if(net.truth) free(net.truth);
#ifdef GPU
    if(net.input_gpu) cuda_free(net.input_gpu);
    if(net.truth_gpu) cuda_free(net.truth_gpu);
#endif
}

// Some day...
layer network_output_layer(network net)
{
    int i;
    for(i = net.n - 1; i >= 0; --i){
        if(net.layers[i].type != COST) break;
    }
    return net.layers[i];
}

int network_inputs(network net)
{
    return net.layers[0].inputs;
}

int network_outputs(network net)
{
    return network_output_layer(net).outputs;
}

float *network_output(network net)
{
    return network_output_layer(net).output;
}