Mirror of https://github.com/pjreddie/darknet.git (synced 2023-08-10 21:13:14 +03:00)
Minor fix
@@ -64,65 +64,65 @@ layer make_conv_lstm_layer(int batch, int h, int w, int c, int output_filters, i
     l.peephole = peephole;

     // U
-    l.uf = (layer*)malloc(sizeof(layer));
+    l.uf = (layer*)calloc(1, sizeof(layer));
     *(l.uf) = make_convolutional_layer(batch, steps, h, w, c, output_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
     l.uf->batch = batch;
     if (l.workspace_size < l.uf->workspace_size) l.workspace_size = l.uf->workspace_size;

-    l.ui = (layer*)malloc(sizeof(layer));
+    l.ui = (layer*)calloc(1, sizeof(layer));
     *(l.ui) = make_convolutional_layer(batch, steps, h, w, c, output_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
     l.ui->batch = batch;
     if (l.workspace_size < l.ui->workspace_size) l.workspace_size = l.ui->workspace_size;

-    l.ug = (layer*)malloc(sizeof(layer));
+    l.ug = (layer*)calloc(1, sizeof(layer));
     *(l.ug) = make_convolutional_layer(batch, steps, h, w, c, output_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
     l.ug->batch = batch;
     if (l.workspace_size < l.ug->workspace_size) l.workspace_size = l.ug->workspace_size;

-    l.uo = (layer*)malloc(sizeof(layer));
+    l.uo = (layer*)calloc(1, sizeof(layer));
     *(l.uo) = make_convolutional_layer(batch, steps, h, w, c, output_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
     l.uo->batch = batch;
     if (l.workspace_size < l.uo->workspace_size) l.workspace_size = l.uo->workspace_size;


     // W
-    l.wf = (layer*)malloc(sizeof(layer));
+    l.wf = (layer*)calloc(1, sizeof(layer));
     *(l.wf) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
     l.wf->batch = batch;
     if (l.workspace_size < l.wf->workspace_size) l.workspace_size = l.wf->workspace_size;

-    l.wi = (layer*)malloc(sizeof(layer));
+    l.wi = (layer*)calloc(1, sizeof(layer));
     *(l.wi) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
     l.wi->batch = batch;
     if (l.workspace_size < l.wi->workspace_size) l.workspace_size = l.wi->workspace_size;

-    l.wg = (layer*)malloc(sizeof(layer));
+    l.wg = (layer*)calloc(1, sizeof(layer));
     *(l.wg) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
     l.wg->batch = batch;
     if (l.workspace_size < l.wg->workspace_size) l.workspace_size = l.wg->workspace_size;

-    l.wo = (layer*)malloc(sizeof(layer));
+    l.wo = (layer*)calloc(1, sizeof(layer));
     *(l.wo) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
     l.wo->batch = batch;
     if (l.workspace_size < l.wo->workspace_size) l.workspace_size = l.wo->workspace_size;


     // V
-    l.vf = (layer*)malloc(sizeof(layer));
+    l.vf = (layer*)calloc(1, sizeof(layer));
     if (l.peephole) {
         *(l.vf) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
         l.vf->batch = batch;
         if (l.workspace_size < l.vf->workspace_size) l.workspace_size = l.vf->workspace_size;
     }

-    l.vi = (layer*)malloc(sizeof(layer));
+    l.vi = (layer*)calloc(1, sizeof(layer));
     if (l.peephole) {
         *(l.vi) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
         l.vi->batch = batch;
         if (l.workspace_size < l.vi->workspace_size) l.workspace_size = l.vi->workspace_size;
     }

-    l.vo = (layer*)malloc(sizeof(layer));
+    l.vo = (layer*)calloc(1, sizeof(layer));
     if (l.peephole) {
         *(l.vo) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
         l.vo->batch = batch;
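Why the switch from malloc to calloc matters in this constructor: the vf/vi/vo sub-layers above are allocated unconditionally but only assigned inside the if (l.peephole) blocks, so with peephole off a malloc'd struct would keep indeterminate contents, while calloc zero-fills the allocation and leaves every pointer member NULL until it is explicitly set. A minimal, self-contained sketch of the difference (a toy struct, not darknet's real layer type):

#include <stdio.h>
#include <stdlib.h>

typedef struct { float *output; float *delta; int batch; } toy_layer;

int main(void) {
    toy_layer *m = (toy_layer*)malloc(sizeof(toy_layer));    /* members are indeterminate */
    toy_layer *c = (toy_layer*)calloc(1, sizeof(toy_layer)); /* members are all zero/NULL */

    if (c->output == NULL) printf("calloc: output starts out NULL, as expected\n");
    /* Reading m->output before it is assigned is undefined behavior; it only becomes
       safe after the whole struct is overwritten, the way *(l.uf) = make_convolutional_layer(...)
       overwrites the U/W sub-layers above. The V sub-layers get no such assignment
       when peephole is off, which is where the zero-fill pays off. */

    free(m);
    free(c);
    return 0;
}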
@@ -909,7 +909,7 @@ void forward_conv_lstm_layer_gpu(layer l, network_state state)
         fix_nan_and_inf(l.h_gpu, l.outputs*l.batch);

         copy_ongpu(l.outputs*l.batch, l.c_gpu, 1, l.cell_gpu, 1);
-        copy_ongpu(l.outputs*l.batch, l.h_gpu, 1, l.output_gpu, 1); // required for both Detection and Training
+        copy_ongpu(l.outputs*l.batch, l.h_gpu, 1, l.output_gpu, 1); // is required for both Detection and Training

         state.input += l.inputs*l.batch;
         l.output_gpu += l.outputs*l.batch;
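The two += lines at the end of this hunk advance the input and output pointers by one time step, so the excerpt sits inside the per-step loop of forward_conv_lstm_layer_gpu. A hedged sketch of that surrounding loop; the loop frame is assumed context, and only the statements shown in the diff are taken from it:

int step;
for (step = 0; step < l.steps; ++step) {
    /* ... gate convolutions and cell/hidden-state updates for this step ... */
    fix_nan_and_inf(l.h_gpu, l.outputs*l.batch);

    copy_ongpu(l.outputs*l.batch, l.c_gpu, 1, l.cell_gpu, 1);
    copy_ongpu(l.outputs*l.batch, l.h_gpu, 1, l.output_gpu, 1); // is required for both Detection and Training

    state.input += l.inputs*l.batch;    /* move to the next time step of the input */
    l.output_gpu += l.outputs*l.batch;  /* write the next step into the output buffer */
    /* other per-step buffers (e.g. l.cell_gpu) are assumed to advance the same way */
}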
@@ -48,17 +48,17 @@ layer make_crnn_layer(int batch, int h, int w, int c, int hidden_filters, int ou

     l.state = (float*)calloc(l.hidden * l.batch * (l.steps + 1), sizeof(float));

-    l.input_layer = (layer*)malloc(sizeof(layer));
+    l.input_layer = (layer*)calloc(1, sizeof(layer));
     *(l.input_layer) = make_convolutional_layer(batch, steps, h, w, c, hidden_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
     l.input_layer->batch = batch;
     if (l.workspace_size < l.input_layer->workspace_size) l.workspace_size = l.input_layer->workspace_size;

-    l.self_layer = (layer*)malloc(sizeof(layer));
+    l.self_layer = (layer*)calloc(1, sizeof(layer));
     *(l.self_layer) = make_convolutional_layer(batch, steps, h, w, hidden_filters, hidden_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
     l.self_layer->batch = batch;
     if (l.workspace_size < l.self_layer->workspace_size) l.workspace_size = l.self_layer->workspace_size;

-    l.output_layer = (layer*)malloc(sizeof(layer));
+    l.output_layer = (layer*)calloc(1, sizeof(layer));
     *(l.output_layer) = make_convolutional_layer(batch, steps, h, w, hidden_filters, output_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
     l.output_layer->batch = batch;
     if (l.workspace_size < l.output_layer->workspace_size) l.workspace_size = l.output_layer->workspace_size;
@@ -39,49 +39,49 @@ layer make_lstm_layer(int batch, int inputs, int outputs, int steps, int batch_n
     l.out_h = 1;
     l.out_c = outputs;

-    l.uf = (layer*)malloc(sizeof(layer));
+    l.uf = (layer*)calloc(1, sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.uf) = make_connected_layer(batch, steps, inputs, outputs, LINEAR, batch_normalize);
     l.uf->batch = batch;
     if (l.workspace_size < l.uf->workspace_size) l.workspace_size = l.uf->workspace_size;

-    l.ui = (layer*)malloc(sizeof(layer));
+    l.ui = (layer*)calloc(1, sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.ui) = make_connected_layer(batch, steps, inputs, outputs, LINEAR, batch_normalize);
     l.ui->batch = batch;
     if (l.workspace_size < l.ui->workspace_size) l.workspace_size = l.ui->workspace_size;

-    l.ug = (layer*)malloc(sizeof(layer));
+    l.ug = (layer*)calloc(1, sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.ug) = make_connected_layer(batch, steps, inputs, outputs, LINEAR, batch_normalize);
     l.ug->batch = batch;
     if (l.workspace_size < l.ug->workspace_size) l.workspace_size = l.ug->workspace_size;

-    l.uo = (layer*)malloc(sizeof(layer));
+    l.uo = (layer*)calloc(1, sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.uo) = make_connected_layer(batch, steps, inputs, outputs, LINEAR, batch_normalize);
     l.uo->batch = batch;
     if (l.workspace_size < l.uo->workspace_size) l.workspace_size = l.uo->workspace_size;

-    l.wf = (layer*)malloc(sizeof(layer));
+    l.wf = (layer*)calloc(1, sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.wf) = make_connected_layer(batch, steps, outputs, outputs, LINEAR, batch_normalize);
     l.wf->batch = batch;
     if (l.workspace_size < l.wf->workspace_size) l.workspace_size = l.wf->workspace_size;

-    l.wi = (layer*)malloc(sizeof(layer));
+    l.wi = (layer*)calloc(1, sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.wi) = make_connected_layer(batch, steps, outputs, outputs, LINEAR, batch_normalize);
     l.wi->batch = batch;
     if (l.workspace_size < l.wi->workspace_size) l.workspace_size = l.wi->workspace_size;

-    l.wg = (layer*)malloc(sizeof(layer));
+    l.wg = (layer*)calloc(1, sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.wg) = make_connected_layer(batch, steps, outputs, outputs, LINEAR, batch_normalize);
     l.wg->batch = batch;
     if (l.workspace_size < l.wg->workspace_size) l.workspace_size = l.wg->workspace_size;

-    l.wo = (layer*)malloc(sizeof(layer));
+    l.wo = (layer*)calloc(1, sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.wo) = make_connected_layer(batch, steps, outputs, outputs, LINEAR, batch_normalize);
     l.wo->batch = batch;
@@ -258,7 +258,7 @@ layer parse_conv_lstm(list *options, size_params params)
     ACTIVATION activation = get_activation(activation_s);
     int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
     int xnor = option_find_int_quiet(options, "xnor", 0);
-    int peephole = option_find_int_quiet(options, "peephole", 1);
+    int peephole = option_find_int_quiet(options, "peephole", 0);

     layer l = make_conv_lstm_layer(params.batch, params.h, params.w, params.c, output_filters, groups, params.time_steps, size, stride, padding, activation, batch_normalize, peephole, xnor);

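The only functional change in this hunk is the default for the peephole option: option_find_int_quiet() falls back to its last argument when the key is missing from the cfg section, so a [conv_lstm] block that never mentions peephole now gets peephole = 0, and the vf/vi/vo convolutions guarded by if (l.peephole) in the first hunk are never built. A short sketch of the effect, using only the keys already present in this hunk:

/* Before this commit: a missing "peephole" key meant peephole = 1 (on by default).
 * After this commit: peephole stays off unless the cfg section opts in, e.g.
 *
 *   [conv_lstm]
 *   ...
 *   peephole=1
 */
int peephole = option_find_int_quiet(options, "peephole", 0);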
@@ -42,19 +42,19 @@ layer make_rnn_layer(int batch, int inputs, int hidden, int outputs, int steps,

     l.state = (float*)calloc(batch * hidden * (steps + 1), sizeof(float));

-    l.input_layer = (layer*)malloc(sizeof(layer));
+    l.input_layer = (layer*)calloc(1, sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.input_layer) = make_connected_layer(batch, steps, inputs, hidden, activation, batch_normalize);
     l.input_layer->batch = batch;
     if (l.workspace_size < l.input_layer->workspace_size) l.workspace_size = l.input_layer->workspace_size;

-    l.self_layer = (layer*)malloc(sizeof(layer));
+    l.self_layer = (layer*)calloc(1, sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.self_layer) = make_connected_layer(batch, steps, hidden, hidden, (log==2)?LOGGY:(log==1?LOGISTIC:activation), batch_normalize);
     l.self_layer->batch = batch;
     if (l.workspace_size < l.self_layer->workspace_size) l.workspace_size = l.self_layer->workspace_size;

-    l.output_layer = (layer*)malloc(sizeof(layer));
+    l.output_layer = (layer*)calloc(1, sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.output_layer) = make_connected_layer(batch, steps, hidden, outputs, activation, batch_normalize);
     l.output_layer->batch = batch;