diff --git a/src/conv_lstm_layer.c b/src/conv_lstm_layer.c
index b51a3aed..12426ab6 100644
--- a/src/conv_lstm_layer.c
+++ b/src/conv_lstm_layer.c
@@ -64,65 +64,65 @@ layer make_conv_lstm_layer(int batch, int h, int w, int c, int output_filters, i
     l.peephole = peephole;

     // U
-    l.uf = (layer*)malloc(sizeof(layer));
+    l.uf = (layer*)calloc(1, sizeof(layer));
     *(l.uf) = make_convolutional_layer(batch, steps, h, w, c, output_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
     l.uf->batch = batch;
     if (l.workspace_size < l.uf->workspace_size) l.workspace_size = l.uf->workspace_size;

-    l.ui = (layer*)malloc(sizeof(layer));
+    l.ui = (layer*)calloc(1, sizeof(layer));
     *(l.ui) = make_convolutional_layer(batch, steps, h, w, c, output_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
     l.ui->batch = batch;
     if (l.workspace_size < l.ui->workspace_size) l.workspace_size = l.ui->workspace_size;

-    l.ug = (layer*)malloc(sizeof(layer));
+    l.ug = (layer*)calloc(1, sizeof(layer));
     *(l.ug) = make_convolutional_layer(batch, steps, h, w, c, output_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
     l.ug->batch = batch;
     if (l.workspace_size < l.ug->workspace_size) l.workspace_size = l.ug->workspace_size;

-    l.uo = (layer*)malloc(sizeof(layer));
+    l.uo = (layer*)calloc(1, sizeof(layer));
     *(l.uo) = make_convolutional_layer(batch, steps, h, w, c, output_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
     l.uo->batch = batch;
     if (l.workspace_size < l.uo->workspace_size) l.workspace_size = l.uo->workspace_size;

     // W
-    l.wf = (layer*)malloc(sizeof(layer));
+    l.wf = (layer*)calloc(1, sizeof(layer));
     *(l.wf) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
     l.wf->batch = batch;
     if (l.workspace_size < l.wf->workspace_size) l.workspace_size = l.wf->workspace_size;

-    l.wi = (layer*)malloc(sizeof(layer));
+    l.wi = (layer*)calloc(1, sizeof(layer));
     *(l.wi) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
     l.wi->batch = batch;
     if (l.workspace_size < l.wi->workspace_size) l.workspace_size = l.wi->workspace_size;

-    l.wg = (layer*)malloc(sizeof(layer));
+    l.wg = (layer*)calloc(1, sizeof(layer));
     *(l.wg) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
     l.wg->batch = batch;
     if (l.workspace_size < l.wg->workspace_size) l.workspace_size = l.wg->workspace_size;

-    l.wo = (layer*)malloc(sizeof(layer));
+    l.wo = (layer*)calloc(1, sizeof(layer));
     *(l.wo) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
     l.wo->batch = batch;
     if (l.workspace_size < l.wo->workspace_size) l.workspace_size = l.wo->workspace_size;

     // V
-    l.vf = (layer*)malloc(sizeof(layer));
+    l.vf = (layer*)calloc(1, sizeof(layer));
     if (l.peephole) {
         *(l.vf) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
         l.vf->batch = batch;
         if (l.workspace_size < l.vf->workspace_size) l.workspace_size = l.vf->workspace_size;
     }

-    l.vi = (layer*)malloc(sizeof(layer));
+    l.vi = (layer*)calloc(1, sizeof(layer));
     if (l.peephole) {
         *(l.vi) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
         l.vi->batch = batch;
         if (l.workspace_size < l.vi->workspace_size) l.workspace_size = l.vi->workspace_size;
     }

-    l.vo = (layer*)malloc(sizeof(layer));
+    l.vo = (layer*)calloc(1, sizeof(layer));
     if (l.peephole) {
         *(l.vo) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
         l.vo->batch = batch;
@@ -909,7 +909,7 @@ void forward_conv_lstm_layer_gpu(layer l, network_state state)
         fix_nan_and_inf(l.h_gpu, l.outputs*l.batch);

         copy_ongpu(l.outputs*l.batch, l.c_gpu, 1, l.cell_gpu, 1);
-        copy_ongpu(l.outputs*l.batch, l.h_gpu, 1, l.output_gpu, 1); // required for both Detection and Training
+        copy_ongpu(l.outputs*l.batch, l.h_gpu, 1, l.output_gpu, 1); // is required for both Detection and Training

         state.input += l.inputs*l.batch;
         l.output_gpu += l.outputs*l.batch;
diff --git a/src/crnn_layer.c b/src/crnn_layer.c
index f466508e..8534c69a 100644
--- a/src/crnn_layer.c
+++ b/src/crnn_layer.c
@@ -48,17 +48,17 @@ layer make_crnn_layer(int batch, int h, int w, int c, int hidden_filters, int ou
     l.state = (float*)calloc(l.hidden * l.batch * (l.steps + 1), sizeof(float));

-    l.input_layer = (layer*)malloc(sizeof(layer));
+    l.input_layer = (layer*)calloc(1, sizeof(layer));
     *(l.input_layer) = make_convolutional_layer(batch, steps, h, w, c, hidden_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
     l.input_layer->batch = batch;
     if (l.workspace_size < l.input_layer->workspace_size) l.workspace_size = l.input_layer->workspace_size;

-    l.self_layer = (layer*)malloc(sizeof(layer));
+    l.self_layer = (layer*)calloc(1, sizeof(layer));
     *(l.self_layer) = make_convolutional_layer(batch, steps, h, w, hidden_filters, hidden_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
     l.self_layer->batch = batch;
     if (l.workspace_size < l.self_layer->workspace_size) l.workspace_size = l.self_layer->workspace_size;

-    l.output_layer = (layer*)malloc(sizeof(layer));
+    l.output_layer = (layer*)calloc(1, sizeof(layer));
     *(l.output_layer) = make_convolutional_layer(batch, steps, h, w, hidden_filters, output_filters, groups, size, stride, pad, activation, batch_normalize, 0, xnor, 0, 0, 0);
     l.output_layer->batch = batch;
     if (l.workspace_size < l.output_layer->workspace_size) l.workspace_size = l.output_layer->workspace_size;
diff --git a/src/lstm_layer.c b/src/lstm_layer.c
index f7b79477..94664ce3 100644
--- a/src/lstm_layer.c
+++ b/src/lstm_layer.c
@@ -39,49 +39,49 @@ layer make_lstm_layer(int batch, int inputs, int outputs, int steps, int batch_n
     l.out_h = 1;
     l.out_c = outputs;

-    l.uf = (layer*)malloc(sizeof(layer));
+    l.uf = (layer*)calloc(1, sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.uf) = make_connected_layer(batch, steps, inputs, outputs, LINEAR, batch_normalize);
     l.uf->batch = batch;
     if (l.workspace_size < l.uf->workspace_size) l.workspace_size = l.uf->workspace_size;

-    l.ui = (layer*)malloc(sizeof(layer));
+    l.ui = (layer*)calloc(1, sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.ui) = make_connected_layer(batch, steps, inputs, outputs, LINEAR, batch_normalize);
     l.ui->batch = batch;
     if (l.workspace_size < l.ui->workspace_size) l.workspace_size = l.ui->workspace_size;

-    l.ug = (layer*)malloc(sizeof(layer));
+    l.ug = (layer*)calloc(1, sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.ug) = make_connected_layer(batch, steps, inputs, outputs, LINEAR, batch_normalize);
     l.ug->batch = batch;
     if (l.workspace_size < l.ug->workspace_size) l.workspace_size = l.ug->workspace_size;

-    l.uo = (layer*)malloc(sizeof(layer));
+    l.uo = (layer*)calloc(1, sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.uo) = make_connected_layer(batch, steps, inputs, outputs, LINEAR, batch_normalize);
     l.uo->batch = batch;
     if (l.workspace_size < l.uo->workspace_size) l.workspace_size = l.uo->workspace_size;

-    l.wf = (layer*)malloc(sizeof(layer));
+    l.wf = (layer*)calloc(1, sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.wf) = make_connected_layer(batch, steps, outputs, outputs, LINEAR, batch_normalize);
     l.wf->batch = batch;
     if (l.workspace_size < l.wf->workspace_size) l.workspace_size = l.wf->workspace_size;

-    l.wi = (layer*)malloc(sizeof(layer));
+    l.wi = (layer*)calloc(1, sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.wi) = make_connected_layer(batch, steps, outputs, outputs, LINEAR, batch_normalize);
     l.wi->batch = batch;
     if (l.workspace_size < l.wi->workspace_size) l.workspace_size = l.wi->workspace_size;

-    l.wg = (layer*)malloc(sizeof(layer));
+    l.wg = (layer*)calloc(1, sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.wg) = make_connected_layer(batch, steps, outputs, outputs, LINEAR, batch_normalize);
     l.wg->batch = batch;
     if (l.workspace_size < l.wg->workspace_size) l.workspace_size = l.wg->workspace_size;

-    l.wo = (layer*)malloc(sizeof(layer));
+    l.wo = (layer*)calloc(1, sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.wo) = make_connected_layer(batch, steps, outputs, outputs, LINEAR, batch_normalize);
     l.wo->batch = batch;
diff --git a/src/parser.c b/src/parser.c
index 95c92250..63a22319 100644
--- a/src/parser.c
+++ b/src/parser.c
@@ -258,7 +258,7 @@ layer parse_conv_lstm(list *options, size_params params)
     ACTIVATION activation = get_activation(activation_s);
     int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
     int xnor = option_find_int_quiet(options, "xnor", 0);
-    int peephole = option_find_int_quiet(options, "peephole", 1);
+    int peephole = option_find_int_quiet(options, "peephole", 0);

     layer l = make_conv_lstm_layer(params.batch, params.h, params.w, params.c, output_filters, groups, params.time_steps, size, stride, padding, activation, batch_normalize, peephole, xnor);
diff --git a/src/rnn_layer.c b/src/rnn_layer.c
index 28163d75..4b5b9c2c 100644
--- a/src/rnn_layer.c
+++ b/src/rnn_layer.c
@@ -42,19 +42,19 @@ layer make_rnn_layer(int batch, int inputs, int hidden, int outputs, int steps,
     l.state = (float*)calloc(batch * hidden * (steps + 1), sizeof(float));

-    l.input_layer = (layer*)malloc(sizeof(layer));
+    l.input_layer = (layer*)calloc(1, sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.input_layer) = make_connected_layer(batch, steps, inputs, hidden, activation, batch_normalize);
     l.input_layer->batch = batch;
     if (l.workspace_size < l.input_layer->workspace_size) l.workspace_size = l.input_layer->workspace_size;

-    l.self_layer = (layer*)malloc(sizeof(layer));
+    l.self_layer = (layer*)calloc(1, sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.self_layer) = make_connected_layer(batch, steps, hidden, hidden, (log==2)?LOGGY:(log==1?LOGISTIC:activation), batch_normalize);
     l.self_layer->batch = batch;
     if (l.workspace_size < l.self_layer->workspace_size) l.workspace_size = l.self_layer->workspace_size;

-    l.output_layer = (layer*)malloc(sizeof(layer));
+    l.output_layer = (layer*)calloc(1, sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.output_layer) = make_connected_layer(batch, steps, hidden, outputs, activation, batch_normalize);
     l.output_layer->batch = batch;
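
Note on the change: every recurrent constructor now allocates its sub-layers with calloc(1, sizeof(layer)) instead of malloc(sizeof(layer)), so the embedded layer struct starts out fully zeroed. This matters most for the conv-LSTM peephole path above: l.vf, l.vi and l.vo are always allocated but only initialized when l.peephole is set, and the parser now defaults peephole to 0, so with malloc those structs would otherwise hold indeterminate values. The sketch below is a minimal illustration of that difference, not code from this repository; the toy_layer struct and its fields are made up for the example.

/* Minimal sketch (assumption): toy_layer stands in for darknet's layer struct. */
#include <stdio.h>
#include <stdlib.h>

typedef struct toy_layer {
    float *output;          /* pointer field, like layer.output          */
    size_t workspace_size;  /* size field, like layer.workspace_size     */
} toy_layer;

int main(void)
{
    /* malloc leaves the struct bytes indeterminate; if the struct is never
       explicitly initialized (the disabled-peephole case), reading its
       fields is undefined behavior. */
    toy_layer *m = (toy_layer*)malloc(sizeof(toy_layer));

    /* calloc zero-fills the block, so on common platforms the pointer reads
       as NULL and the size as 0 until the layer is actually constructed. */
    toy_layer *c = (toy_layer*)calloc(1, sizeof(toy_layer));

    if (c) printf("zeroed layer: output=%p workspace=%zu\n",
                  (void*)c->output, c->workspace_size);

    free(m);
    free(c);
    return 0;
}

A zeroed sub-layer can then be checked or freed without special-casing, which is presumably why the constructors were switched to calloc rather than initializing each field by hand.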