mirror of https://github.com/pjreddie/darknet.git
improve compatibility with c++ compilers, prepare for CMake
@@ -54,7 +54,7 @@ connected_layer make_connected_layer(int batch, int steps, int inputs, int outpu
 {
     int total_batch = batch*steps;
     int i;
-    connected_layer l = {0};
+    connected_layer l = { (LAYER_TYPE)0 };
     l.type = CONNECTED;
 
     l.inputs = inputs;
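
The cast in the initializer matters because the first member of the layer struct is the LAYER_TYPE enum (as the cast itself shows): C silently converts the literal 0 to the enum, but C++ has no implicit int-to-enum conversion and rejects `{0}`. A minimal sketch with trimmed-down stand-in types, not darknet's real definitions:

    /* Minimal sketch, hypothetical stand-ins for darknet's LAYER_TYPE and layer struct. */
    typedef enum { CONVOLUTIONAL, CONNECTED } LAYER_TYPE;

    typedef struct {
        LAYER_TYPE type;   /* first member is an enum, as in the diff above */
        int inputs;
    } connected_layer;

    int main(void)
    {
        /* connected_layer l = {0};  -- valid C, but a C++ compiler rejects the
           implicit int -> LAYER_TYPE conversion, hence the explicit cast */
        connected_layer l = { (LAYER_TYPE)0 };
        return l.type;
    }
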
@@ -74,14 +74,14 @@ connected_layer make_connected_layer(int batch, int steps, int inputs, int outpu
     l.activation = activation;
     l.learning_rate_scale = 1;
 
-    l.output = calloc(total_batch*outputs, sizeof(float));
-    l.delta = calloc(total_batch*outputs, sizeof(float));
+    l.output = (float*)calloc(total_batch * outputs, sizeof(float));
+    l.delta = (float*)calloc(total_batch * outputs, sizeof(float));
 
-    l.weight_updates = calloc(inputs*outputs, sizeof(float));
-    l.bias_updates = calloc(outputs, sizeof(float));
+    l.weight_updates = (float*)calloc(inputs * outputs, sizeof(float));
+    l.bias_updates = (float*)calloc(outputs, sizeof(float));
 
-    l.weights = calloc(outputs*inputs, sizeof(float));
-    l.biases = calloc(outputs, sizeof(float));
+    l.weights = (float*)calloc(outputs * inputs, sizeof(float));
+    l.biases = (float*)calloc(outputs, sizeof(float));
 
     l.forward = forward_connected_layer;
     l.backward = backward_connected_layer;
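
The changes in this hunk follow a single pattern: calloc() returns void*, which C converts to float* implicitly but C++ does not, so every allocation gains an explicit (float*) cast. A minimal sketch of the difference, assuming nothing beyond the C standard library:

    #include <stdlib.h>

    static float *alloc_buffer(size_t n)
    {
        /* float *buf = calloc(n, sizeof(float));  -- valid C, but C++ rejects
           the implicit void* -> float* conversion, hence the explicit cast */
        float *buf = (float*)calloc(n, sizeof(float));
        return buf;
    }

    int main(void)
    {
        float *output = alloc_buffer(16);
        free(output);
        return 0;
    }
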
@@ -98,22 +98,22 @@ connected_layer make_connected_layer(int batch, int steps, int inputs, int outpu
     }
 
     if(batch_normalize){
-        l.scales = calloc(outputs, sizeof(float));
-        l.scale_updates = calloc(outputs, sizeof(float));
+        l.scales = (float*)calloc(outputs, sizeof(float));
+        l.scale_updates = (float*)calloc(outputs, sizeof(float));
         for(i = 0; i < outputs; ++i){
            l.scales[i] = 1;
         }
 
-        l.mean = calloc(outputs, sizeof(float));
-        l.mean_delta = calloc(outputs, sizeof(float));
-        l.variance = calloc(outputs, sizeof(float));
-        l.variance_delta = calloc(outputs, sizeof(float));
+        l.mean = (float*)calloc(outputs, sizeof(float));
+        l.mean_delta = (float*)calloc(outputs, sizeof(float));
+        l.variance = (float*)calloc(outputs, sizeof(float));
+        l.variance_delta = (float*)calloc(outputs, sizeof(float));
 
-        l.rolling_mean = calloc(outputs, sizeof(float));
-        l.rolling_variance = calloc(outputs, sizeof(float));
+        l.rolling_mean = (float*)calloc(outputs, sizeof(float));
+        l.rolling_variance = (float*)calloc(outputs, sizeof(float));
 
-        l.x = calloc(total_batch*outputs, sizeof(float));
-        l.x_norm = calloc(total_batch*outputs, sizeof(float));
+        l.x = (float*)calloc(total_batch * outputs, sizeof(float));
+        l.x_norm = (float*)calloc(total_batch * outputs, sizeof(float));
     }
 
 #ifdef GPU
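
The batch-normalization buffers above repeat the same cast many times. As a design note, a hypothetical alternative (not what this commit does) would be a small wrapper that keeps every call site cast-free under both C and C++ compilers:

    #include <stdlib.h>

    /* Hypothetical helper, not part of the commit: centralizes the cast so
       call sites such as l.mean = float_calloc(outputs); need no per-call
       cast regardless of which compiler builds the file. */
    static float *float_calloc(size_t n)
    {
        return (float*)calloc(n, sizeof(float));
    }

    int main(void)
    {
        float *mean = float_calloc(32);
        free(mean);
        return 0;
    }
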