Stable place to commit

Joseph Redmon
2015-02-04 12:41:20 -08:00
parent 0f1a31648c
commit bfffadc755
9 changed files with 72 additions and 22 deletions

@@ -43,6 +43,7 @@ connected_layer *make_connected_layer(int batch, int inputs, int outputs, ACTIVA
     for(i = 0; i < outputs; ++i){
         layer->biases[i] = scale;
+        // layer->biases[i] = 1;
     }
 #ifdef GPU
@@ -113,9 +114,10 @@ void forward_connected_layer(connected_layer layer, float *input)
 void backward_connected_layer(connected_layer layer, float *input, float *delta)
 {
     int i;
+    float alpha = 1./layer.batch;
     gradient_array(layer.output, layer.outputs*layer.batch, layer.activation, layer.delta);
     for(i = 0; i < layer.batch; ++i){
-        axpy_cpu(layer.outputs, 1, layer.delta + i*layer.outputs, 1, layer.bias_updates, 1);
+        axpy_cpu(layer.outputs, alpha, layer.delta + i*layer.outputs, 1, layer.bias_updates, 1);
     }
     int m = layer.inputs;
     int k = layer.batch;
@@ -123,7 +125,7 @@ void backward_connected_layer(connected_layer layer, float *input, float *delta)
     float *a = input;
     float *b = layer.delta;
     float *c = layer.weight_updates;
-    gemm(1,0,m,n,k,1,a,m,b,n,1,c,n);
+    gemm(1,0,m,n,k,alpha,a,m,b,n,1,c,n);
     m = layer.batch;
     k = layer.outputs;
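
For context: with alpha = 1./layer.batch, the accumulated bias_updates and weight_updates become the mean of the per-example gradients rather than their sum, so the effective step size no longer grows with batch size. A minimal standalone sketch of the bias accumulation, using a toy stand-in for axpy_cpu (an illustration, not darknet's actual source):

#include <stdio.h>

/* Toy stand-in for darknet's axpy_cpu: y[i*incy] += a * x[i*incx]. */
static void axpy_cpu(int n, float a, const float *x, int incx, float *y, int incy)
{
    for(int i = 0; i < n; ++i) y[i*incy] += a*x[i*incx];
}

int main(void)
{
    enum { BATCH = 4, OUTPUTS = 3 };
    /* One gradient row per example, as in layer.delta. */
    float delta[BATCH*OUTPUTS] = {
        1, 2, 3,
        1, 2, 3,
        1, 2, 3,
        1, 2, 3,
    };
    float bias_updates[OUTPUTS] = {0};
    float alpha = 1.f/BATCH;  /* the factor this commit introduces */

    for(int i = 0; i < BATCH; ++i){
        axpy_cpu(OUTPUTS, alpha, delta + i*OUTPUTS, 1, bias_updates, 1);
    }
    /* Prints the mean gradient 1 2 3; with a = 1 it would be the sum 4 8 12. */
    for(int j = 0; j < OUTPUTS; ++j) printf("%g ", bias_updates[j]);
    printf("\n");
    return 0;
}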
@@ -156,13 +158,18 @@ void push_connected_layer(connected_layer layer)
 void update_connected_layer_gpu(connected_layer layer)
 {
+    /*
+    cuda_pull_array(layer.weights_gpu, layer.weights, layer.inputs*layer.outputs);
+    cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, layer.inputs*layer.outputs);
+    printf("Weights: %f updates: %f\n", mag_array(layer.weights, layer.inputs*layer.outputs), layer.learning_rate*mag_array(layer.weight_updates, layer.inputs*layer.outputs));
+    */
     axpy_ongpu(layer.outputs, layer.learning_rate, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
     scal_ongpu(layer.outputs, layer.momentum, layer.bias_updates_gpu, 1);
     axpy_ongpu(layer.inputs*layer.outputs, -layer.decay, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
     axpy_ongpu(layer.inputs*layer.outputs, layer.learning_rate, layer.weight_updates_gpu, 1, layer.weights_gpu, 1);
     scal_ongpu(layer.inputs*layer.outputs, layer.momentum, layer.weight_updates_gpu, 1);
     //pull_connected_layer(layer);
 }
 void forward_connected_layer_gpu(connected_layer layer, float * input)
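
Aside: the axpy/scal sequence in update_connected_layer_gpu is plain SGD with momentum and L2 weight decay. A scalar sketch of the same update rule (hypothetical names; v stands in for the *_updates buffer, grad for whatever the backward pass accumulated into it):

#include <stdio.h>

/* Mirrors the three weight calls above, one scalar at a time:
 *   axpy(-decay, w, v)  -- fold L2 weight decay into the update buffer
 *   axpy(lr, v, w)      -- take the step
 *   scal(momentum, v)   -- keep a momentum fraction for the next step */
static void sgd_momentum_step(float *w, float *v, float grad,
                              float lr, float momentum, float decay)
{
    *v += grad;
    *v += -decay*(*w);
    *w += lr*(*v);
    *v *= momentum;
}

int main(void)
{
    float w = 0.5f, v = 0.0f;
    for(int i = 0; i < 3; ++i){
        sgd_momentum_step(&w, &v, -1.0f, 0.1f, 0.9f, 0.0005f);
        printf("step %d: w = %f, v = %f\n", i, w, v);
    }
    return 0;
}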
@@ -183,10 +190,11 @@ void forward_connected_layer_gpu(connected_layer layer, float * input)
 void backward_connected_layer_gpu(connected_layer layer, float * input, float * delta)
 {
+    float alpha = 1./layer.batch;
     int i;
     gradient_array_ongpu(layer.output_gpu, layer.outputs*layer.batch, layer.activation, layer.delta_gpu);
     for(i = 0; i < layer.batch; ++i){
-        axpy_ongpu_offset(layer.outputs, 1, layer.delta_gpu, i*layer.outputs, 1, layer.bias_updates_gpu, 0, 1);
+        axpy_ongpu_offset(layer.outputs, alpha, layer.delta_gpu, i*layer.outputs, 1, layer.bias_updates_gpu, 0, 1);
     }
     int m = layer.inputs;
     int k = layer.batch;
@@ -194,7 +202,7 @@ void backward_connected_layer_gpu(connected_layer layer, float * input, float *
     float * a = input;
     float * b = layer.delta_gpu;
     float * c = layer.weight_updates_gpu;
-    gemm_ongpu(1,0,m,n,k,1,a,m,b,n,1,c,n);
+    gemm_ongpu(1,0,m,n,k,alpha,a,m,b,n,1,c,n);
     m = layer.batch;
     k = layer.outputs;
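
For reference: in darknet's row-major gemm, the call gemm_ongpu(1,0,m,n,k,alpha,a,m,b,n,1,c,n) with TA=1 computes C += alpha * A^T * B, i.e. weight_updates (inputs x outputs) += (1/batch) * input^T * delta. A minimal CPU sketch of that transposed-A case (an illustration, not the library's kernel):

#include <stdio.h>

/* C (M x N) += ALPHA * A^T * B, with A stored K x M and B stored K x N,
 * all row-major; the BETA = 1 case of the call above. */
static void gemm_tn(int M, int N, int K, float ALPHA,
                    const float *A, int lda, const float *B, int ldb,
                    float *C, int ldc)
{
    for(int i = 0; i < M; ++i){
        for(int k = 0; k < K; ++k){
            float a_part = ALPHA*A[k*lda + i];
            for(int j = 0; j < N; ++j) C[i*ldc + j] += a_part*B[k*ldb + j];
        }
    }
}

int main(void)
{
    enum { INPUTS = 2, OUTPUTS = 2, BATCH = 2 };
    float input[BATCH*INPUTS]  = {1, 2,  3, 4};  /* one row per example */
    float delta[BATCH*OUTPUTS] = {1, 0,  0, 1};
    float weight_updates[INPUTS*OUTPUTS] = {0};

    /* Same argument layout as gemm(1,0,m,n,k,alpha,a,m,b,n,1,c,n). */
    gemm_tn(INPUTS, OUTPUTS, BATCH, 1.f/BATCH,
            input, INPUTS, delta, OUTPUTS, weight_updates, OUTPUTS);

    for(int i = 0; i < INPUTS; ++i){
        for(int j = 0; j < OUTPUTS; ++j) printf("%g ", weight_updates[i*OUTPUTS + j]);
        printf("\n");
    }
    return 0;
}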