#include "softmax_layer.h"
#include "mini_blas.h"
#include <float.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>

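/* Note: the GPU path below relies on darknet's OpenCL wrappers (cl_make_array,
 * cl_read_array, get_kernel, check_error and the global `cl` state), which are
 * assumed to be pulled in through the headers above when GPU is defined. */
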
softmax_layer *make_softmax_layer(int batch, int inputs)
{
    fprintf(stderr, "Softmax Layer: %d inputs\n", inputs);
    softmax_layer *layer = calloc(1, sizeof(softmax_layer));
    layer->batch = batch;
    layer->inputs = inputs;
    /* Host buffers: one `inputs`-wide distribution per batch element. */
    layer->output = calloc(inputs*batch, sizeof(float));
    layer->delta = calloc(inputs*batch, sizeof(float));
    layer->jacobian = calloc(inputs*inputs*batch, sizeof(float));
#ifdef GPU
    /* Mirror the output and delta buffers on the GPU. */
    layer->output_cl = cl_make_array(layer->output, inputs*batch);
    layer->delta_cl = cl_make_array(layer->delta, inputs*batch);
#endif
    return layer;
}

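/* Example usage (sketch): a layer normalizing 10 scores for each of 4 batch
 * elements. `net_input` is a hypothetical, externally filled 4*10 float array.
 *
 *     softmax_layer *l = make_softmax_layer(4, 10);
 *     forward_softmax_layer(*l, net_input);
 *     // each row of l->output is now a distribution that sums to 1
 */
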
void forward_softmax_layer(const softmax_layer layer, float *input)
{
    int i,b;
    for(b = 0; b < layer.batch; ++b){
        float sum = 0;
        /* Subtract the largest input before exponentiating so exp() cannot
           overflow; softmax is invariant to a constant shift of its inputs. */
        float largest = -FLT_MAX;
        for(i = 0; i < layer.inputs; ++i){
            if(input[i+b*layer.inputs] > largest) largest = input[i+b*layer.inputs];
        }
        for(i = 0; i < layer.inputs; ++i){
            sum += exp(input[i+b*layer.inputs]-largest);
        }
        /* Log-sum-exp trick: output_i = exp(x_i - (largest + log(sum))).
           The else branch is a defensive fallback for a zero sum. */
        if(sum) sum = largest+log(sum);
        else sum = largest-100;
        for(i = 0; i < layer.inputs; ++i){
            layer.output[i+b*layer.inputs] = exp(input[i+b*layer.inputs]-sum);
        }
    }
}

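/* The backward pass is a plain copy. This layer is meant to be paired with a
 * log-loss (cross-entropy) cost, for which the gradient with respect to the
 * softmax inputs collapses to output - truth; that value is assumed to be
 * stored in layer.delta by the cost computation. The full Jacobian product is
 * only needed for other losses; see the commented-out block at the end of
 * this file. */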
void backward_softmax_layer(const softmax_layer layer, float *delta)
{
    int i;
    for(i = 0; i < layer.inputs*layer.batch; ++i){
        delta[i] = layer.delta[i];
    }
}

#ifdef GPU

void pull_softmax_layer_output(const softmax_layer layer)
{
    /* Read the device-side output back into the host output array. */
    cl_read_array(layer.output_cl, layer.output, layer.inputs*layer.batch);
}

cl_kernel get_softmax_forward_kernel()
{
    /* Compile and load the kernel once, then cache it in a static. */
    static int init = 0;
    static cl_kernel kernel;
    if(!init){
        kernel = get_kernel("src/softmax_layer.cl", "forward", 0);
        init = 1;
    }
    return kernel;
}

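/* The "forward" kernel itself lives in src/softmax_layer.cl and is not part
 * of this file. A minimal sketch of what it presumably looks like, matching
 * the argument order set in forward_softmax_layer_gpu below (one work-item
 * per batch element, same shift-by-max logic as the CPU path):
 *
 *     __kernel void forward(int n, __global float *input, __global float *output)
 *     {
 *         int b = get_global_id(0);
 *         int i;
 *         float largest = -FLT_MAX;
 *         for(i = 0; i < n; ++i) largest = fmax(largest, input[i+b*n]);
 *         float sum = 0;
 *         for(i = 0; i < n; ++i) sum += exp(input[i+b*n] - largest);
 *         sum = (sum != 0) ? largest + log(sum) : largest - 100;
 *         for(i = 0; i < n; ++i) output[i+b*n] = exp(input[i+b*n] - sum);
 *     }
 */
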
void forward_softmax_layer_gpu(const softmax_layer layer, cl_mem input)
{
    cl_kernel kernel = get_softmax_forward_kernel();
    cl_command_queue queue = cl.queue;

    /* Argument order must match the kernel signature: n, input, output. */
    cl_uint i = 0;
    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.inputs), (void*) &layer.inputs);
    cl.error = clSetKernelArg(kernel, i++, sizeof(input), (void*) &input);
    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.output_cl), (void*) &layer.output_cl);
    check_error(cl);

    /* Launch one work-item per batch element. */
    const size_t global_size[] = {layer.batch};

    cl.error = clEnqueueNDRangeKernel(queue, kernel, 1, 0, global_size, 0, 0, 0, 0);
    check_error(cl);

    /*
    cl_read_array(layer.output_cl, layer.output, layer.inputs*layer.batch);
    int z;
    for(z = 0; z < layer.inputs*layer.batch; ++z) printf("%f,",layer.output[z]);
    */
}

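/* GPU analogue of backward_softmax_layer: copy_ongpu (a BLAS-style copy from
 * mini_blas) copies the stored layer.delta_cl into the upstream delta buffer. */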
void backward_softmax_layer_gpu(const softmax_layer layer, cl_mem delta)
{
    copy_ongpu(layer.batch*layer.inputs, layer.delta_cl, 1, delta, 1);
}
#endif

/* This is if you want softmax w/o log-loss classification. You probably don't.
   Full backward pass: build the softmax Jacobian J_ij = p_i*(d_ij - p_j) for
   each batch element, then multiply it into the stored delta with a gemm.

    int i,j,b;
    for(b = 0; b < layer.batch; ++b){
        for(i = 0; i < layer.inputs; ++i){
            for(j = 0; j < layer.inputs; ++j){
                int d = (i==j);
                layer.jacobian[b*layer.inputs*layer.inputs + i*layer.inputs + j] =
                    layer.output[b*layer.inputs + i] * (d - layer.output[b*layer.inputs + j]);
            }
        }
    }
    for(b = 0; b < layer.batch; ++b){
        int M = layer.inputs;
        int N = 1;
        int K = layer.inputs;
        float *A = layer.jacobian + b*layer.inputs*layer.inputs;
        float *B = layer.delta + b*layer.inputs;
        float *C = delta + b*layer.inputs;
        gemm(0,0,M,N,K,1,A,K,B,N,0,C,N);
    }
*/