#include "blas.h"
|
|
#include "math.h"
|
|
#include <assert.h>
|
|
|
|
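/*
 * shortcut_cpu adds one feature map (`add`, w1 x h1 x c1) into another
 * (`out`, w2 x h2 x c2) over the region the two maps have in common, which is
 * how darknet's shortcut (residual-style) layer accumulates its input. When
 * the source is the larger map, `stride` subsamples it; when the output is
 * the larger map, `sample` spreads the source pixels out. The asserts require
 * the two sizes to differ by the same integer factor in width and height.
 * For example, with w1 = h1 = 8 and w2 = h2 = 4, stride is 2 and every other
 * source pixel is added into the 4x4 output.
 */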
void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
    int stride = w1/w2;
    int sample = w2/w1;
    assert(stride == h1/h2);
    assert(sample == h2/h1);
    if(stride < 1) stride = 1;
    if(sample < 1) sample = 1;
    int minw = (w1 < w2) ? w1 : w2;
    int minh = (h1 < h2) ? h1 : h2;
    int minc = (c1 < c2) ? c1 : c2;

    int i,j,k,b;
    for(b = 0; b < batch; ++b){
        for(k = 0; k < minc; ++k){
            for(j = 0; j < minh; ++j){
                for(i = 0; i < minw; ++i){
                    int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
                    int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
                    out[out_index] += add[add_index];
                }
            }
        }
    }
}

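/*
 * The next three routines compute per-filter statistics over an array laid
 * out as batch x filters x spatial and then normalize it in place: essentially
 * the forward statistics of batch normalization. A minimal sketch of how they
 * compose (the calloc'd buffers are illustrative and would need <stdlib.h>):
 *
 *     float *mean     = calloc(filters, sizeof(float));
 *     float *variance = calloc(filters, sizeof(float));
 *     mean_cpu(x, batch, filters, spatial, mean);
 *     variance_cpu(x, mean, batch, filters, spatial, variance);
 *     normalize_cpu(x, mean, variance, batch, filters, spatial);
 *     free(mean);
 *     free(variance);
 */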
void mean_cpu(float *x, int batch, int filters, int spatial, float *mean)
{
    float scale = 1./(batch * spatial);
    int i,j,k;
    for(i = 0; i < filters; ++i){
        mean[i] = 0;
        for(j = 0; j < batch; ++j){
            for(k = 0; k < spatial; ++k){
                int index = j*filters*spatial + i*spatial + k;
                mean[i] += x[index];
            }
        }
        mean[i] *= scale;
    }
}

void variance_cpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
    float scale = 1./(batch * spatial);
    int i,j,k;
    for(i = 0; i < filters; ++i){
        variance[i] = 0;
        for(j = 0; j < batch; ++j){
            for(k = 0; k < spatial; ++k){
                int index = j*filters*spatial + i*spatial + k;
                variance[i] += pow((x[index] - mean[i]), 2);
            }
        }
        variance[i] *= scale;
    }
}

void normalize_cpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
    int b, f, i;
    for(b = 0; b < batch; ++b){
        for(f = 0; f < filters; ++f){
            for(i = 0; i < spatial; ++i){
                int index = b*filters*spatial + f*spatial + i;
                x[index] = (x[index] - mean[f])/(sqrt(variance[f]) + .00001f);
            }
        }
    }
}

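/*
 * The remaining routines are strided element-wise kernels in the spirit of
 * BLAS level 1 (copy, scal, axpy, dot), plus a few extras (const_cpu and
 * fill_cpu, which are identical, mul_cpu, pow_cpu, smooth_l1_cpu). N is the
 * element count and INCX/INCY are the strides between consecutive elements.
 * A small usage sketch (names and values are illustrative only):
 *
 *     float a[4] = {1, 2, 3, 4};
 *     float b[4] = {0};
 *     copy_cpu(4, a, 1, b, 1);          // b = a
 *     scal_cpu(4, 2.0f, b, 1);          // b *= 2         -> {2, 4, 6, 8}
 *     axpy_cpu(4, -1.0f, a, 1, b, 1);   // b += -1 * a    -> {1, 2, 3, 4}
 *     float s = dot_cpu(4, a, 1, b, 1); // 1 + 4 + 9 + 16 == 30
 */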
void const_cpu(int N, float ALPHA, float *X, int INCX)
{
    int i;
    for(i = 0; i < N; ++i) X[i*INCX] = ALPHA;
}

void mul_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
    int i;
    for(i = 0; i < N; ++i) Y[i*INCY] *= X[i*INCX];
}

void pow_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
{
    int i;
    for(i = 0; i < N; ++i) Y[i*INCY] = pow(X[i*INCX], ALPHA);
}

void axpy_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
{
    int i;
    for(i = 0; i < N; ++i) Y[i*INCY] += ALPHA*X[i*INCX];
}

void scal_cpu(int N, float ALPHA, float *X, int INCX)
{
    int i;
    for(i = 0; i < N; ++i) X[i*INCX] *= ALPHA;
}

void fill_cpu(int N, float ALPHA, float *X, int INCX)
{
    int i;
    for(i = 0; i < N; ++i) X[i*INCX] = ALPHA;
}

void copy_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
    int i;
    for(i = 0; i < N; ++i) Y[i*INCY] = X[i*INCX];
}

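/*
 * smooth_l1_cpu writes the smooth L1 (Huber-style) update for the error
 * diff = truth - pred:
 *
 *     loss(diff)  = 0.5 * diff^2     if |diff| <= 1
 *                 = |diff| - 0.5     otherwise
 *     delta(diff) = diff             if |diff| <= 1
 *                 = sign(diff)       otherwise
 *
 * so the update follows the raw error near zero and saturates at +/-1 for
 * large errors.
 */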
void smooth_l1_cpu(int n, float *pred, float *truth, float *delta)
{
    int i;
    for(i = 0; i < n; ++i){
        float diff = truth[i] - pred[i];
        /* Linear inside [-1, 1], clipped to +/-1 beyond it. */
        if(fabs(diff) > 1) delta[i] = (diff > 0) ? 1 : -1;
        else delta[i] = diff;
    }
}

float dot_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
    int i;
    float dot = 0;
    for(i = 0; i < N; ++i) dot += X[i*INCX] * Y[i*INCY];
    return dot;
}