Mirror of https://github.com/pjreddie/darknet.git, synced 2023-08-10 21:13:14 +03:00
Added grouped convolution (depth-wise convolution)
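A grouped convolution splits the layer's c input channels into `groups` equal slices: the filters of group g convolve only the c/groups channels of slice g, so each of the n filters stores (c/groups)*size*size weights. That is exactly the count this commit introduces as l.nweights = (c / groups) * n * size * size, and choosing groups == c turns the layer into a depth-wise convolution. Below is a minimal reference sketch of the arithmetic in plain C; it is not code from this commit, and the function name, memory layout, and zero-padding convention are illustrative assumptions (c and n are assumed divisible by groups):

#include <stddef.h>

/* Naive grouped convolution over an NCHW input; filters are stored as
 * n * (c/groups) * size * size floats -- the commit's l.nweights. */
void grouped_conv_naive(const float *in, const float *weights, float *out,
                        int c, int h, int w, int n, int size, int stride, int pad,
                        int groups)
{
    int out_h = (h + 2*pad - size) / stride + 1;   // same formula as darknet's convolutional_out_height()
    int out_w = (w + 2*pad - size) / stride + 1;
    int c_per_g = c / groups;   // input channels visible to one filter
    int n_per_g = n / groups;   // filters produced by one group

    for (int g = 0; g < groups; ++g)
    for (int f = 0; f < n_per_g; ++f)               // filter inside this group
    for (int y = 0; y < out_h; ++y)
    for (int x = 0; x < out_w; ++x) {
        float sum = 0;
        for (int ic = 0; ic < c_per_g; ++ic)        // only this group's input slice
        for (int ky = 0; ky < size; ++ky)
        for (int kx = 0; kx < size; ++kx) {
            int iy = y*stride + ky - pad;
            int ix = x*stride + kx - pad;
            if (iy < 0 || iy >= h || ix < 0 || ix >= w) continue;   // zero padding
            sum += in[((size_t)(g*c_per_g + ic)*h + iy)*w + ix]
                 * weights[(((size_t)(g*n_per_g + f)*c_per_g + ic)*size + ky)*size + kx];
        }
        out[((size_t)(g*n_per_g + f)*out_h + y)*out_w + x] = sum;
    }
}

With groups == 1 this degenerates to an ordinary convolution; the diff below threads the same c/groups and n/groups factors through darknet's im2col/GEMM path instead of computing the sums directly.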
@@ -140,7 +140,7 @@ size_t get_workspace_size32(layer l){
         if (workspace_size < re_packed_input_size) workspace_size = re_packed_input_size;
         return workspace_size;
     }
-    return (size_t)l.out_h*l.out_w*l.size*l.size*l.c*sizeof(float);
+    return (size_t)l.out_h*l.out_w*l.size*l.size*(l.c / l.groups)*sizeof(float);
 }

 size_t get_workspace_size16(layer l) {
@@ -231,9 +231,14 @@ void cudnn_convolutional_setup(layer *l, int cudnn_preference)
     // 3. FP32 Master Copy of Weights
     // More: http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#tensor_ops
     CHECK_CUDNN(cudnnSetConvolutionMathType(l->convDesc, CUDNN_TENSOR_OP_MATH));
+    CHECK_CUDNN(cudnnSetConvolutionGroupCount(l->convDesc, l->groups));
 #if((CUDNN_MAJOR*10 + CUDNN_MINOR) >= 72) // cuDNN >= 7.2
     CHECK_CUDNN(cudnnSetConvolutionMathType(l->convDesc, CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION));
 #endif
 #else //if(CUDNN_MAJOR >= 7)
+    if (l->groups > 1) {
+        error("CUDNN < 7 doesn't support groups, please upgrade!");
+    }
 #endif

     // INT8_CONFIG, INT8_EXT_CONFIG, INT8x4_CONFIG and INT8x4_EXT_CONFIG are only supported
@@ -243,23 +248,23 @@ void cudnn_convolutional_setup(layer *l, int cudnn_preference)
     // backward delta
     CHECK_CUDNN(cudnnSetTensor4dDescriptor(l->dsrcTensorDesc, CUDNN_TENSOR_NCHW, data_type, l->batch, l->c, l->h, l->w));
     CHECK_CUDNN(cudnnSetTensor4dDescriptor(l->ddstTensorDesc, CUDNN_TENSOR_NCHW, data_type, l->batch, l->out_c, l->out_h, l->out_w));
-    CHECK_CUDNN(cudnnSetFilter4dDescriptor(l->dweightDesc, data_type, CUDNN_TENSOR_NCHW, l->n, l->c, l->size, l->size));
+    CHECK_CUDNN(cudnnSetFilter4dDescriptor(l->dweightDesc, data_type, CUDNN_TENSOR_NCHW, l->n, l->c / l->groups, l->size, l->size));

     // forward
     CHECK_CUDNN(cudnnSetTensor4dDescriptor(l->srcTensorDesc, CUDNN_TENSOR_NCHW, data_type, l->batch, l->c, l->h, l->w));
     CHECK_CUDNN(cudnnSetTensor4dDescriptor(l->dstTensorDesc, CUDNN_TENSOR_NCHW, data_type, l->batch, l->out_c, l->out_h, l->out_w));
-    CHECK_CUDNN(cudnnSetFilter4dDescriptor(l->weightDesc, data_type, CUDNN_TENSOR_NCHW, l->n, l->c, l->size, l->size));
+    CHECK_CUDNN(cudnnSetFilter4dDescriptor(l->weightDesc, data_type, CUDNN_TENSOR_NCHW, l->n, l->c / l->groups, l->size, l->size));

 //#ifdef CUDNN_HALF
     // backward delta
     CHECK_CUDNN(cudnnSetTensor4dDescriptor(l->dsrcTensorDesc16, CUDNN_TENSOR_NCHW, CUDNN_DATA_HALF, l->batch, l->c, l->h, l->w));
     CHECK_CUDNN(cudnnSetTensor4dDescriptor(l->ddstTensorDesc16, CUDNN_TENSOR_NCHW, CUDNN_DATA_HALF, l->batch, l->out_c, l->out_h, l->out_w));
-    CHECK_CUDNN(cudnnSetFilter4dDescriptor(l->dweightDesc16, CUDNN_DATA_HALF, CUDNN_TENSOR_NCHW, l->n, l->c, l->size, l->size));
+    CHECK_CUDNN(cudnnSetFilter4dDescriptor(l->dweightDesc16, CUDNN_DATA_HALF, CUDNN_TENSOR_NCHW, l->n, l->c / l->groups, l->size, l->size));

     // forward
     CHECK_CUDNN(cudnnSetTensor4dDescriptor(l->srcTensorDesc16, CUDNN_TENSOR_NCHW, CUDNN_DATA_HALF, l->batch, l->c, l->h, l->w));
     CHECK_CUDNN(cudnnSetTensor4dDescriptor(l->dstTensorDesc16, CUDNN_TENSOR_NCHW, CUDNN_DATA_HALF, l->batch, l->out_c, l->out_h, l->out_w));
-    CHECK_CUDNN(cudnnSetFilter4dDescriptor(l->weightDesc16, CUDNN_DATA_HALF, CUDNN_TENSOR_NCHW, l->n, l->c, l->size, l->size));
+    CHECK_CUDNN(cudnnSetFilter4dDescriptor(l->weightDesc16, CUDNN_DATA_HALF, CUDNN_TENSOR_NCHW, l->n, l->c / l->groups, l->size, l->size));

     // batch norm
     CHECK_CUDNN(cudnnSetTensor4dDescriptor(l->normDstTensorDescF16, CUDNN_TENSOR_NCHW, CUDNN_DATA_HALF, l->batch, l->out_c, l->out_h, l->out_w));
@@ -326,17 +331,21 @@ void cudnn_convolutional_setup(layer *l, int cudnn_preference)
 #endif
 #endif

-convolutional_layer make_convolutional_layer(int batch, int steps, int h, int w, int c, int n, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int binary, int xnor, int adam, int use_bin_output, int index)
+convolutional_layer make_convolutional_layer(int batch, int steps, int h, int w, int c, int n, int groups, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int binary, int xnor, int adam, int use_bin_output, int index)
 {
     int total_batch = batch*steps;
     int i;
     convolutional_layer l = { (LAYER_TYPE)0 };
     l.type = CONVOLUTIONAL;

+    if (xnor) groups = 1; // disable groups for XNOR-net
+    if (groups < 1) groups = 1;
+
     l.index = index;
     l.h = h;
     l.w = w;
     l.c = c;
+    l.groups = groups;
     l.n = n;
     l.binary = binary;
     l.xnor = xnor;
@@ -348,17 +357,17 @@ convolutional_layer make_convolutional_layer(int batch, int steps, int h, int w,
     l.pad = padding;
     l.batch_normalize = batch_normalize;
     l.learning_rate_scale = 1;
-    l.nweights = l.c*l.n*l.size*l.size;
+    l.nweights = (c / groups) * n * size * size;

-    l.weights = (float*)calloc(c * n * size * size, sizeof(float));
-    l.weight_updates = (float*)calloc(c * n * size * size, sizeof(float));
+    l.weights = (float*)calloc(l.nweights, sizeof(float));
+    l.weight_updates = (float*)calloc(l.nweights, sizeof(float));

     l.biases = (float*)calloc(n, sizeof(float));
     l.bias_updates = (float*)calloc(n, sizeof(float));

     // float scale = 1./sqrt(size*size*c);
-    float scale = sqrt(2./(size*size*c));
-    for(i = 0; i < c*n*size*size; ++i) l.weights[i] = scale*rand_uniform(-1, 1);
+    float scale = sqrt(2./(size*size*c/groups));
+    for(i = 0; i < l.nweights; ++i) l.weights[i] = scale*rand_uniform(-1, 1); // rand_normal();
     int out_h = convolutional_out_height(l);
     int out_w = convolutional_out_width(l);
     l.out_h = out_h;
@@ -375,12 +384,12 @@ convolutional_layer make_convolutional_layer(int batch, int steps, int h, int w,
     l.backward = backward_convolutional_layer;
     l.update = update_convolutional_layer;
     if(binary){
-        l.binary_weights = (float*)calloc(c * n * size * size, sizeof(float));
-        l.cweights = (char*)calloc(c * n * size * size, sizeof(char));
+        l.binary_weights = (float*)calloc(l.nweights, sizeof(float));
+        l.cweights = (char*)calloc(l.nweights, sizeof(char));
         l.scales = (float*)calloc(n, sizeof(float));
     }
     if(xnor){
-        l.binary_weights = (float*)calloc(c * n * size * size, sizeof(float));
+        l.binary_weights = (float*)calloc(l.nweights, sizeof(float));
         l.binary_input = (float*)calloc(l.inputs * l.batch, sizeof(float));

         int align = 32;// 8;
@@ -420,8 +429,8 @@ convolutional_layer make_convolutional_layer(int batch, int steps, int h, int w,
     }
     if(adam){
         l.adam = 1;
-        l.m = (float*)calloc(c * n * size * size, sizeof(float));
-        l.v = (float*)calloc(c * n * size * size, sizeof(float));
+        l.m = (float*)calloc(l.nweights, sizeof(float));
+        l.v = (float*)calloc(l.nweights, sizeof(float));
         l.bias_m = (float*)calloc(n, sizeof(float));
         l.scale_m = (float*)calloc(n, sizeof(float));
         l.bias_v = (float*)calloc(n, sizeof(float));
@@ -435,19 +444,19 @@ convolutional_layer make_convolutional_layer(int batch, int steps, int h, int w,

     if(gpu_index >= 0){
         if (adam) {
-            l.m_gpu = cuda_make_array(l.m, c*n*size*size);
-            l.v_gpu = cuda_make_array(l.v, c*n*size*size);
+            l.m_gpu = cuda_make_array(l.m, l.nweights);
+            l.v_gpu = cuda_make_array(l.v, l.nweights);
             l.bias_m_gpu = cuda_make_array(l.bias_m, n);
             l.bias_v_gpu = cuda_make_array(l.bias_v, n);
             l.scale_m_gpu = cuda_make_array(l.scale_m, n);
             l.scale_v_gpu = cuda_make_array(l.scale_v, n);
         }

-        l.weights_gpu = cuda_make_array(l.weights, c*n*size*size);
-        l.weight_updates_gpu = cuda_make_array(l.weight_updates, c*n*size*size);
+        l.weights_gpu = cuda_make_array(l.weights, l.nweights);
+        l.weight_updates_gpu = cuda_make_array(l.weight_updates, l.nweights);
 #ifdef CUDNN_HALF
-        l.weights_gpu16 = cuda_make_array(NULL, c*n*size*size / 2 + 1); //cuda_make_array(l.weights, c*n*size*size / 2);
-        l.weight_updates_gpu16 = cuda_make_array(NULL, c*n*size*size / 2 + 1); //cuda_make_array(l.weight_updates, c*n*size*size / 2);
+        l.weights_gpu16 = cuda_make_array(NULL, l.nweights / 2 + 1);
+        l.weight_updates_gpu16 = cuda_make_array(NULL, l.nweights / 2 + 1);
 #endif

         l.biases_gpu = cuda_make_array(l.biases, n);
@@ -457,10 +466,10 @@ convolutional_layer make_convolutional_layer(int batch, int steps, int h, int w,
         l.delta_gpu = cuda_make_array(l.delta, total_batch*out_h*out_w*n);

         if(binary){
-            l.binary_weights_gpu = cuda_make_array(l.weights, c*n*size*size);
+            l.binary_weights_gpu = cuda_make_array(l.weights, l.nweights);
         }
         if(xnor){
-            l.binary_weights_gpu = cuda_make_array(l.weights, c*n*size*size);
+            l.binary_weights_gpu = cuda_make_array(l.weights, l.nweights);
             l.mean_arr_gpu = cuda_make_array(0, l.n);
             l.binary_input_gpu = cuda_make_array(0, l.inputs*l.batch);
         }
@@ -490,7 +499,7 @@ convolutional_layer make_convolutional_layer(int batch, int steps, int h, int w,
     l.workspace_size = get_convolutional_workspace_size(l);

     //fprintf(stderr, "conv %5d %2d x%2d /%2d %4d x%4d x%4d -> %4d x%4d x%4d\n", n, size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c);
-    l.bflops = (2.0 * l.n * l.size*l.size*l.c * l.out_h*l.out_w) / 1000000000.;
+    l.bflops = (2.0 * l.nweights * l.out_h*l.out_w) / 1000000000.;
     if (l.xnor && l.use_bin_output) fprintf(stderr, "convXB");
     else if (l.xnor) fprintf(stderr, "convX ");
     else fprintf(stderr, "conv ");
@@ -504,8 +513,8 @@ void denormalize_convolutional_layer(convolutional_layer l)
     int i, j;
     for(i = 0; i < l.n; ++i){
         float scale = l.scales[i]/sqrt(l.rolling_variance[i] + .00001);
-        for(j = 0; j < l.c*l.size*l.size; ++j){
-            l.weights[i*l.c*l.size*l.size + j] *= scale;
+        for(j = 0; j < l.nweights; ++j){
+            l.weights[i*l.nweights + j] *= scale;
         }
         l.biases[i] -= l.rolling_mean[i] * scale;
         l.scales[i] = 1;
@@ -516,7 +525,7 @@ void denormalize_convolutional_layer(convolutional_layer l)

 void test_convolutional_layer()
 {
-    convolutional_layer l = make_convolutional_layer(1, 1, 5, 5, 3, 2, 5, 2, 1, LEAKY, 1, 0, 0, 0, 0, 0);
+    convolutional_layer l = make_convolutional_layer(1, 1, 5, 5, 3, 2, 1, 5, 2, 1, LEAKY, 1, 0, 0, 0, 0, 0);
     l.batch_normalize = 1;
     float data[] = {1,1,1,1,1,
         1,1,1,1,1,
@@ -691,8 +700,8 @@ void bit_to_float(unsigned char *src, float *dst, size_t size, size_t filters, f

 void binary_align_weights(convolutional_layer *l)
 {
-    int m = l->n;
-    int k = l->size*l->size*l->c;
+    int m = l->n; // (l->n / l->groups)
+    int k = l->size*l->size*l->c; // l->size*l->size*(l->c / l->groups)
     size_t new_lda = k + (l->lda_align - k % l->lda_align); // (k / 8 + 1) * 8;
     l->new_lda = new_lda;

@@ -823,13 +832,13 @@ void forward_convolutional_layer(convolutional_layer l, network_state state)
 {
     int out_h = convolutional_out_height(l);
     int out_w = convolutional_out_width(l);
-    int i;
+    int i, j;

     fill_cpu(l.outputs*l.batch, 0, l.output, 1);

-    if (l.xnor && (!l.align_bit_weights || state.train)) {
-        binarize_weights(l.weights, l.n, l.c*l.size*l.size, l.binary_weights);
+    if (!l.align_bit_weights || state.train) {
+        binarize_weights(l.weights, l.n, l.nweights, l.binary_weights);
         //printf("\n binarize_weights l.align_bit_weights = %p \n", l.align_bit_weights);
     }
     swap_binary(&l);
@@ -837,147 +846,150 @@
         state.input = l.binary_input;
     }

-    int m = l.n;
-    int k = l.size*l.size*l.c;
+    int m = l.n / l.groups;
+    int k = l.size*l.size*l.c / l.groups;
     int n = out_h*out_w;

-    float *a = l.weights;
-    float *b = state.workspace;
-    float *c = l.output;
-
     static int u = 0;
     u++;

     for(i = 0; i < l.batch; ++i){
+        for (j = 0; j < l.groups; ++j) {
+
+            float *a = l.weights + j*l.nweights / l.groups;
+            float *b = state.workspace;
+            float *c = l.output + (i*l.groups + j)*n*m;

             //gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
             //gemm_nn_custom(m, n, k, 1, a, k, b, n, c, n);
             if (l.xnor && l.align_bit_weights && !state.train)
             {
                 memset(b, 0, l.bit_align*l.size*l.size*l.c * sizeof(float));

                 if (l.c % 32 == 0)
                 {
                     //printf(" l.index = %d - new XNOR \n", l.index);

                     int ldb_align = l.lda_align;
                     size_t new_ldb = k + (ldb_align - k%ldb_align); // (k / 8 + 1) * 8;
                     //size_t t_intput_size = new_ldb * l.bit_align;// n;
                     //size_t t_bit_input_size = t_intput_size / 8;// +1;

                     int re_packed_input_size = l.c * l.w * l.h;
                     memset(state.workspace, 0, re_packed_input_size * sizeof(float));

                     const size_t new_c = l.c / 32;
                     size_t in_re_packed_input_size = new_c * l.w * l.h + 1;
                     memset(l.bin_re_packed_input, 0, in_re_packed_input_size * sizeof(uint32_t));

                     //float *re_packed_input = calloc(l.c * l.w * l.h, sizeof(float));
                     //uint32_t *bin_re_packed_input = calloc(new_c * l.w * l.h + 1, sizeof(uint32_t));

                     // float32x4 by channel (as in cuDNN)
                     repack_input(state.input, state.workspace, l.w, l.h, l.c);

                     // 32 x floats -> 1 x uint32_t
                     float_to_bit(state.workspace, (unsigned char *)l.bin_re_packed_input, l.c * l.w * l.h);

                     //free(re_packed_input);

                     // slow - convolution the packed inputs and weights: float x 32 by channel (as in cuDNN)
                     //convolution_repacked((uint32_t *)bin_re_packed_input, (uint32_t *)l.align_bit_weights, l.output,
                     //    l.w, l.h, l.c, l.n, l.size, l.pad, l.new_lda, l.mean_arr);
                     // // then exit from if()

                     im2col_cpu_custom((float *)l.bin_re_packed_input, new_c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
                     //im2col_cpu((float *)bin_re_packed_input, new_c, l.h, l.w, l.size, l.stride, l.pad, b);

                     //free(bin_re_packed_input);

                     int new_k = l.size*l.size*l.c / 32;

                     // good for (l.c == 64)
                     //gemm_nn_bin_32bit_packed(m, n, new_k, 1,
                     //    l.align_bit_weights, l.new_lda/32,
                     //    b, n,
                     //    c, n, l.mean_arr);
                     // // then exit from if()

                     transpose_uint32((uint32_t *)state.workspace, (uint32_t*)l.t_bit_input, new_k, n, n, new_ldb);

                     // the main GEMM function
                     gemm_nn_custom_bin_mean_transposed(m, n, k, 1, (unsigned char*)l.align_bit_weights, new_ldb, (unsigned char*)l.t_bit_input, new_ldb, c, n, l.mean_arr);

                     // // alternative GEMM
                     //gemm_nn_bin_transposed_32bit_packed(m, n, new_k, 1,
                     //    l.align_bit_weights, l.new_lda/32,
                     //    t_bit_input, new_ldb / 32,
                     //    c, n, l.mean_arr);

                     //free(t_bit_input);
                 }
                 else
                 { // else (l.c % 32 != 0)
                     //--------------------------------------------------------
                     //printf(" l.index = %d - old XNOR \n", l.index);

                     //im2col_cpu_custom_align(state.input, l.c, l.h, l.w, l.size, l.stride, l.pad, b, l.bit_align);
                     im2col_cpu_custom_bin(state.input, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace, l.bit_align);

                     //size_t output_size = l.outputs;
                     //float *count_output = calloc(output_size, sizeof(float));
                     //size_t bit_output_size = output_size / 8 + 1;
                     //char *bit_output = calloc(bit_output_size, sizeof(char));

                     //size_t intput_size = n * k; // (out_h*out_w) X (l.size*l.size*l.c) : after im2col()
                     //size_t bit_input_size = intput_size / 8 + 1;
                     //char *bit_input = calloc(bit_input_size, sizeof(char));

                     //size_t weights_size = k * m; //l.size*l.size*l.c*l.n;
                     //size_t bit_weights_size = weights_size / 8 + 1;
                     //char *bit_weights = calloc(bit_weights_size, sizeof(char));
                     //float *mean_arr = calloc(l.n, sizeof(float));

                     // transpose B from NxK to KxN (x-axis (ldb = l.size*l.size*l.c) - should be multiple of 8 bits)
                     {
                         //size_t ldb_align = 256; // 256 bit for AVX2
                         int ldb_align = l.lda_align;
                         size_t new_ldb = k + (ldb_align - k%ldb_align);
                         size_t t_intput_size = binary_transpose_align_input(k, n, state.workspace, &l.t_bit_input, ldb_align, l.bit_align);

                         // 5x times faster than gemm()-float32
                         gemm_nn_custom_bin_mean_transposed(m, n, k, 1, (unsigned char*)l.align_bit_weights, new_ldb, (unsigned char*)l.t_bit_input, new_ldb, c, n, l.mean_arr);

                         //gemm_nn_custom_bin_mean_transposed(m, n, k, 1, bit_weights, k, t_bit_input, new_ldb, c, n, mean_arr);

                         //free(t_input);
                         //free(t_bit_input);
                         //}
                     }
                 }

                 add_bias(l.output, l.biases, l.batch, l.n, out_h*out_w);

                 //activate_array(l.output, m*n*l.batch, l.activation);
                 activate_array_cpu_custom(l.output, m*n*l.batch, l.activation);
                 return;
             }
             else {
                 //printf(" l.index = %d - FP32 \n", l.index);
-                im2col_cpu_custom(state.input, l.c, l.h, l.w, l.size, l.stride, l.pad, b);
+                im2col_cpu(state.input + (i*l.groups + j)*l.c / l.groups*l.h*l.w,
+                    l.c / l.groups, l.h, l.w, l.size, l.stride, l.pad, b);

                 gemm(0, 0, m, n, k, 1, a, k, b, n, 1, c, n);
                 // bit-count to float
             }
             c += n*m;
             state.input += l.c*l.h*l.w;
+        }
     }

     if(l.batch_normalize){
@@ -986,63 +998,72 @@ void forward_convolutional_layer(convolutional_layer l, network_state state)
     add_bias(l.output, l.biases, l.batch, l.n, out_h*out_w);

     //activate_array(l.output, m*n*l.batch, l.activation);
-    activate_array_cpu_custom(l.output, m*n*l.batch, l.activation);
+    activate_array_cpu_custom(l.output, l.outputs*l.batch, l.activation);

     if(l.binary || l.xnor) swap_binary(&l);
 }


 void backward_convolutional_layer(convolutional_layer l, network_state state)
 {
-    int i;
-    int m = l.n;
-    int n = l.size*l.size*l.c;
-    int k = convolutional_out_height(l)*
-        convolutional_out_width(l);
+    int i, j;
+    int m = l.n / l.groups;
+    int n = l.size*l.size*l.c / l.groups;
+    int k = l.out_w*l.out_h;

-    gradient_array(l.output, m*k*l.batch, l.activation, l.delta);
-    backward_bias(l.bias_updates, l.delta, l.batch, l.n, k);
+    gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);

-    if(l.batch_normalize){
+    if (l.batch_normalize) {
         backward_batchnorm_layer(l, state);
     }
+    else {
+        backward_bias(l.bias_updates, l.delta, l.batch, l.n, k);
+    }

-    for(i = 0; i < l.batch; ++i){
-        float *a = l.delta + i*m*k;
-        float *b = state.workspace;
-        float *c = l.weight_updates;
+    for (i = 0; i < l.batch; ++i) {
+        for (j = 0; j < l.groups; ++j) {
+            float *a = l.delta + (i*l.groups + j)*m*k;
+            float *b = state.workspace;
+            float *c = l.weight_updates + j*l.nweights / l.groups;

-        float *im = state.input+i*l.c*l.h*l.w;
+            float *im = state.input + (i*l.groups + j)*l.c / l.groups*l.h*l.w;

-        im2col_cpu(im, l.c, l.h, l.w,
-            l.size, l.stride, l.pad, b);
-        gemm(0,1,m,n,k,1,a,k,b,k,1,c,n);
+            im2col_cpu(im, l.c / l.groups, l.h, l.w,
+                l.size, l.stride, l.pad, b);
+            gemm(0, 1, m, n, k, 1, a, k, b, k, 1, c, n);

-        if(state.delta){
-            a = l.weights;
-            b = l.delta + i*m*k;
-            c = state.workspace;
+            if (state.delta) {
+                a = l.weights + j*l.nweights / l.groups;
+                b = l.delta + (i*l.groups + j)*m*k;
+                c = state.workspace;

-            gemm(1,0,n,k,m,1,a,n,b,k,0,c,k);
+                gemm(1, 0, n, k, m, 1, a, n, b, k, 0, c, k);

-            col2im_cpu(state.workspace, l.c, l.h, l.w, l.size, l.stride, l.pad, state.delta+i*l.c*l.h*l.w);
+                col2im_cpu(state.workspace, l.c / l.groups, l.h, l.w, l.size, l.stride,
+                    l.pad, state.delta + (i*l.groups + j)*l.c / l.groups*l.h*l.w);
+            }
         }
     }
 }

-void update_convolutional_layer(convolutional_layer l, int batch, float learning_rate, float momentum, float decay)
+void update_convolutional_layer(convolutional_layer l, update_args a)
 {
-    int size = l.size*l.size*l.c*l.n;
-    axpy_cpu(l.n, learning_rate/batch, l.bias_updates, 1, l.biases, 1);
+    float learning_rate = a.learning_rate*l.learning_rate_scale;
+    float momentum = a.momentum;
+    float decay = a.decay;
+    int batch = a.batch;
+
+    axpy_cpu(l.n, learning_rate / batch, l.bias_updates, 1, l.biases, 1);
     scal_cpu(l.n, momentum, l.bias_updates, 1);

-    if(l.scales){
-        axpy_cpu(l.n, learning_rate/batch, l.scale_updates, 1, l.scales, 1);
+    if (l.scales) {
+        axpy_cpu(l.n, learning_rate / batch, l.scale_updates, 1, l.scales, 1);
         scal_cpu(l.n, momentum, l.scale_updates, 1);
     }

-    axpy_cpu(size, -decay*batch, l.weights, 1, l.weight_updates, 1);
-    axpy_cpu(size, learning_rate/batch, l.weight_updates, 1, l.weights, 1);
-    scal_cpu(size, momentum, l.weight_updates, 1);
+    axpy_cpu(l.nweights, -decay*batch, l.weights, 1, l.weight_updates, 1);
+    axpy_cpu(l.nweights, learning_rate / batch, l.weight_updates, 1, l.weights, 1);
+    scal_cpu(l.nweights, momentum, l.weight_updates, 1);
 }

@@ -1050,14 +1071,14 @@ image get_convolutional_weight(convolutional_layer l, int i)
 {
     int h = l.size;
     int w = l.size;
-    int c = l.c;
-    return float_to_image(w,h,c,l.weights+i*h*w*c);
+    int c = l.c / l.groups;
+    return float_to_image(w, h, c, l.weights + i*h*w*c);
 }

 void rgbgr_weights(convolutional_layer l)
 {
     int i;
-    for(i = 0; i < l.n; ++i){
+    for (i = 0; i < l.n; ++i) {
         image im = get_convolutional_weight(l, i);
         if (im.c == 3) {
             rgbgr_image(im);
@@ -1068,7 +1089,7 @@ void rgbgr_weights(convolutional_layer l)
 void rescale_weights(convolutional_layer l, float scale, float trans)
 {
     int i;
-    for(i = 0; i < l.n; ++i){
+    for (i = 0; i < l.n; ++i) {
         image im = get_convolutional_weight(l, i);
         if (im.c == 3) {
             scale_image(im, scale);
@@ -1080,12 +1101,18 @@ void rescale_weights(convolutional_layer l, float scale, float trans)

 image *get_weights(convolutional_layer l)
 {
-    image* weights = (image*)calloc(l.n, sizeof(image));
+    image *weights = calloc(l.n, sizeof(image));
     int i;
-    for(i = 0; i < l.n; ++i){
+    for (i = 0; i < l.n; ++i) {
         weights[i] = copy_image(get_convolutional_weight(l, i));
         //normalize_image(weights[i]);
+        normalize_image(weights[i]);
+        /*
+        char buff[256];
+        sprintf(buff, "filter%d", i);
+        save_image(weights[i], buff);
+        */
     }
     //error("hey");
     return weights;
 }

@@ -1102,4 +1129,4 @@ image *visualize_convolutional_layer(convolutional_layer l, char *window, image
     //save_image(dc, buff);
     free_image(dc);
     return single_weights;
-}
+}