Temporary Slow implementation of XNOR on CUDA (shared_memory)

AlexeyAB
2018-09-01 00:03:25 +03:00
parent c4a9e3422e
commit 007878393f
6 changed files with 187 additions and 101 deletions
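
The diff below switches the XNOR forward pass from a float32 GEMM to a bit-packed pipeline: im2col_align_ongpu lays out the input, float_to_bit_gpu packs sign bits, transpose_bin_gpu rearranges them for aligned reads, and gemm_nn_custom_bin_mean_transposed_gpu performs the binary matrix multiply. As an illustration only (not part of this commit; the function name, the 64-bit packing, and padding of k to a multiple of 64 are assumptions), the inner product such a binary GEMM computes looks roughly like this:

#include <stdint.h>

/* Sketch of an XNOR/popcount dot product of two length-k {-1,+1} vectors
   stored as sign bits, 64 per uint64_t word; k assumed padded to a
   multiple of 64. Matching bits contribute +1, differing bits -1, and the
   result is rescaled by the per-filter mean of |weights| (mean_arr). */
static float binary_dot(const uint64_t *weight_bits, const uint64_t *input_bits,
                        int k, float mean_val)
{
    int matches = 0;
    for (int i = 0; i < k / 64; ++i)
        matches += __builtin_popcountll(~(weight_bits[i] ^ input_bits[i])); /* XNOR + popcount (GCC/Clang builtin) */
    return (2 * matches - k) * mean_val;
}

In the diff itself, the packed filters live in l.align_bit_weights_gpu and the packed, transposed im2col data in l.transposed_align_workspace_gpu, both with leading dimension new_ldb.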


@@ -110,7 +110,7 @@ half *cuda_make_f16_from_f32_array(float *src, size_t n)
void forward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
//fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
if(l.binary){
binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
swap_binary(&l);
@@ -123,106 +123,52 @@ void forward_convolutional_layer_gpu(convolutional_layer l, network_state state)
swap_binary(&l);
binarize_gpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input_gpu);
state.input = l.binary_input_gpu;
//cudaDeviceSynchronize();
if (l.align_bit_weights_gpu && !state.train)
{
cudaError_t status;
//status = cudaMemcpy(l.align_bit_weights, l.align_bit_weights_gpu, l.align_bit_weights_size, cudaMemcpyDeviceToHost);
//check_error(status);
//float *input = (float *)calloc(l.c*l.h*l.w*l.batch, sizeof(float));
//float *workspace = (float *)calloc(l.bit_align*l.size*l.size*l.c, sizeof(float));
//float *output = (float *)calloc(l.batch*l.out_c*l.out_h*l.out_w, sizeof(float));
//status = cudaMemcpy(input, state.input, l.c*l.h*l.w*l.batch*sizeof(float), cudaMemcpyDeviceToHost);
//check_error(status);
cudaError_t status = cudaSuccess;
int m = l.n;
int k = l.size*l.size*l.c;
int n = l.out_w*l.out_h;
float * a = l.weights_gpu;
//float * b = state.workspace;
//float *b = workspace;
//float * c = l.output_gpu;
//float *c = output;
int ldb_align = l.lda_align;
size_t new_ldb = k + (ldb_align - k%ldb_align); // (k / 8 + 1) * 8;
size_t t_intput_size = new_ldb * n;
size_t t_bit_input_size = t_intput_size / 8;// +1;
//char *t_bit_input = (char *)calloc(t_bit_input_size, sizeof(char));
//int src_size = k * l.bit_align;
//im2col_cpu_custom_bin(input, l.c, l.h, l.w, l.size, l.stride, l.pad, b, l.bit_align);
//float *align_workspace = NULL;
//int align_workspace_size = l.bit_align * k; // aligned: n*k
//status = cudaMalloc((void **)&align_workspace, align_workspace_size*sizeof(float));
//check_error(status);
int i = 0;
im2col_align_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, l.align_workspace_gpu, l.bit_align);
float_to_bit_gpu(l.align_workspace_gpu, (unsigned char *)state.workspace, l.align_workspace_size);
if(1)
{
fill_int8_gpu((unsigned char *)l.align_workspace_gpu, 0, t_bit_input_size);
transpose_bin_gpu((unsigned char *)state.workspace, (unsigned char *)l.align_workspace_gpu, k, n, l.bit_align, new_ldb, 8);
//cudaDeviceSynchronize();
//int size_transposed_array = l.bit_align * new_ldb;
//status = cudaMemcpy(t_bit_input, l.align_workspace_gpu, t_bit_input_size, cudaMemcpyDeviceToHost);
//check_error(status);
}
/*
if (0) {
status = cudaMemcpy(b, state.workspace, l.align_workspace_size / 8, cudaMemcpyDeviceToHost);
check_error(status);
//float *im2 = (float *)calloc(l.align_workspace_size, sizeof(float));
//status = cudaMemcpy(im2, l.align_workspace_gpu, l.align_workspace_size * sizeof(float), cudaMemcpyDeviceToHost);
//check_error(status);
//float_to_bit(im2, (unsigned char *)b, l.align_workspace_size);
memset(t_bit_input, 0, t_bit_input_size);
// b - [bit_align, k] - [l.bit_align, l.size*l.size*l.c] = src_size
// t_input - [bit_align, k] - [n', k]
// t_bit_input - [new_ldb, n] - [k', n]
transpose_bin((char *)b, t_bit_input, k, n, l.bit_align, new_ldb, 8);
}
*/
//status = cudaMemcpy(l.align_bit_weights, l.align_bit_weights_gpu, new_ldb * m / 8, cudaMemcpyDeviceToHost);
//check_error(status);
//status = cudaMemcpy(l.mean_arr, l.mean_arr_gpu, l.n * sizeof(float), cudaMemcpyDeviceToHost);
//check_error(status);
// ~5x faster than float32 gemm()
//gemm_nn_custom_bin_mean_transposed(m, n, k, 1, (unsigned char *)l.align_bit_weights, new_ldb, (unsigned char *)t_bit_input, new_ldb, c, n, l.mean_arr);
//status = cudaMemcpy(l.output_gpu, output, l.batch*l.out_c*l.out_h*l.out_w * sizeof(float), cudaMemcpyHostToDevice);
//check_error(status);
gemm_nn_custom_bin_mean_transposed_gpu(m, n, k, 1,
(unsigned char *)l.align_bit_weights_gpu, new_ldb, (unsigned char *)l.align_workspace_gpu, new_ldb, l.output_gpu, n, l.mean_arr_gpu);
//cudaDeviceSynchronize();
//free(t_bit_input);
//free(input);
//free(workspace);
//free(output);
//cudaFree(align_workspace);
// should be optimized
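// pack the float im2col output in l.align_workspace_gpu into state.workspace, 1 sign bit per value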
float_to_bit_gpu(l.align_workspace_gpu, (unsigned char *)state.workspace, l.align_workspace_size);
//cudaDeviceSynchronize();
//im2col_align_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace, l.bit_align);
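// transpose the bit-packed matrix so each binary dot product reads contiguous bits (leading dimension new_ldb)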
transpose_bin_gpu((unsigned char *)state.workspace, (unsigned char *)l.transposed_align_workspace_gpu, k, n, l.bit_align, new_ldb, 8);
//cudaDeviceSynchronize();
// should be optimized
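// binary GEMM: each output element is (2*popcount(xnor(weight_bits, input_bits)) - k) * mean_arr[filter]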
gemm_nn_custom_bin_mean_transposed_gpu(m, n, k,
(unsigned char *)l.align_bit_weights_gpu, new_ldb, (unsigned char *)l.transposed_align_workspace_gpu, new_ldb, l.output_gpu, n, l.mean_arr_gpu);
//cudaDeviceSynchronize();
//check_error(status);
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
if (l.binary || l.xnor) swap_binary(&l);
//cudaDeviceSynchronize();
return;
}
}
fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
#ifdef CUDNN
float one = 1; // alpha[0], beta[0] is float for HALF and FLOAT
float alpha = 1, beta = 0;