Added antialiasing=1 param for the [maxpool] layer on GPU and CPU

This commit is contained in:
AlexeyAB
2019-09-02 15:25:42 +03:00
parent 80ceee4fca
commit 9e26472b1a
5 changed files with 126 additions and 38 deletions

View File

@ -1141,7 +1141,6 @@ void forward_convolutional_layer(convolutional_layer l, network_state state)
s.train = state.train;
s.workspace = state.workspace;
s.net = state.net;
if (!state.train) s.index = state.index; // don't use TC for training (especially without cuda_convert_f32_to_f16() )
s.input = l.output;
forward_convolutional_layer(*(l.input_layer), s);
//simple_copy_ongpu(l.outputs*l.batch, l.output, l.input_antialiasing);

View File

@ -1,4 +1,5 @@
#include "maxpool_layer.h"
#include "convolutional_layer.h"
#include "dark_cuda.h"
#include "gemm.h"
#include <stdio.h>
@ -45,10 +46,18 @@ void cudnn_maxpool_setup(layer *l)
}
maxpool_layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride_x, int stride_y, int padding, int maxpool_depth, int out_channels)
maxpool_layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride_x, int stride_y, int padding, int maxpool_depth, int out_channels, int antialiasing)
{
maxpool_layer l = { (LAYER_TYPE)0 };
l.type = MAXPOOL;
const int blur_stride_x = stride_x;
const int blur_stride_y = stride_y;
l.antialiasing = antialiasing;
if (antialiasing) {
stride_x = stride_y = l.stride = l.stride_x = l.stride_y = 1; // use stride=1 in host-layer
}
l.batch = batch;
l.h = h;
l.w = w;
@ -94,6 +103,46 @@ maxpool_layer make_maxpool_layer(int batch, int h, int w, int c, int size, int s
else
fprintf(stderr, "max %d x %d/%2dx%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", size, size, stride_x, stride_y, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops);
if (l.antialiasing) {
printf("AA: ");
l.input_layer = (layer*)calloc(1, sizeof(layer));
const int blur_size = 3;
*(l.input_layer) = make_convolutional_layer(batch, 1, l.out_h, l.out_w, l.out_c, l.out_c, l.out_c, blur_size, blur_stride_x, blur_stride_y, 1, blur_size / 2, LINEAR, 0, 0, 0, 0, 0, 1, 0, NULL);
const int blur_nweights = l.out_c * blur_size * blur_size; // (n / n) * n * blur_size * blur_size;
int i;
for (i = 0; i < blur_nweights; i += (blur_size*blur_size)) {
/*
l.input_layer->weights[i + 0] = 0;
l.input_layer->weights[i + 1] = 0;
l.input_layer->weights[i + 2] = 0;
l.input_layer->weights[i + 3] = 0;
l.input_layer->weights[i + 4] = 1;
l.input_layer->weights[i + 5] = 0;
l.input_layer->weights[i + 6] = 0;
l.input_layer->weights[i + 7] = 0;
l.input_layer->weights[i + 8] = 0;
*/
l.input_layer->weights[i + 0] = 1 / 16.f;
l.input_layer->weights[i + 1] = 2 / 16.f;
l.input_layer->weights[i + 2] = 1 / 16.f;
l.input_layer->weights[i + 3] = 2 / 16.f;
l.input_layer->weights[i + 4] = 4 / 16.f;
l.input_layer->weights[i + 5] = 2 / 16.f;
l.input_layer->weights[i + 6] = 1 / 16.f;
l.input_layer->weights[i + 7] = 2 / 16.f;
l.input_layer->weights[i + 8] = 1 / 16.f;
}
for (i = 0; i < l.out_c; ++i) l.input_layer->biases[i] = 0;
#ifdef GPU
l.input_antialiasing_gpu = cuda_make_array(NULL, l.batch*l.outputs);
push_convolutional_layer(*(l.input_layer));
#endif // GPU
}
return l;
}
@ -159,8 +208,8 @@ void forward_maxpool_layer(const maxpool_layer l, network_state state)
if (!state.train && l.stride_x == l.stride_y) {
forward_maxpool_layer_avx(state.input, l.output, l.indexes, l.size, l.w, l.h, l.out_w, l.out_h, l.c, l.pad, l.stride, l.batch);
return;
}
else {
int b, i, j, k, m, n;
int w_offset = -l.pad / 2;
@ -197,6 +246,18 @@ void forward_maxpool_layer(const maxpool_layer l, network_state state)
}
}
if (l.antialiasing) {
network_state s = { 0 };
s.train = state.train;
s.workspace = state.workspace;
s.net = state.net;
s.input = l.output;
forward_convolutional_layer(*(l.input_layer), s);
//simple_copy_ongpu(l.outputs*l.batch, l.output, l.input_antialiasing);
memcpy(l.output, l.input_layer->output, l.input_layer->outputs * l.input_layer->batch * sizeof(float));
}
}
void backward_maxpool_layer(const maxpool_layer l, network_state state)
{
int i;

View File

@ -12,7 +12,7 @@ typedef layer maxpool_layer;
extern "C" {
#endif
image get_maxpool_image(maxpool_layer l);
maxpool_layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride_x, int stride_y, int padding, int maxpool_depth, int out_channels);
maxpool_layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride_x, int stride_y, int padding, int maxpool_depth, int out_channels, int antialiasing);
void resize_maxpool_layer(maxpool_layer *l, int w, int h);
void forward_maxpool_layer(const maxpool_layer l, network_state state);
void backward_maxpool_layer(const maxpool_layer l, network_state state);

View File

@ -3,6 +3,8 @@
#include <cublas_v2.h>
#include "maxpool_layer.h"
#include "convolutional_layer.h"
#include "blas.h"
#include "dark_cuda.h"
__global__ void forward_maxpool_depth_layer_kernel(int n, int w, int h, int c, int out_c, int batch, float *input, float *output, int *indexes)
@ -163,10 +165,10 @@ extern "C" void forward_maxpool_layer_gpu(maxpool_layer layer, network_state sta
//cudnnDestroyTensorDescriptor(layer.srcTensorDesc);
//cudnnDestroyTensorDescriptor(layer.dstTensorDesc);
return;
}
else
#endif
{
int h = layer.out_h;
int w = layer.out_w;
int c = layer.out_c;
@ -177,8 +179,33 @@ extern "C" void forward_maxpool_layer_gpu(maxpool_layer layer, network_state sta
CHECK_CUDA(cudaPeekAtLastError());
}
if (layer.antialiasing) {
network_state s = { 0 };
s.train = state.train;
s.workspace = state.workspace;
s.net = state.net;
if (!state.train) s.index = state.index; // don't use TC for training (especially without cuda_convert_f32_to_f16() )
s.input = layer.output_gpu;
forward_convolutional_layer_gpu(*(layer.input_layer), s);
simple_copy_ongpu(layer.outputs*layer.batch, layer.output_gpu, layer.input_antialiasing_gpu);
simple_copy_ongpu(layer.input_layer->outputs*layer.input_layer->batch, layer.input_layer->output_gpu, layer.output_gpu);
}
}
extern "C" void backward_maxpool_layer_gpu(maxpool_layer layer, network_state state)
{
if (layer.antialiasing) {
network_state s = { 0 };
s.train = state.train;
s.workspace = state.workspace;
s.net = state.net;
s.delta = layer.delta_gpu;
s.input = layer.input_antialiasing_gpu;
//if (!state.train) s.index = state.index; // don't use TC for training (especially without cuda_convert_f32_to_f16() )
simple_copy_ongpu(layer.input_layer->outputs*layer.input_layer->batch, layer.delta_gpu, layer.input_layer->delta_gpu);
backward_convolutional_layer_gpu(*(layer.input_layer), s);
}
if (layer.maxpool_depth) {
int h = layer.out_h;
int w = layer.out_w;

View File

@ -545,6 +545,7 @@ maxpool_layer parse_maxpool(list *options, size_params params)
int padding = option_find_int_quiet(options, "padding", size-1);
int maxpool_depth = option_find_int_quiet(options, "maxpool_depth", 0);
int out_channels = option_find_int_quiet(options, "out_channels", 1);
int antialiasing = option_find_int_quiet(options, "antialiasing", 0);
int batch,h,w,c;
h = params.h;
@ -553,7 +554,7 @@ maxpool_layer parse_maxpool(list *options, size_params params)
batch=params.batch;
if(!(h && w && c)) error("Layer before maxpool layer must output image.");
maxpool_layer layer = make_maxpool_layer(batch, h, w, c, size, stride_x, stride_y, padding, maxpool_depth, out_channels);
maxpool_layer layer = make_maxpool_layer(batch, h, w, c, size, stride_x, stride_y, padding, maxpool_depth, out_channels, antialiasing);
return layer;
}