transpose 32x32 on GPU

AlexeyAB
2018-10-19 22:55:25 +03:00
parent 9e2c894a32
commit d487bdf471
3 changed files with 134 additions and 14 deletions

View File

@@ -160,6 +160,9 @@ void cudnn_convolutional_setup(layer *l, int cudnn_preference)
// 3. FP32 Master Copy of Weights
// More: http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#tensor_ops
cudnnSetConvolutionMathType(l->convDesc, CUDNN_TENSOR_OP_MATH);
#if((CUDNN_MAJOR*10 + CUDNN_MINOR) >= 72) // cuDNN >= 7.2
cudnnSetConvolutionMathType(l->convDesc, CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION);
#endif
#endif
// INT8_CONFIG, INT8_EXT_CONFIG, INT8x4_CONFIG and INT8x4_EXT_CONFIG are only supported
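The #if guard above is compile-time only; since the loaded cuDNN library can be older than the headers, a run-time check can back it up. A minimal sketch, not part of this commit (cudnnGetVersion() returns CUDNN_MAJOR*1000 + CUDNN_MINOR*100 + CUDNN_PATCHLEVEL, so 7.2.0 reports 7200):

#if ((CUDNN_MAJOR*10 + CUDNN_MINOR) >= 72)
    if (cudnnGetVersion() >= 7200)   // linked runtime is also >= 7.2
        cudnnSetConvolutionMathType(l->convDesc, CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION);
#endif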

View File

@@ -378,8 +378,10 @@ void transpose_32x32_bits_reversed_diagonale(uint32_t *A, uint32_t *B, int m, int n)
{
unsigned A_tmp[32];
int i;
#pragma unroll
for (i = 0; i < 32; ++i) A_tmp[i] = A[i * m];
transpose32_optimized(A_tmp);
#pragma unroll
for (i = 0; i < 32; ++i) B[i*n] = A_tmp[i];
}
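The two strided loops move one 32x32-bit tile: the gather reads 32 words down a column (stride m words), transpose32_optimized() flips the tile in registers, and the scatter writes the result back with stride n. As a correctness oracle for the swap network (before its final reverse-Y mirror, shown in the CUDA file below), a naive bit transpose can be used; this sketch is not part of the commit:

// Plain 32x32 bit-matrix transpose: bit j of B[i] == bit i of A[j].
static void transpose32_naive(const uint32_t A[32], uint32_t B[32])
{
    int i, j;
    for (i = 0; i < 32; ++i) {
        uint32_t w = 0;
        for (j = 0; j < 32; ++j)
            w |= ((A[j] >> i) & 1u) << j;
        B[i] = w;
    }
}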
@@ -390,7 +392,6 @@ void transpose_8x8_bits_my(unsigned char *A, unsigned char *B, int lda, int ldb)
for (y = 0; y < 8; ++y) {
for (x = 0; x < 8; ++x) {
if (A[y * lda] & (1 << x)) B[x * ldb] |= 1 << y;
//B[x * ldb] = 1;
}
}
}
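Note that transpose_8x8_bits_my() only ever ORs bits into B and never clears any, so the caller must zero-initialize the destination before the transpose.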

View File

@@ -647,7 +647,13 @@ __device__ __host__ unsigned char reverse_byte_2(unsigned char a)
return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16;
}
__device__ __host__ void transpose8rS32_reversed_diagonale(unsigned char* A, int m, int n, unsigned char* B)
__device__ unsigned char reverse_byte_CUDA(unsigned char a)
{
uint32_t tmp = __brev(a);
return tmp >> 24;
}
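__brev() reverses all 32 bits of its operand in a single instruction, so the byte that started in bits 0..7 lands in bits 24..31; the >> 24 then yields the bit-reversed byte, replacing the multiply-and-mask trick of reverse_byte_2() on the device.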
__device__ void transpose8rS32_reversed_diagonale(unsigned char* A, unsigned char* B, int m, int n)
{
unsigned x, y, t;
@@ -665,8 +671,8 @@ __device__ __host__ void transpose8rS32_reversed_diagonale(unsigned char* A, int m, int n, unsigned char* B)
y = ((x << 4) & 0xF0F0F0F0) | (y & 0x0F0F0F0F);
x = t;
B[7 * n] = reverse_byte(x >> 24); B[6 * n] = reverse_byte(x >> 16); B[5 * n] = reverse_byte(x >> 8); B[4 * n] = reverse_byte(x);
B[3 * n] = reverse_byte(y >> 24); B[2 * n] = reverse_byte(y >> 16); B[1 * n] = reverse_byte(y >> 8); B[0 * n] = reverse_byte(y);
B[7 * n] = reverse_byte_CUDA(x >> 24); B[6 * n] = reverse_byte_CUDA(x >> 16); B[5 * n] = reverse_byte_CUDA(x >> 8); B[4 * n] = reverse_byte_CUDA(x);
B[3 * n] = reverse_byte_CUDA(y >> 24); B[2 * n] = reverse_byte_CUDA(y >> 16); B[1 * n] = reverse_byte_CUDA(y >> 8); B[0 * n] = reverse_byte_CUDA(y);
//__device__ unsigned int __brev(unsigned int x)
//Reverse the bit order of a 32-bit unsigned integer.
@@ -674,6 +680,7 @@ __device__ __host__ void transpose8rS32_reversed_diagonale(unsigned char* A, int m, int n, unsigned char* B)
}
// transpose 8x8 bit
__global__ void transpose_bin_gpu_kernel(unsigned char *A, unsigned char *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
@@ -687,28 +694,137 @@ __global__ void transpose_bin_gpu_kernel(unsigned char *A, unsigned char *B, const int n, const int m,
//for (j = 0; j < m - 8; j += 8)
{
j = ((index * 8) / n) * 8;
if (j < m - 8) {
if (j < m) {
int a_index = i*lda + j;
int b_index = j*ldb + i;
transpose8rS32_reversed_diagonale(&A[a_index / 8], lda / 8, ldb / 8, &B[b_index / 8]);
}
else if (j < m) {
for (; j < m; ++j) {
if (get_bit(A, i*lda + j)) set_bit(B, j*ldb + i);
else remove_bit(B, j*ldb + i);
}
transpose8rS32_reversed_diagonale(&A[a_index / 8], &B[b_index / 8], lda / 8, ldb / 8);
}
//else if (j < m) {
// for (; j < m; ++j) {
// if (get_bit(A, i*lda + j)) set_bit(B, j*ldb + i);
// else remove_bit(B, j*ldb + i);
// }
//}
}
}
}
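Each thread of this kernel now transposes one full 8x8-bit tile of A into the mirrored position in B; the /8 turns bit offsets into byte addresses, so n, m, lda and ldb are assumed to be multiples of 8, as darknet's bit-packed layouts are padded to. The scalar bit-by-bit fallback for a ragged tail is kept only in commented-out form above.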
__device__ __host__ uint8_t reverse_8_bit(uint8_t a) {
return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16;
}
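reverse_8_bit() is the classic multiply-and-mask byte reversal from the Bit Twiddling Hacks collection: the two 64-bit multiplies spread copies of the byte, the masks 0x22110 and 0x88440 select the bits in reversed order, and the final multiply by 0x10101 with the >> 16 gathers them into a single byte.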
__device__ uint32_t reverse_32_bit(uint32_t a)
{
// __device__ unsigned int __brev(unsigned int x) // CUDA
// unsigned int __rbit(unsigned int val) // for ARM //__asm__("rbit %0, %1\n" : "=r"(output) : "r"(input));
return __brev(a);
//return (reverse_8_bit(a >> 24) << 0) |
// (reverse_8_bit(a >> 16) << 8) |
// (reverse_8_bit(a >> 8) << 16) |
// (reverse_8_bit(a >> 0) << 24);
}
#define swap(a0, a1, j, m) t = (a0 ^ (a1 >> j)) & m; a0 = a0 ^ t; a1 = a1 ^ (t << j);
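swap() is the masked delta-swap from Hacker's Delight: it exchanges the bits of a0 selected by m with the bits of a1 that sit j positions higher. The five rounds below, with j halving from 16 to 1 and the matching checkerboard masks, implement the recursive block transpose of a 32x32 bit matrix.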
__device__ void transpose32_optimized(uint32_t A[32]) {
int j, k;
unsigned m, t;
//m = 0x0000FFFF;
//for (j = 16; j != 0; j = j >> 1, m = m ^ (m << j)) {
// for (k = 0; k < 32; k = (k + j + 1) & ~j) {
// t = (A[k] ^ (A[k + j] >> j)) & m;
// A[k] = A[k] ^ t;
// A[k + j] = A[k + j] ^ (t << j);
// }
//}
j = 16;
m = 0x0000FFFF;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 8;
m = 0x00ff00ff;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 4;
m = 0x0f0f0f0f;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 2;
m = 0x33333333;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 1;
m = 0x55555555;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
// reverse Y
for (j = 0; j < 16; ++j) {
uint32_t tmp = A[j];
A[j] = reverse_32_bit(A[31 - j]);
A[31 - j] = reverse_32_bit(tmp);
}
}
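The final loop swaps row j with row 31-j while bit-reversing both words, i.e. it mirrors the transposed tile along both axes; composed with the transpose above, this reflects the tile across the anti-diagonal, which is presumably what the _reversed_diagonale suffix refers to.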
#define BLOCK_TRANSPOSE32 256
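With 256 threads per block, the A_shared staging buffer below occupies 256 * 32 * 4 B = 32 KB of shared memory per block, comfortably under the usual 48 KB per-block limit but large enough to cap occupancy at one such block per SM on most GPUs.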
__device__ void transpose_32x32_bits_reversed_diagonale(uint32_t *A, uint32_t *B, int m, int n)
{
//unsigned A_tmp[32];
//int i;
//#pragma unroll
//for (i = 0; i < 32; ++i) A_tmp[i] = A[i * m];
//transpose32_optimized(A_tmp);
//#pragma unroll
//for (i = 0; i < 32; ++i) B[i*n] = A_tmp[i];
__shared__ uint32_t A_shared[32 * BLOCK_TRANSPOSE32];
uint32_t *A_tmp = &A_shared[32 * threadIdx.x];
int i;
#pragma unroll 32
for (i = 0; i < 32; ++i) A_tmp[i] = A[i * m];
transpose32_optimized(A_tmp);
#pragma unroll 32
for (i = 0; i < 32; ++i) B[i*n] = A_tmp[i];
}
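Each thread still owns a private 32-word slice of A_shared (indexed by threadIdx.x, so no __syncthreads() is needed); the shared buffer is pure scratch that keeps the tile out of local memory, where the dynamically indexed A[k] accesses inside transpose32_optimized() would otherwise force a spill.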
// transpose 32x32 bit
__global__ void transpose_bin_gpu_kernel_32(uint32_t *A, uint32_t *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
int i;
int index = (blockIdx.x*blockDim.x + threadIdx.x) * 32;
//for (i = 0; i < n; i += 8)
{
i = index % n;
int j;
//for (j = 0; j < m - 8; j += 8)
{
j = (index / n) * 32;
if (j < m) {
int a_index = i*lda + j;
int b_index = j*ldb + i;
transpose_32x32_bits_reversed_diagonale(&A[a_index / 32], &B[b_index / 32], lda / 32, ldb / 32);
}
}
}
}
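This is the same tiling as the 8x8 kernel, one level up: index is pre-scaled by 32, so i and j address one 32x32-bit tile per thread and the /32 converts bit offsets to word addresses. Correct coverage assumes n, m, lda and ldb are all multiples of 32.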
void transpose_bin_gpu(unsigned char *A, unsigned char *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
size_t size = n*m/64 + 1;
size_t size = n*m / (8*8) + 1;
size_t size32 = n*m / (32*32) + 1;
const int num_blocks = size / BLOCK + 1;
transpose_bin_gpu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(A, B, n, m, lda, ldb, block_size);
const int num_blocks32 = size32 / BLOCK_TRANSPOSE32 + 1;
transpose_bin_gpu_kernel_32 << <num_blocks32, BLOCK_TRANSPOSE32, 0, get_cuda_stream() >> >((uint32_t *)A, (uint32_t *)B, n, m, lda, ldb, block_size);
//transpose_bin_gpu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(A, B, n, m, lda, ldb, block_size);
}
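A host-side usage sketch (all sizes and buffer names are illustrative, not from the commit): pitches are given in bits and padded to multiples of 32, as the word-granular indexing above requires, and the trailing block_size argument is carried through but unused by the kernels shown.

// Transpose a bit-packed n x m matrix A (row pitch lda bits)
// into the m x n matrix B (row pitch ldb bits), both on the GPU.
const int n = 256, m = 512;
const int lda = 512, ldb = 256;   // bit pitches, multiples of 32
unsigned char *A_gpu, *B_gpu;
cudaMalloc((void **)&A_gpu, n * lda / 8);
cudaMalloc((void **)&B_gpu, m * ldb / 8);
// ... upload bit-packed data into A_gpu ...
transpose_bin_gpu(A_gpu, B_gpu, n, m, lda, ldb, 8);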
// --------------------------------