Network resize is fixed
@@ -178,8 +178,13 @@ void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, i
     //int dim_h = (random_val + (init_h / 32 - 5)) * 32; // +-160

     float random_val = rand_scale(1.4); // *x or /x
-    int dim_w = roundl(random_val*init_w / 32) * 32;
-    int dim_h = roundl(random_val*init_h / 32) * 32;
+    int dim_w = roundl(random_val*init_w / 32 + 1) * 32;
+    int dim_h = roundl(random_val*init_h / 32 + 1) * 32;
+
+    if (get_current_batch(net) == 0) {
+        dim_w = roundl(1.4*init_w / 32 + 1) * 32;
+        dim_h = roundl(1.4*init_h / 32 + 1) * 32;
+    }

     if (dim_w < 32) dim_w = 32;
     if (dim_h < 32) dim_h = 32;
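For reference, the following standalone sketch (not part of the commit) walks through the rounding arithmetic in this hunk; the 416 base width and the sample scale values are assumptions chosen for illustration. Because roundl(scale*init_w / 32 + 1) * 32 is non-decreasing in the scale, the dimensions forced at batch 0 with the fixed 1.4 factor are an upper bound on anything rand_scale(1.4) can produce on later batches.

    /* Illustrative only: assumed 416x416 base size; compile with: cc dims.c -lm */
    #include <math.h>
    #include <stdio.h>

    static int dim_for_scale(int init_w, double scale) {
        /* Same expression as the patched lines: nearest multiple of 32, shifted up by one 32-pixel step. */
        return (int)roundl(scale * init_w / 32 + 1) * 32;
    }

    int main(void) {
        const int init_w = 416;                          /* assumed base width */
        const double scales[] = { 1.0 / 1.4, 1.0, 1.4 }; /* rand_scale(1.4) stays within [1/1.4, 1.4] */
        for (int i = 0; i < 3; i++)
            printf("scale %.3f -> dim_w %d\n", scales[i], dim_for_scale(init_w, scales[i]));
        printf("batch 0     -> dim_w %d\n", dim_for_scale(init_w, 1.4)); /* the batch-0 clamp above */
        return 0;
    }

With these inputs it prints 320, 448 and 608 for the three scales, and 608 again for the batch-0 clamp, so the buffers allocated on the first batch already fit every later random resize.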
@@ -77,10 +77,26 @@ void resize_yolo_layer(layer *l, int w, int h)
     l->outputs = h*w*l->n*(l->classes + 4 + 1);
     l->inputs = l->outputs;

-    l->output = realloc(l->output, l->batch*l->outputs*sizeof(float));
-    l->delta = realloc(l->delta, l->batch*l->outputs*sizeof(float));
+    if (!l->output_pinned) l->output = realloc(l->output, l->batch*l->outputs * sizeof(float));
+    if (!l->delta_pinned) l->delta = realloc(l->delta, l->batch*l->outputs*sizeof(float));

 #ifdef GPU
+    if (l->output_pinned) {
+        cudaFreeHost(l->output);
+        if (cudaSuccess != cudaHostAlloc(&l->output, l->batch*l->outputs * sizeof(float), cudaHostRegisterMapped)) {
+            l->output = realloc(l->output, l->batch*l->outputs * sizeof(float));
+            l->output_pinned = 0;
+        }
+    }
+
+    if (l->delta_pinned) {
+        cudaFreeHost(l->delta);
+        if (cudaSuccess != cudaHostAlloc(&l->delta, l->batch*l->outputs * sizeof(float), cudaHostRegisterMapped)) {
+            l->delta = realloc(l->delta, l->batch*l->outputs * sizeof(float));
+            l->delta_pinned = 0;
+        }
+    }
+
     cuda_free(l->delta_gpu);
     cuda_free(l->output_gpu);

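By way of illustration only, here is a minimal sketch of the pinned-host-buffer resize pattern this hunk introduces, built with nvcc. The helper name grow_pinned_buffer, the buffer sizes, the cudaHostAllocMapped flag and the calloc fallback are assumptions made for this sketch; the commit itself operates directly on l->output and l->delta using the layer's output_pinned and delta_pinned flags.

    /* Sketch only: pinned (page-locked) host memory cannot be realloc'd, so a
     * resize frees it with cudaFreeHost and re-allocates it with cudaHostAlloc,
     * falling back to ordinary heap memory when pinned allocation fails. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <cuda_runtime.h>

    static float *grow_pinned_buffer(float *buf, size_t n, int *pinned)
    {
        if (*pinned) {
            cudaFreeHost(buf);
            if (cudaSuccess != cudaHostAlloc((void**)&buf, n * sizeof(float), cudaHostAllocMapped)) {
                buf = (float*)calloc(n, sizeof(float)); /* old pinned block is already gone */
                *pinned = 0;                            /* later resizes take the realloc path */
            }
        } else {
            buf = (float*)realloc(buf, n * sizeof(float));
        }
        return buf;
    }

    int main(void)
    {
        int pinned = 1;
        float *output = NULL;
        if (cudaSuccess != cudaHostAlloc((void**)&output, 416 * 416 * sizeof(float), cudaHostAllocMapped)) {
            output = (float*)calloc(416 * 416, sizeof(float));
            pinned = 0;
        }
        output = grow_pinned_buffer(output, 608 * 608, &pinned); /* simulated resize to 608x608 */
        printf("resized, pinned=%d\n", pinned);
        if (pinned) cudaFreeHost(output); else free(output);
        return 0;
    }

The fallback allocates a fresh heap buffer rather than reusing the old pointer, since cudaFreeHost has already released it, and clearing the flag keeps every subsequent resize on the plain realloc path used by the non-pinned branch.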