Mirror of https://github.com/pjreddie/darknet.git (synced 2023-08-10 21:13:14 +03:00)
MERRY CHRISTMAS I BROKE ALL YOUR DETECTION THINGS
examples/lsd.c (527)
@@ -408,6 +408,8 @@ void test_dcgan(char *cfgfile, char *weightfile)
        for(i = 0; i < im.w*im.h*im.c; ++i){
            im.data[i] = rand_normal();
        }
+        float mag = mag_array(im.data, im.w*im.h*im.c);
+        //scale_array(im.data, im.w*im.h*im.c, 1./mag);

        float *X = im.data;
        time=clock();
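The two added lines compute the L2 magnitude of the random input image, and the commented-out call would rescale it to unit length. A minimal standalone sketch of that normalization, using hypothetical stand-ins for darknet's mag_array, scale_array, and rand_normal:

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins; names are hypothetical, not darknet's API. */
static float l2_mag(const float *a, int n)
{
    double sum = 0;
    int i;
    for (i = 0; i < n; ++i) sum += (double)a[i] * a[i];
    return (float)sqrt(sum);
}

static void scale_in_place(float *a, int n, float s)
{
    int i;
    for (i = 0; i < n; ++i) a[i] *= s;
}

int main(void)
{
    enum { N = 64 * 64 * 3 };                       /* e.g. one 64x64 RGB noise image */
    static float z[N];
    int i;
    for (i = 0; i < N; ++i)
        z[i] = (float)rand() / RAND_MAX - 0.5f;     /* crude stand-in for rand_normal() */

    float mag = l2_mag(z, N);
    scale_in_place(z, N, 1.f / mag);                /* what the commented-out scale_array would do */
    printf("magnitude after scaling: %f\n", l2_mag(z, N));   /* ~1.0 */
    return 0;
}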
@@ -426,21 +428,10 @@ void test_dcgan(char *cfgfile, char *weightfile)
        }
    }

-void dcgan_batch(network gnet, network anet)
-{
-    //float *input = calloc(x_size, sizeof(float));
-}
-

void train_dcgan(char *cfg, char *weight, char *acfg, char *aweight, int clear, int display, char *train_images)
{
#ifdef GPU
-    //char *train_images = "/home/pjreddie/data/coco/train1.txt";
-    //char *train_images = "/home/pjreddie/data/coco/trainvalno5k.txt";
-    //char *train_images = "/home/pjreddie/data/imagenet/imagenet1k.train.list";
-    //char *train_images = "data/64.txt";
-    //char *train_images = "data/alp.txt";
-    //char *train_images = "data/cifar.txt";
    char *backup_directory = "/home/pjreddie/backup/";
    srand(time(0));
    char *base = basecfg(cfg);
@@ -498,7 +489,7 @@ void train_dcgan(char *cfg, char *weight, char *acfg, char *aweight, int clear,
    //data generated = copy_data(train);

    while (get_current_batch(gnet) < gnet->max_batches) {
        start += 1;
        i += 1;
        time=clock();
        pthread_join(load_thread, 0);
@@ -513,8 +504,8 @@ void train_dcgan(char *cfg, char *weight, char *acfg, char *aweight, int clear,

        data gen = copy_data(train);
        for (j = 0; j < imgs; ++j) {
-            train.y.vals[j][0] = .95;
-            gen.y.vals[j][0] = .05;
+            train.y.vals[j][0] = 1;
+            gen.y.vals[j][0] = 0;
        }
        time=clock();

@@ -524,31 +515,35 @@ void train_dcgan(char *cfg, char *weight, char *acfg, char *aweight, int clear,
            for(z = 0; z < x_size; ++z){
                gnet->input[z] = rand_normal();
            }
+            for(z = 0; z < gnet->batch; ++z){
+                float mag = mag_array(gnet->input + z*gnet->inputs, gnet->inputs);
+                scale_array(gnet->input + z*gnet->inputs, gnet->inputs, 1./mag);
+            }

-            cuda_push_array(gnet->input_gpu, gnet->input, x_size);
-            cuda_push_array(gnet->truth_gpu, gnet->truth, y_size);
+            //cuda_push_array(gnet->input_gpu, gnet->input, x_size);
+            //cuda_push_array(gnet->truth_gpu, gnet->truth, y_size);
            *gnet->seen += gnet->batch;
-            forward_network_gpu(gnet);
+            forward_network(gnet);

            fill_gpu(imlayer.outputs*imlayer.batch, 0, imerror, 1);
-            fill_gpu(anet->truths*anet->batch, .95, anet->truth_gpu, 1);
-            copy_gpu(anet->inputs*anet->batch, imlayer.output_gpu, 1, anet->input_gpu, 1);
+            fill_cpu(anet->truths*anet->batch, 1, anet->truth, 1);
+            copy_cpu(anet->inputs*anet->batch, imlayer.output, 1, anet->input, 1);
            anet->delta_gpu = imerror;
-            forward_network_gpu(anet);
-            backward_network_gpu(anet);
+            forward_network(anet);
+            backward_network(anet);

            float genaloss = *anet->cost / anet->batch;
            printf("%f\n", genaloss);

            scal_gpu(imlayer.outputs*imlayer.batch, 1, imerror, 1);
-            scal_gpu(imlayer.outputs*imlayer.batch, .00, gnet->layers[gnet->n-1].delta_gpu, 1);
+            scal_gpu(imlayer.outputs*imlayer.batch, 0, gnet->layers[gnet->n-1].delta_gpu, 1);

            printf("realness %f\n", cuda_mag_array(imerror, imlayer.outputs*imlayer.batch));
            printf("features %f\n", cuda_mag_array(gnet->layers[gnet->n-1].delta_gpu, imlayer.outputs*imlayer.batch));

            axpy_gpu(imlayer.outputs*imlayer.batch, 1, imerror, 1, gnet->layers[gnet->n-1].delta_gpu, 1);

-            backward_network_gpu(gnet);
+            backward_network(gnet);

            for(k = 0; k < gnet->batch; ++k){
                int index = j*gnet->batch + k;
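This hunk drops the explicit cuda_push_array calls and the *_gpu entry points in favor of the plain-named calls (forward_network, fill_cpu, copy_cpu), leaving it to the network itself to decide whether work runs on the GPU. A minimal sketch of that kind of dispatch, with a toy struct and made-up names rather than darknet's real API:

#include <stdio.h>

/* Hypothetical sketch of the single-entry-point pattern implied above;
   types and function names are illustrative only. */
typedef struct {
    int gpu_index;   /* < 0 meaning "no GPU", mirroring darknet's convention */
} toy_network;

static void forward_cpu(toy_network *net)  { (void)net; printf("forward on CPU\n"); }
static void forward_gpu(toy_network *net)  { (void)net; printf("forward on GPU\n"); }

/* One entry point for callers, like forward_network(gnet) in the new code. */
static void forward(toy_network *net)
{
    if (net->gpu_index >= 0) forward_gpu(net);
    else                     forward_cpu(net);
}

int main(void)
{
    toy_network cpu_net = { -1 };
    toy_network gpu_net = {  0 };
    forward(&cpu_net);   /* prints: forward on CPU */
    forward(&gpu_net);   /* prints: forward on GPU */
    return 0;
}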
@@ -565,23 +560,25 @@ void train_dcgan(char *cfg, char *weight, char *acfg, char *aweight, int clear,
            //scale_image(im, .5);
            //translate_image(im2, 1);
            //scale_image(im2, .5);
#ifdef OPENCV
            if(display){
                image im = float_to_image(anet->w, anet->h, anet->c, gen.X.vals[0]);
                image im2 = float_to_image(anet->w, anet->h, anet->c, train.X.vals[0]);
                show_image(im, "gen");
                show_image(im2, "train");
+                save_image(im, "gen");
+                save_image(im2, "train");
                cvWaitKey(50);
            }
#endif

            /*
               if(aloss < .1){
               anet->learning_rate = 0;
               } else if (aloss > .3){
               anet->learning_rate = orig_rate;
               }
             */

            update_network_gpu(gnet);

@@ -747,7 +744,7 @@ void train_colorizer(char *cfg, char *weight, char *acfg, char *aweight, int cle

        update_network_gpu(net);

#ifdef OPENCV
        if(display){
            image im = float_to_image(anet->w, anet->h, anet->c, gray.X.vals[0]);
            image im2 = float_to_image(anet->w, anet->h, anet->c, train.X.vals[0]);
@@ -755,7 +752,7 @@ void train_colorizer(char *cfg, char *weight, char *acfg, char *aweight, int cle
            show_image(im2, "train");
            cvWaitKey(50);
        }
#endif
        free_data(merge);
        free_data(train);
        free_data(gray);
@@ -786,259 +783,259 @@ void train_colorizer(char *cfg, char *weight, char *acfg, char *aweight, int cle
}

/*
void train_lsd2(char *cfgfile, char *weightfile, char *acfgfile, char *aweightfile, int clear)
{
#ifdef GPU
    char *train_images = "/home/pjreddie/data/coco/trainvalno5k.txt";
    char *backup_directory = "/home/pjreddie/backup/";
    srand(time(0));
    char *base = basecfg(cfgfile);
    printf("%s\n", base);
    network net = parse_network_cfg(cfgfile);
    if(weightfile){
        load_weights(&net, weightfile);
    }
    if(clear) *net->seen = 0;

    char *abase = basecfg(acfgfile);
    network anet = parse_network_cfg(acfgfile);
    if(aweightfile){
        load_weights(&anet, aweightfile);
    }
    if(clear) *anet->seen = 0;

    int i, j, k;
    layer imlayer = {0};
    for (i = 0; i < net->n; ++i) {
        if (net->layers[i].out_c == 3) {
            imlayer = net->layers[i];
            break;
        }
    }

    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    int imgs = net->batch*net->subdivisions;
    i = *net->seen/imgs;
    data train, buffer;

    list *plist = get_paths(train_images);
    //int N = plist->size;
    char **paths = (char **)list_to_array(plist);

    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    args.paths = paths;
    args.n = imgs;
    args.m = plist->size;
    args.d = &buffer;

    args.min = net->min_crop;
    args.max = net->max_crop;
    args.angle = net->angle;
    args.aspect = net->aspect;
    args.exposure = net->exposure;
    args.saturation = net->saturation;
    args.hue = net->hue;
    args.size = net->w;
    args.type = CLASSIFICATION_DATA;
    args.classes = 1;
    char *ls[1] = {"coco"};
    args.labels = ls;

    pthread_t load_thread = load_data_in_thread(args);
    clock_t time;

    network_state gstate = {0};
    gstate.index = 0;
    gstate.net = net;
    int x_size = get_network_input_size(net)*net->batch;
    int y_size = 1*net->batch;
    gstate.input = cuda_make_array(0, x_size);
    gstate.truth = 0;
    gstate.delta = 0;
    gstate.train = 1;
    float *X = calloc(x_size, sizeof(float));
    float *y = calloc(y_size, sizeof(float));

    network_state astate = {0};
    astate.index = 0;
    astate.net = anet;
    int ay_size = get_network_output_size(anet)*anet->batch;
    astate.input = 0;
    astate.truth = 0;
    astate.delta = 0;
    astate.train = 1;

    float *imerror = cuda_make_array(0, imlayer.outputs);
    float *ones_gpu = cuda_make_array(0, ay_size);
    fill_gpu(ay_size, 1, ones_gpu, 1);

    float aloss_avg = -1;
    float gloss_avg = -1;

    //data generated = copy_data(train);

    while (get_current_batch(net) < net->max_batches) {
        i += 1;
        time=clock();
        pthread_join(load_thread, 0);
        train = buffer;
        load_thread = load_data_in_thread(args);

        printf("Loaded: %lf seconds\n", sec(clock()-time));

        data generated = copy_data(train);
        time=clock();
        float gloss = 0;

        for(j = 0; j < net->subdivisions; ++j){
            get_next_batch(train, net->batch, j*net->batch, X, y);
            cuda_push_array(gstate.input, X, x_size);
            *net->seen += net->batch;
            forward_network_gpu(net, gstate);

            fill_gpu(imlayer.outputs, 0, imerror, 1);
            astate.input = imlayer.output_gpu;
            astate.delta = imerror;
            astate.truth = ones_gpu;
            forward_network_gpu(anet, astate);
            backward_network_gpu(anet, astate);

            scal_gpu(imlayer.outputs, 1, imerror, 1);
            axpy_gpu(imlayer.outputs, 1, imerror, 1, imlayer.delta_gpu, 1);

            backward_network_gpu(net, gstate);

            printf("features %f\n", cuda_mag_array(imlayer.delta_gpu, imlayer.outputs));
            printf("realness %f\n", cuda_mag_array(imerror, imlayer.outputs));

            gloss += get_network_cost(net) /(net->subdivisions*net->batch);

            cuda_pull_array(imlayer.output_gpu, imlayer.output, imlayer.outputs*imlayer.batch);
            for(k = 0; k < net->batch; ++k){
                int index = j*net->batch + k;
                copy_cpu(imlayer.outputs, imlayer.output + k*imlayer.outputs, 1, generated.X.vals[index], 1);
                generated.y.vals[index][0] = 0;
            }
        }
        harmless_update_network_gpu(anet);

        data merge = concat_data(train, generated);
        randomize_data(merge);
        float aloss = train_network(anet, merge);

        update_network_gpu(net);
        update_network_gpu(anet);
        free_data(merge);
        free_data(train);
        free_data(generated);
        if (aloss_avg < 0) aloss_avg = aloss;
        aloss_avg = aloss_avg*.9 + aloss*.1;
        gloss_avg = gloss_avg*.9 + gloss*.1;

        printf("%d: gen: %f, adv: %f | gen_avg: %f, adv_avg: %f, %f rate, %lf seconds, %d images\n", i, gloss, aloss, gloss_avg, aloss_avg, get_current_rate(net), sec(clock()-time), i*imgs);
        if(i%1000==0){
            char buff[256];
            sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
            save_weights(net, buff);
            sprintf(buff, "%s/%s_%d.weights", backup_directory, abase, i);
            save_weights(anet, buff);
        }
        if(i%100==0){
            char buff[256];
            sprintf(buff, "%s/%s.backup", backup_directory, base);
            save_weights(net, buff);
            sprintf(buff, "%s/%s.backup", backup_directory, abase);
            save_weights(anet, buff);
        }
    }
    char buff[256];
    sprintf(buff, "%s/%s_final.weights", backup_directory, base);
    save_weights(net, buff);
#endif
}
*/

/*
void train_lsd(char *cfgfile, char *weightfile, int clear)
{
    char *train_images = "/home/pjreddie/data/coco/trainvalno5k.txt";
    char *backup_directory = "/home/pjreddie/backup/";
    srand(time(0));
    char *base = basecfg(cfgfile);
    printf("%s\n", base);
    float avg_loss = -1;
    network net = parse_network_cfg(cfgfile);
    if(weightfile){
        load_weights(&net, weightfile);
    }
    if(clear) *net->seen = 0;
    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    int imgs = net->batch*net->subdivisions;
    int i = *net->seen/imgs;
    data train, buffer;

    list *plist = get_paths(train_images);
    //int N = plist->size;
    char **paths = (char **)list_to_array(plist);

    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    args.paths = paths;
    args.n = imgs;
    args.m = plist->size;
    args.d = &buffer;

    args.min = net->min_crop;
    args.max = net->max_crop;
    args.angle = net->angle;
    args.aspect = net->aspect;
    args.exposure = net->exposure;
    args.saturation = net->saturation;
    args.hue = net->hue;
    args.size = net->w;
    args.type = CLASSIFICATION_DATA;
    args.classes = 1;
    char *ls[1] = {"coco"};
    args.labels = ls;

    pthread_t load_thread = load_data_in_thread(args);
    clock_t time;
    //while(i*imgs < N*120){
    while(get_current_batch(net) < net->max_batches){
        i += 1;
        time=clock();
        pthread_join(load_thread, 0);
        train = buffer;
        load_thread = load_data_in_thread(args);

        printf("Loaded: %lf seconds\n", sec(clock()-time));

        time=clock();
        float loss = train_network(net, train);
        if (avg_loss < 0) avg_loss = loss;
        avg_loss = avg_loss*.9 + loss*.1;

        printf("%d: %f, %f avg, %f rate, %lf seconds, %d images\n", i, loss, avg_loss, get_current_rate(net), sec(clock()-time), i*imgs);
        if(i%1000==0){
            char buff[256];
            sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
            save_weights(net, buff);
        }
        if(i%100==0){
            char buff[256];
            sprintf(buff, "%s/%s.backup", backup_directory, base);
            save_weights(net, buff);
        }
        free_data(train);
    }
    char buff[256];
    sprintf(buff, "%s/%s_final.weights", backup_directory, base);
    save_weights(net, buff);
}
*/