Mirror of https://github.com/pjreddie/darknet.git
commit 08b757a0bf (parent aa5996d58e)

    Stable, needs to be way faster
@@ -105,7 +105,7 @@ void train_detection_net(char *cfgfile)
         time=clock();
         float loss = train_network(net, train);
         avg_loss = avg_loss*.9 + loss*.1;
-        printf("%d: %f, %f avg, %lf seconds, %d images\n", i, loss, avg_loss, sec(clock()-time), i*imgs*net.batch);
+        printf("%d: %f, %f avg, %lf seconds, %d images\n", i, loss, avg_loss, sec(clock()-time), i*imgs);
         if(i%100==0){
             char buff[256];
             sprintf(buff, "/home/pjreddie/imagenet_backup/detnet_%d.cfg", i);
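Note: the `avg_loss = avg_loss*.9 + loss*.1` line kept in context above is an exponential moving average of the per-batch loss, so the printed value is smoother than the raw loss. A minimal standalone sketch of that smoothing (made-up loss values, plain C, not darknet code):

/* Standalone sketch of the running-average smoothing used in the training
 * loop above; the loss values are invented for illustration. */
#include <stdio.h>

int main()
{
    float losses[] = {2.30f, 2.10f, 2.50f, 1.90f, 1.70f};
    float avg_loss = -1;
    for (int i = 0; i < 5; ++i) {
        float loss = losses[i];
        if (avg_loss < 0) avg_loss = loss;      /* seed on the first batch */
        avg_loss = avg_loss*.9f + loss*.1f;     /* same update as in the diff */
        printf("%d: %f, %f avg\n", i, loss, avg_loss);
    }
    return 0;
}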
@@ -213,7 +213,7 @@ void train_imagenet(char *cfgfile)
     set_learning_network(&net, net.learning_rate, 0, net.decay);
     printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay);
     int imgs = 1024;
-    int i = 77700;
+    int i = 0;
     char **labels = get_labels("/home/pjreddie/data/imagenet/cls.labels.list");
     list *plist = get_paths("/data/imagenet/cls.train.list");
     char **paths = (char **)list_to_array(plist);
@@ -240,7 +240,7 @@ void train_imagenet(char *cfgfile)
         free_data(train);
         if(i%100==0){
             char buff[256];
-            sprintf(buff, "/home/pjreddie/imagenet_backup/net_%d.cfg", i);
+            sprintf(buff, "/home/pjreddie/imagenet_backup/alexnet_%d.cfg", i);
             save_network(net, buff);
         }
     }
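Note: the `if(i%100==0)` block retained above is a periodic checkpoint: every 100 iterations the current network is written to a numbered .cfg file, and this hunk only renames the prefix from net_ to alexnet_. A small sketch of the same cadence, with a hypothetical save_model() and path standing in for save_network() and the real backup directory:

/* Hypothetical illustration of the periodic-checkpoint pattern above;
 * save_model() and the path are placeholders, not darknet functions. */
#include <stdio.h>

static void save_model(const char *path)
{
    printf("saving checkpoint to %s\n", path);  /* stand-in for save_network() */
}

int main()
{
    for (int i = 1; i <= 300; ++i) {
        /* ... one training iteration would run here ... */
        if (i % 100 == 0) {                     /* same cadence as the diff */
            char buff[256];
            sprintf(buff, "backup/net_%d.cfg", i);
            save_model(buff);
        }
    }
    return 0;
}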
@@ -15,6 +15,35 @@
 #include "softmax_layer.h"
 #include "dropout_layer.h"
 
+char *get_layer_string(LAYER_TYPE a)
+{
+    switch(a){
+        case CONVOLUTIONAL:
+            return "convolutional";
+        case CONNECTED:
+            return "connected";
+        case MAXPOOL:
+            return "maxpool";
+        case SOFTMAX:
+            return "softmax";
+        case NORMALIZATION:
+            return "normalization";
+        case DROPOUT:
+            return "dropout";
+        case FREEWEIGHT:
+            return "freeweight";
+        case CROP:
+            return "crop";
+        case COST:
+            return "cost";
+        default:
+            break;
+    }
+    return "none";
+}
+
+
+
 network make_network(int n, int batch)
 {
     network net;
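Note: this hunk adds get_layer_string(), a LAYER_TYPE-to-name lookup used by the (still commented-out) per-layer timing printouts later in the diff. A self-contained sketch of the idea, using a reduced stand-in enum rather than darknet's full LAYER_TYPE:

/* Self-contained sketch of mapping a layer-type enum to a printable name.
 * The enum here is a reduced stand-in, not darknet's actual LAYER_TYPE. */
#include <stdio.h>

typedef enum { CONVOLUTIONAL, CONNECTED, MAXPOOL, SOFTMAX } LAYER_TYPE;

static const char *layer_string(LAYER_TYPE a)
{
    switch (a) {
        case CONVOLUTIONAL: return "convolutional";
        case CONNECTED:     return "connected";
        case MAXPOOL:       return "maxpool";
        case SOFTMAX:       return "softmax";
        default:            break;
    }
    return "none";
}

int main()
{
    LAYER_TYPE types[] = {CONVOLUTIONAL, MAXPOOL, CONNECTED, SOFTMAX};
    for (int i = 0; i < 4; ++i) {
        /* the diff uses this kind of lookup to label per-layer timing output */
        printf("layer %d: %s\n", i, layer_string(types[i]));
    }
    return 0;
}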
@@ -41,6 +41,7 @@ float *network_predict_gpu(network net, float *input);
 #endif
 
 void compare_networks(network n1, network n2, data d);
+char *get_layer_string(LAYER_TYPE a);
 
 network make_network(int n, int batch);
 void forward_network(network net, float *input, float *truth, int train);
@@ -24,7 +24,7 @@ void forward_network_gpu(network net, cl_mem input, cl_mem truth, int train)
 {
     int i;
     for(i = 0; i < net.n; ++i){
-        clock_t time = clock();
+        //clock_t time = clock();
         if(net.types[i] == CONVOLUTIONAL){
             convolutional_layer layer = *(convolutional_layer *)net.layers[i];
             forward_convolutional_layer_gpu(layer, input);
@@ -61,7 +61,7 @@ void forward_network_gpu(network net, cl_mem input, cl_mem truth, int train)
             input = layer.output_cl;
         }
         check_error(cl);
-        //printf("Forw %d %f\n", i, sec(clock() - time));
+        //printf("Forward %d %s %f\n", i, get_layer_string(net.types[i]), sec(clock() - time));
     }
 }
 
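Note: when uncommented, the printf above reports how long each layer's forward pass took, now labeled with the layer name via get_layer_string(). A standalone sketch of that clock()-based timing pattern, with a dummy workload and a local stand-in for darknet's sec() helper:

/* Standalone sketch of the per-layer timing pattern left commented out in
 * forward_network_gpu(); the workload below is a dummy loop. */
#include <stdio.h>
#include <time.h>

static double sec(clock_t clocks)
{
    return (double)clocks / CLOCKS_PER_SEC;     /* stand-in for darknet's sec() */
}

int main()
{
    volatile double sink = 0;
    for (int i = 0; i < 3; ++i) {
        clock_t time = clock();
        for (long j = 0; j < 10000000; ++j) sink += j * 0.5;   /* stand-in for a layer */
        printf("Forward %d %s %f\n", i, "convolutional", sec(clock() - time));
    }
    return 0;
}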
@@ -71,7 +71,7 @@ void backward_network_gpu(network net, cl_mem input)
     cl_mem prev_input;
     cl_mem prev_delta;
     for(i = net.n-1; i >= 0; --i){
-        clock_t time = clock();
+        //clock_t time = clock();
         if(i == 0){
             prev_input = input;
             prev_delta = 0;
@@ -104,7 +104,7 @@ void backward_network_gpu(network net, cl_mem input)
             backward_softmax_layer_gpu(layer, prev_delta);
         }
         check_error(cl);
-        //printf("Back %d %f\n", i, sec(clock() - time));
+        //printf("Backward %d %s %f\n", i, get_layer_string(net.types[i]), sec(clock() - time));
     }
 }
 