diff --git a/Makefile b/Makefile
index de515d32..c9b6ecac 100644
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,5 @@
-GPU=1
-OPENCV=1
+GPU=0
+OPENCV=0
 DEBUG=0
 
 ARCH= --gpu-architecture=compute_20 --gpu-code=compute_20
diff --git a/cfg/rnn.cfg b/cfg/rnn.cfg
index a67e1fa1..68c032d2 100644
--- a/cfg/rnn.cfg
+++ b/cfg/rnn.cfg
@@ -1,29 +1,32 @@
 [net]
 subdivisions=1
 inputs=256
-batch = 128
+batch = 1
 momentum=0.9
 decay=0.001
-max_batches = 50000
-time_steps=900
+max_batches = 2000
+time_steps=1
 learning_rate=0.1
+policy=steps
+steps=1000,1500
+scales=.1,.1
 
 [rnn]
 batch_normalize=1
-output = 256
-hidden=512
+output = 1024
+hidden=1024
 activation=leaky
 
 [rnn]
 batch_normalize=1
-output = 256
-hidden=512
+output = 1024
+hidden=1024
 activation=leaky
 
 [rnn]
 batch_normalize=1
-output = 256
-hidden=512
+output = 1024
+hidden=1024
 activation=leaky
 
 [connected]
diff --git a/cfg/rnn.train.cfg b/cfg/rnn.train.cfg
new file mode 100644
index 00000000..9139757f
--- /dev/null
+++ b/cfg/rnn.train.cfg
@@ -0,0 +1,40 @@
+[net]
+subdivisions=1
+inputs=256
+batch = 128
+momentum=0.9
+decay=0.001
+max_batches = 2000
+time_steps=576
+learning_rate=0.1
+policy=steps
+steps=1000,1500
+scales=.1,.1
+
+[rnn]
+batch_normalize=1
+output = 1024
+hidden=1024
+activation=leaky
+
+[rnn]
+batch_normalize=1
+output = 1024
+hidden=1024
+activation=leaky
+
+[rnn]
+batch_normalize=1
+output = 1024
+hidden=1024
+activation=leaky
+
+[connected]
+output=256
+activation=leaky
+
+[softmax]
+
+[cost]
+type=sse
+
diff --git a/src/rnn.c b/src/rnn.c
index aee53ffb..3984d93d 100644
--- a/src/rnn.c
+++ b/src/rnn.c
@@ -12,22 +12,31 @@ typedef struct {
     float *y;
 } float_pair;
 
-float_pair get_rnn_data(char *text, int len, int batch, int steps)
+float_pair get_rnn_data(unsigned char *text, int characters, int len, int batch, int steps)
 {
-    float *x = calloc(batch * steps * 256, sizeof(float));
-    float *y = calloc(batch * steps * 256, sizeof(float));
+    float *x = calloc(batch * steps * characters, sizeof(float));
+    float *y = calloc(batch * steps * characters, sizeof(float));
     int i,j;
     for(i = 0; i < batch; ++i){
         int index = rand() %(len - steps - 1);
+        /*
         int done = 1;
         while(!done){
             index = rand() %(len - steps - 1);
             while(index < len-steps-1 && text[index++] != '\n');
             if (index < len-steps-1) done = 1;
-        }
+        }
+        */
        for(j = 0; j < steps; ++j){
-            x[(j*batch + i)*256 + text[index + j]] = 1;
-            y[(j*batch + i)*256 + text[index + j + 1]] = 1;
+            x[(j*batch + i)*characters + text[index + j]] = 1;
+            y[(j*batch + i)*characters + text[index + j + 1]] = 1;
+
+            if(text[index+j] > 255 || text[index+j] <= 0 || text[index+j+1] > 255 || text[index+j+1] <= 0){
+                text[index+j+2] = 0;
+                printf("%d %d %d %d %d\n", index, j, len, (int)text[index+j], (int)text[index+j+1]);
+                printf("%s", text+index);
+                error("Bad char");
+            }
         }
     }
     float_pair p;
@@ -38,7 +47,7 @@ float_pair get_rnn_data(char *text, int len, int batch, int steps)
 
 void train_char_rnn(char *cfgfile, char *weightfile, char *filename)
 {
-    FILE *fp = fopen(filename, "r");
+    FILE *fp = fopen(filename, "rb");
     //FILE *fp = fopen("data/ab.txt", "r");
     //FILE *fp = fopen("data/grrm/asoiaf.txt", "r");
 
@@ -46,7 +55,7 @@ void train_char_rnn(char *cfgfile, char *weightfile, char *filename)
     size_t size = ftell(fp);
     fseek(fp, 0, SEEK_SET);
 
-    char *text = calloc(size, sizeof(char));
+    unsigned char *text = calloc(size+1, sizeof(char));
     fread(text, 1, size, fp);
     fclose(fp);
 
@@ -60,6 +69,7 @@ void train_char_rnn(char *cfgfile, char *weightfile, char *filename)
     if(weightfile){
         load_weights(&net, weightfile);
     }
+    int inputs = get_network_input_size(net);
     fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay);
     int batch = net.batch;
     int steps = net.time_steps;
@@ -69,7 +79,7 @@ void train_char_rnn(char *cfgfile, char *weightfile, char *filename)
     while(get_current_batch(net) < net.max_batches){
         i += 1;
         time=clock();
-        float_pair p = get_rnn_data(text, size, batch/steps, steps);
+        float_pair p = get_rnn_data(text, inputs, size, batch/steps, steps);
 
         float loss = train_network_datum(net, p.x, p.y) / (batch);
         free(p.x);
@@ -104,12 +114,13 @@ void test_char_rnn(char *cfgfile, char *weightfile, int num, char *seed, float t
     if(weightfile){
         load_weights(&net, weightfile);
     }
-
+    int inputs = get_network_input_size(net);
+
     int i, j;
     for(i = 0; i < net.n; ++i) net.layers[i].temperature = temp;
-    char c;
+    unsigned char c;
     int len = strlen(seed);
-    float *input = calloc(256, sizeof(float));
+    float *input = calloc(inputs, sizeof(float));
     for(i = 0; i < len-1; ++i){
         c = seed[i];
         input[(int)c] = 1;
@@ -125,7 +136,7 @@ void test_char_rnn(char *cfgfile, char *weightfile, int num, char *seed, float t
         input[(int)c] = 1;
         float *out = network_predict(net, input);
         input[(int)c] = 0;
-        for(j = 0; j < 256; ++j){
+        for(j = 0; j < inputs; ++j){
             sum += out[j];
             if(sum > r) break;
         }
@@ -134,20 +145,8 @@ void test_char_rnn(char *cfgfile, char *weightfile, int num, char *seed, float t
     printf("\n");
 }
 
-void valid_char_rnn(char *cfgfile, char *weightfile, char *filename)
+void valid_char_rnn(char *cfgfile, char *weightfile)
 {
-    FILE *fp = fopen(filename, "r");
-    //FILE *fp = fopen("data/ab.txt", "r");
-    //FILE *fp = fopen("data/grrm/asoiaf.txt", "r");
-
-    fseek(fp, 0, SEEK_END);
-    size_t size = ftell(fp);
-    fseek(fp, 0, SEEK_SET);
-
-    char *text = calloc(size, sizeof(char));
-    fread(text, 1, size, fp);
-    fclose(fp);
-
     char *base = basecfg(cfgfile);
     fprintf(stderr, "%s\n", base);
 
@@ -155,19 +154,25 @@ void valid_char_rnn(char *cfgfile, char *weightfile)
     if(weightfile){
         load_weights(&net, weightfile);
     }
-
-    int i;
-    char c;
-    float *input = calloc(256, sizeof(float));
+    int inputs = get_network_input_size(net);
+
+    int count = 0;
+    int c;
+    float *input = calloc(inputs, sizeof(float));
     float sum = 0;
-    for(i = 0; i < size-1; ++i){
-        c = text[i];
-        input[(int)c] = 1;
+    c = getc(stdin);
+    float log2 = log(2);
+    while(c != EOF){
+        int next = getc(stdin);
+        if(next == EOF) break;
+        ++count;
+        input[c] = 1;
         float *out = network_predict(net, input);
-        input[(int)c] = 0;
-        sum += log(out[(int)text[i+1]]);
+        input[c] = 0;
+        sum += log(out[next])/log2;
+        c = next;
     }
-    printf("Log Probability: %f\n", sum);
+    printf("Perplexity: %f\n", pow(2, -sum/count));
 }
 
 
@@ -179,13 +184,13 @@ void run_char_rnn(int argc, char **argv)
     }
     char *filename = find_char_arg(argc, argv, "-file", "data/shakespeare.txt");
     char *seed = find_char_arg(argc, argv, "-seed", "\n");
-    int len = find_int_arg(argc, argv, "-len", 100);
-    float temp = find_float_arg(argc, argv, "-temp", 1);
+    int len = find_int_arg(argc, argv, "-len", 1000);
+    float temp = find_float_arg(argc, argv, "-temp", .7);
     int rseed = find_int_arg(argc, argv, "-srand", time(0));
 
     char *cfg = argv[3];
     char *weights = (argc > 4) ? argv[4] : 0;
     if(0==strcmp(argv[2], "train")) train_char_rnn(cfg, weights, filename);
-    else if(0==strcmp(argv[2], "valid")) valid_char_rnn(cfg, weights, filename);
+    else if(0==strcmp(argv[2], "valid")) valid_char_rnn(cfg, weights);
     else if(0==strcmp(argv[2], "test")) test_char_rnn(cfg, weights, len, seed, temp, rseed);
 }
diff --git a/src/rnn_layer.c b/src/rnn_layer.c
index e58e0a4b..a6b025da 100644
--- a/src/rnn_layer.c
+++ b/src/rnn_layer.c
@@ -10,6 +10,19 @@
 #include <stdio.h>
 #include <stdlib.h>
 
+void increment_layer(layer *l, int steps)
+{
+    int num = l->outputs*l->batch*steps;
+    l->output += num;
+    l->delta += num;
+    l->x += num;
+    l->x_norm += num;
+
+    l->output_gpu += num;
+    l->delta_gpu += num;
+    l->x_gpu += num;
+    l->x_norm_gpu += num;
+}
 
 layer make_rnn_layer(int batch, int inputs, int hidden, int outputs, int steps, ACTIVATION activation, int batch_normalize, int log)
 {
@@ -22,7 +35,7 @@ layer make_rnn_layer(int batch, int inputs, int hidden, int outputs, int steps, ACTIVATION activation, int batch_normalize, int log)
     l.hidden = hidden;
     l.inputs = inputs;
 
-    l.state = calloc(batch*hidden, sizeof(float));
+    l.state = calloc(batch*hidden*(steps+1), sizeof(float));
 
     l.input_layer = malloc(sizeof(layer));
     fprintf(stderr, "\t\t");
@@ -43,11 +56,11 @@ layer make_rnn_layer(int batch, int inputs, int hidden, int outputs, int steps, ACTIVATION activation, int batch_normalize, int log)
     l.output = l.output_layer->output;
     l.delta = l.output_layer->delta;
 
-    #ifdef GPU
-    l.state_gpu = cuda_make_array(l.state, batch*hidden);
+#ifdef GPU
+    l.state_gpu = cuda_make_array(l.state, batch*hidden*(steps+1));
     l.output_gpu = l.output_layer->output_gpu;
     l.delta_gpu = l.output_layer->delta_gpu;
-    #endif
+#endif
 
     return l;
 }
@@ -80,16 +93,23 @@ void forward_rnn_layer(layer l, network_state state)
         s.input = l.state;
         forward_connected_layer(self_layer, s);
 
-        copy_cpu(l.hidden * l.batch, input_layer.output, 1, l.state, 1);
+        float *old_state = l.state;
+        if(state.train) l.state += l.hidden*l.batch;
+        if(l.shortcut){
+            copy_cpu(l.hidden * l.batch, old_state, 1, l.state, 1);
+        }else{
+            fill_cpu(l.hidden * l.batch, 0, l.state, 1);
+        }
+        axpy_cpu(l.hidden * l.batch, 1, input_layer.output, 1, l.state, 1);
         axpy_cpu(l.hidden * l.batch, 1, self_layer.output, 1, l.state, 1);
 
         s.input = l.state;
         forward_connected_layer(output_layer, s);
 
         state.input += l.inputs*l.batch;
-        input_layer.output += l.hidden*l.batch;
-        self_layer.output += l.hidden*l.batch;
-        output_layer.output += l.outputs*l.batch;
+        increment_layer(&input_layer, 1);
+        increment_layer(&self_layer, 1);
+        increment_layer(&output_layer, 1);
     }
 }
 
@@ -101,14 +121,12 @@ void backward_rnn_layer(layer l, network_state state)
     layer input_layer = *(l.input_layer);
     layer self_layer = *(l.self_layer);
     layer output_layer = *(l.output_layer);
-    input_layer.output += l.hidden*l.batch*(l.steps-1);
-    input_layer.delta += l.hidden*l.batch*(l.steps-1);
 
-    self_layer.output += l.hidden*l.batch*(l.steps-1);
-    self_layer.delta += l.hidden*l.batch*(l.steps-1);
+    increment_layer(&input_layer, l.steps-1);
+    increment_layer(&self_layer, l.steps-1);
+    increment_layer(&output_layer, l.steps-1);
 
-    output_layer.output += l.outputs*l.batch*(l.steps-1);
-    output_layer.delta += l.outputs*l.batch*(l.steps-1);
+    l.state += l.hidden*l.batch*l.steps;
     for (i = l.steps-1; i >= 0; --i) {
         copy_cpu(l.hidden * l.batch, input_layer.output, 1, l.state, 1);
         axpy_cpu(l.hidden * l.batch, 1, self_layer.output, 1, l.state, 1);
@@ -116,13 +134,16 @@ void backward_rnn_layer(layer l, network_state state)
         s.input = l.state;
         s.delta = self_layer.delta;
         backward_connected_layer(output_layer, s);
-
-        if(i > 0){
-            copy_cpu(l.hidden * l.batch, input_layer.output - l.hidden*l.batch, 1, l.state, 1);
-            axpy_cpu(l.hidden * l.batch, 1, self_layer.output - l.hidden*l.batch, 1, l.state, 1);
-        }else{
-            fill_cpu(l.hidden * l.batch, 0, l.state, 1);
-        }
+
+        l.state -= l.hidden*l.batch;
+        /*
+        if(i > 0){
+            copy_cpu(l.hidden * l.batch, input_layer.output - l.hidden*l.batch, 1, l.state, 1);
+            axpy_cpu(l.hidden * l.batch, 1, self_layer.output - l.hidden*l.batch, 1, l.state, 1);
+        }else{
+            fill_cpu(l.hidden * l.batch, 0, l.state, 1);
+        }
+        */
 
         s.input = l.state;
         s.delta = self_layer.delta - l.hidden*l.batch;
@@ -130,19 +151,15 @@ void backward_rnn_layer(layer l, network_state state)
         backward_connected_layer(self_layer, s);
 
         copy_cpu(l.hidden*l.batch, self_layer.delta, 1, input_layer.delta, 1);
+        if (i > 0 && l.shortcut) axpy_cpu(l.hidden*l.batch, 1, self_layer.delta, 1, self_layer.delta - l.hidden*l.batch, 1);
 
         s.input = state.input + i*l.inputs*l.batch;
        if(state.delta) s.delta = state.delta + i*l.inputs*l.batch;
         else s.delta = 0;
         backward_connected_layer(input_layer, s);
 
-        input_layer.output -= l.hidden*l.batch;
-        input_layer.delta -= l.hidden*l.batch;
-
-        self_layer.output -= l.hidden*l.batch;
-        self_layer.delta -= l.hidden*l.batch;
-
-        output_layer.output -= l.outputs*l.batch;
-        output_layer.delta -= l.outputs*l.batch;
+        increment_layer(&input_layer, -1);
+        increment_layer(&self_layer, -1);
+        increment_layer(&output_layer, -1);
     }
 }
 
@@ -190,23 +207,23 @@ void forward_rnn_layer_gpu(layer l, network_state state)
         s.input = l.state_gpu;
         forward_connected_layer_gpu(self_layer, s);
 
-        copy_ongpu(l.hidden * l.batch, input_layer.output_gpu, 1, l.state_gpu, 1);
+        float *old_state = l.state_gpu;
+        if(state.train) l.state_gpu += l.hidden*l.batch;
+        if(l.shortcut){
+            copy_ongpu(l.hidden * l.batch, old_state, 1, l.state_gpu, 1);
+        }else{
+            fill_ongpu(l.hidden * l.batch, 0, l.state_gpu, 1);
+        }
+        axpy_ongpu(l.hidden * l.batch, 1, input_layer.output_gpu, 1, l.state_gpu, 1);
         axpy_ongpu(l.hidden * l.batch, 1, self_layer.output_gpu, 1, l.state_gpu, 1);
+
         s.input = l.state_gpu;
         forward_connected_layer_gpu(output_layer, s);
 
         state.input += l.inputs*l.batch;
-        input_layer.output_gpu += l.hidden*l.batch;
-        input_layer.x_gpu += l.hidden*l.batch;
-        input_layer.x_norm_gpu += l.hidden*l.batch;
-
-        self_layer.output_gpu += l.hidden*l.batch;
-        self_layer.x_gpu += l.hidden*l.batch;
-        self_layer.x_norm_gpu += l.hidden*l.batch;
-
-        output_layer.output_gpu += l.outputs*l.batch;
-        output_layer.x_gpu += l.outputs*l.batch;
-        output_layer.x_norm_gpu += l.outputs*l.batch;
+        increment_layer(&input_layer, 1);
+        increment_layer(&self_layer, 1);
+        increment_layer(&output_layer, 1);
     }
 }
 
@@ -218,20 +235,10 @@ void backward_rnn_layer_gpu(layer l, network_state state)
     layer input_layer = *(l.input_layer);
     layer self_layer = *(l.self_layer);
     layer output_layer = *(l.output_layer);
-    input_layer.output_gpu += l.hidden*l.batch*(l.steps-1);
-    input_layer.delta_gpu += l.hidden*l.batch*(l.steps-1);
-    input_layer.x_gpu += l.hidden*l.batch*(l.steps-1);
-    input_layer.x_norm_gpu += l.hidden*l.batch*(l.steps-1);
-
-    self_layer.output_gpu += l.hidden*l.batch*(l.steps-1);
-    self_layer.delta_gpu += l.hidden*l.batch*(l.steps-1);
-    self_layer.x_gpu += l.hidden*l.batch*(l.steps-1);
-    self_layer.x_norm_gpu += l.hidden*l.batch*(l.steps-1);
-
-    output_layer.output_gpu += l.outputs*l.batch*(l.steps-1);
-    output_layer.delta_gpu += l.outputs*l.batch*(l.steps-1);
-    output_layer.x_gpu += l.outputs*l.batch*(l.steps-1);
-    output_layer.x_norm_gpu += l.outputs*l.batch*(l.steps-1);
+    increment_layer(&input_layer, l.steps - 1);
+    increment_layer(&self_layer, l.steps - 1);
+    increment_layer(&output_layer, l.steps - 1);
+    l.state_gpu += l.hidden*l.batch*l.steps;
     for (i = l.steps-1; i >= 0; --i) {
         copy_ongpu(l.hidden * l.batch, input_layer.output_gpu, 1, l.state_gpu, 1);
         axpy_ongpu(l.hidden * l.batch, 1, self_layer.output_gpu, 1, l.state_gpu, 1);
@@ -239,13 +246,8 @@ void backward_rnn_layer_gpu(layer l, network_state state)
         s.input = l.state_gpu;
         s.delta = self_layer.delta_gpu;
         backward_connected_layer_gpu(output_layer, s);
-
-        if(i > 0){
-            copy_ongpu(l.hidden * l.batch, input_layer.output_gpu - l.hidden*l.batch, 1, l.state_gpu, 1);
-            axpy_ongpu(l.hidden * l.batch, 1, self_layer.output_gpu - l.hidden*l.batch, 1, l.state_gpu, 1);
-        }else{
-            fill_ongpu(l.hidden * l.batch, 0, l.state_gpu, 1);
-        }
+
+        l.state_gpu -= l.hidden*l.batch;
 
         s.input = l.state_gpu;
         s.delta = self_layer.delta_gpu - l.hidden*l.batch;
@@ -253,25 +255,15 @@ void backward_rnn_layer_gpu(layer l, network_state state)
         backward_connected_layer_gpu(self_layer, s);
 
         copy_ongpu(l.hidden*l.batch, self_layer.delta_gpu, 1, input_layer.delta_gpu, 1);
+        if (i > 0 && l.shortcut) axpy_ongpu(l.hidden*l.batch, 1, self_layer.delta_gpu, 1, self_layer.delta_gpu - l.hidden*l.batch, 1);
 
         s.input = state.input + i*l.inputs*l.batch;
         if(state.delta) s.delta = state.delta + i*l.inputs*l.batch;
         else s.delta = 0;
         backward_connected_layer_gpu(input_layer, s);
 
-        input_layer.output_gpu -= l.hidden*l.batch;
-        input_layer.delta_gpu -= l.hidden*l.batch;
-        input_layer.x_gpu -= l.hidden*l.batch;
-        input_layer.x_norm_gpu -= l.hidden*l.batch;
-
-        self_layer.output_gpu -= l.hidden*l.batch;
-        self_layer.delta_gpu -= l.hidden*l.batch;
-        self_layer.x_gpu -= l.hidden*l.batch;
-        self_layer.x_norm_gpu -= l.hidden*l.batch;
-
-        output_layer.output_gpu -= l.outputs*l.batch;
-        output_layer.delta_gpu -= l.outputs*l.batch;
-        output_layer.x_gpu -= l.outputs*l.batch;
-        output_layer.x_norm_gpu -= l.outputs*l.batch;
+        increment_layer(&input_layer, -1);
+        increment_layer(&self_layer, -1);
+        increment_layer(&output_layer, -1);
     }
 }
 #endif
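
Note (illustration, not part of the patch): valid_char_rnn now scores a character stream by accumulating log2 of the model's probability for each observed next character and reporting perplexity, pow(2, -sum/count), instead of a raw natural-log probability. Perplexity is normalized per character, so it is comparable across texts of different lengths; a uniform model over 256 byte values scores exactly 256. A minimal standalone C sketch of that calculation, using a hypothetical perplexity() helper and made-up probabilities:

/* Standalone sketch of the perplexity math used in valid_char_rnn:
 * given the model's probability p[i] for each observed character,
 * accumulate log2 p[i] and return 2^(-average log2 likelihood). */
#include <math.h>
#include <stdio.h>

double perplexity(const double *p, int count)
{
    double sum = 0;
    int i;
    for(i = 0; i < count; ++i){
        sum += log(p[i]) / log(2);   /* log2 probability of the observed character */
    }
    return pow(2, -sum/count);       /* lower is better; 1 means perfect prediction */
}

int main()
{
    double p[4] = {.5, .25, .25, .5};   /* toy model outputs */
    printf("%f\n", perplexity(p, 4));   /* prints ~2.828427 */
    return 0;
}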