Mirror of https://github.com/pjreddie/darknet.git (synced 2023-08-10 21:13:14 +03:00)

Commit 3fb3eec650 (parent c725270342)

    OK SHOULD I START WORKING ON CVPR OR WHAT?
@@ -3,16 +3,64 @@
 #include <sys/time.h>
 #include <assert.h>
 
-void train_attention(char *datacfg, char *cfgfile, char *weightfile, char *cfgfile2, char *weightfile2, int *gpus, int ngpus, int clear)
+void extend_data_truth(data *d, int n, float val)
 {
-    int i;
+    int i, j;
+    for(i = 0; i < d->y.rows; ++i){
+        d->y.vals[i] = realloc(d->y.vals[i], (d->y.cols+n)*sizeof(float));
+        for(j = 0; j < n; ++j){
+            d->y.vals[i][d->y.cols + j] = val;
+        }
+    }
+    d->y.cols += n;
+}
+
-    float avg_loss = -1;
+matrix network_loss_data(network *net, data test)
+{
+    int i,b;
+    int k = 1;
+    matrix pred = make_matrix(test.X.rows, k);
+    float *X = calloc(net->batch*test.X.cols, sizeof(float));
+    float *y = calloc(net->batch*test.y.cols, sizeof(float));
+    for(i = 0; i < test.X.rows; i += net->batch){
+        for(b = 0; b < net->batch; ++b){
+            if(i+b == test.X.rows) break;
+            memcpy(X+b*test.X.cols, test.X.vals[i+b], test.X.cols*sizeof(float));
+            memcpy(y+b*test.y.cols, test.y.vals[i+b], test.y.cols*sizeof(float));
+        }
+
+        network orig = *net;
+        net->input = X;
+        net->truth = y;
+        net->train = 0;
+        net->delta = 0;
+        forward_network(net);
+        *net = orig;
+
+        float *delta = net->layers[net->n-1].output;
+        for(b = 0; b < net->batch; ++b){
+            if(i+b == test.X.rows) break;
+            int t = max_index(y + b*test.y.cols, 1000);
+            float err = sum_array(delta + b*net->outputs, net->outputs);
+            pred.vals[i+b][0] = -err;
+            //pred.vals[i+b][0] = 1-delta[b*net->outputs + t];
+        }
+    }
+    free(X);
+    free(y);
+    return pred;
+}
+
+void train_attention(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
+{
+    int i, j;
+
+    float avg_cls_loss = -1;
+    float avg_att_loss = -1;
     char *base = basecfg(cfgfile);
     printf("%s\n", base);
     printf("%d\n", ngpus);
-    network **attnets = calloc(ngpus, sizeof(network*));
+    network **nets = calloc(ngpus, sizeof(network*));
-    network **clsnets = calloc(ngpus, sizeof(network*));
 
     srand(time(0));
     int seed = rand();
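Note: extend_data_truth simply grows every truth row in place and back-fills the new columns, while network_loss_data runs a forward pass batch by batch and records one scalar score per example. A minimal standalone sketch of the row-widening pattern (not part of the commit; matrix_t is a stand-in for darknet's matrix type):

#include <stdio.h>
#include <stdlib.h>

typedef struct { int rows, cols; float **vals; } matrix_t;   /* stand-in for darknet's matrix */

/* grow every row by n columns and fill the new columns with val */
void extend_rows(matrix_t *m, int n, float val)
{
    int i, j;
    for(i = 0; i < m->rows; ++i){
        m->vals[i] = realloc(m->vals[i], (m->cols + n)*sizeof(float));
        for(j = 0; j < n; ++j) m->vals[i][m->cols + j] = val;
    }
    m->cols += n;
}

int main()
{
    int i;
    matrix_t m = { 2, 3, calloc(2, sizeof(float*)) };
    for(i = 0; i < m.rows; ++i) m.vals[i] = calloc(m.cols, sizeof(float));
    extend_rows(&m, 4, 0.5f);                 /* rows grow from 3 to 7 columns */
    printf("%d cols, last value %.1f\n", m.cols, m.vals[0][m.cols-1]);
    return 0;
}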
@@ -21,14 +69,11 @@ void train_attention(char *datacfg, char *cfgfile, char *weightfile, char *cfgfile2, char *weightfile2, int *gpus, int ngpus, int clear)
 #ifdef GPU
         cuda_set_device(gpus[i]);
 #endif
-        attnets[i] = load_network(cfgfile, weightfile, clear);
+        nets[i] = load_network(cfgfile, weightfile, clear);
-        attnets[i]->learning_rate *= ngpus;
+        nets[i]->learning_rate *= ngpus;
-        clsnets[i] = load_network(cfgfile2, weightfile2, clear);
-        clsnets[i]->learning_rate *= ngpus;
     }
     srand(time(0));
-    network *net = attnets[0];
+    network *net = nets[0];
-    //network *clsnet = clsnets[0];
 
     int imgs = net->batch * net->subdivisions * ngpus;
 
@@ -47,15 +92,18 @@ void train_attention(char *datacfg, char *cfgfile, char *weightfile, char *cfgfile2, char *weightfile2, int *gpus, int ngpus, int clear)
     int N = plist->size;
     double time;
 
+    int divs=3;
+    int size=2;
+
     load_args args = {0};
-    args.w = 4*net->w;
+    args.w = divs*net->w/size;
-    args.h = 4*net->h;
+    args.h = divs*net->h/size;
-    args.size = 4*net->w;
+    args.size = divs*net->w/size;
     args.threads = 32;
     args.hierarchy = net->hierarchy;
 
-    args.min = net->min_ratio*net->w;
+    args.min = net->min_ratio*args.w;
-    args.max = net->max_ratio*net->w;
+    args.max = net->max_ratio*args.w;
     args.angle = net->angle;
     args.aspect = net->aspect;
     args.exposure = net->exposure;
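Note: reading these constants together with tile_data (added to src/data.c further down), divs=3 and size=2 mean the loader now delivers images at divs/size = 1.5x the network resolution (args.w = 3*net->w/2); each of the divs*divs = 9 tiles then spans orig.w/divs * size = net->w pixels, i.e. exactly one network input, and adjacent tiles overlap by half a tile since the grid stride is orig.w/divs = net->w/2.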
@@ -83,25 +131,81 @@ void train_attention(char *datacfg, char *cfgfile, char *weightfile, char *cfgfile2, char *weightfile2, int *gpus, int ngpus, int clear)
         train = buffer;
         load_thread = load_data(args);
         data resized = resize_data(train, net->w, net->h);
+        extend_data_truth(&resized, divs*divs, 0);
+        data *tiles = tile_data(train, divs, size);
+
         printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);
         time = what_time_is_it_now();
 
-        float loss = 0;
+        float aloss = 0;
+        float closs = 0;
+        int z;
+        for (i = 0; i < divs*divs/ngpus; ++i) {
+#pragma omp parallel for
+            for(j = 0; j < ngpus; ++j){
+                int index = i*ngpus + j;
+                extend_data_truth(tiles+index, divs*divs, SECRET_NUM);
+                matrix deltas = network_loss_data(nets[j], tiles[index]);
+                for(z = 0; z < resized.y.rows; ++z){
+                    resized.y.vals[z][train.y.cols + index] = deltas.vals[z][0];
+                }
+                free_matrix(deltas);
+            }
+        }
+        int *inds = calloc(resized.y.rows, sizeof(int));
+        for(z = 0; z < resized.y.rows; ++z){
+            int index = max_index(resized.y.vals[z] + train.y.cols, divs*divs);
+            inds[z] = index;
+            for(i = 0; i < divs*divs; ++i){
+                resized.y.vals[z][train.y.cols + i] = (i == index)? 1 : 0;
+            }
+        }
+        data best = select_data(tiles, inds);
+        free(inds);
 #ifdef GPU
         if (ngpus == 1) {
-            loss = train_network(net, train);
+            closs = train_network(net, best);
         } else {
-            loss = train_networks(attnets, ngpus, train, 4);
+            closs = train_networks(nets, ngpus, best, 4);
         }
-#else
-        loss = train_network(net, train);
 #endif
+        for (i = 0; i < divs*divs; ++i) {
+            printf("%.2f ", resized.y.vals[0][train.y.cols + i]);
+            if((i+1)%divs == 0) printf("\n");
+            free_data(tiles[i]);
+        }
+        free_data(best);
+        printf("\n");
+        image im = float_to_image(64,64,3,resized.X.vals[0]);
+        //show_image(im, "orig");
+        //cvWaitKey(100);
+        /*
+        image im1 = float_to_image(64,64,3,tiles[i].X.vals[0]);
+        image im2 = float_to_image(64,64,3,resized.X.vals[0]);
+        show_image(im1, "tile");
+        show_image(im2, "res");
+        */
+#ifdef GPU
+        if (ngpus == 1) {
+            aloss = train_network(net, resized);
+        } else {
+            aloss = train_networks(nets, ngpus, resized, 4);
+        }
+#endif
+        for(i = 0; i < divs*divs; ++i){
+            printf("%f ", nets[0]->output[1000 + i]);
+            if ((i+1) % divs == 0) printf("\n");
+        }
+        printf("\n");
+
         free_data(resized);
-        if(avg_loss == -1) avg_loss = loss;
-        avg_loss = avg_loss*.9 + loss*.1;
-        printf("%ld, %.3f: %f, %f avg, %f rate, %lf seconds, %ld images\n", get_current_batch(net), (float)(*net->seen)/N, loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, *net->seen);
         free_data(train);
+        if(avg_cls_loss == -1) avg_cls_loss = closs;
+        if(avg_att_loss == -1) avg_att_loss = aloss;
+        avg_cls_loss = avg_cls_loss*.9 + closs*.1;
+        avg_att_loss = avg_att_loss*.9 + aloss*.1;
+
+        printf("%ld, %.3f: Att: %f, %f avg, Class: %f, %f avg, %f rate, %lf seconds, %ld images\n", get_current_batch(net), (float)(*net->seen)/N, aloss, avg_att_loss, closs, avg_cls_loss, get_current_rate(net), what_time_is_it_now()-time, *net->seen);
         if(*net->seen/N > epoch){
             epoch = *net->seen/N;
             char buff[256];
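Note: the loop above scores each of the divs*divs tiles with network_loss_data, writes the scores into the extra truth columns added by extend_data_truth, and then collapses them to a one-hot target that marks the best tile before training the attention pass on the resized image. A standalone sketch of that argmax-to-one-hot step (not from the commit; the scores are made up):

#include <stdio.h>

int main()
{
    float score[9] = { .1f, .3f, .2f, .9f, .4f, .1f, .0f, .2f, .3f };  /* hypothetical divs*divs = 9 tile scores */
    int i, best = 0;
    for(i = 1; i < 9; ++i) if(score[i] > score[best]) best = i;        /* argmax, as max_index does */
    for(i = 0; i < 9; ++i) printf("%d ", i == best ? 1 : 0);           /* one-hot attention target */
    printf("\n");
    return 0;
}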
@@ -152,6 +256,11 @@ void validate_attention_single(char *datacfg, char *filename, char *weightfile)
     float avg_acc = 0;
     float avg_topk = 0;
     int *indexes = calloc(topk, sizeof(int));
+    int divs = 4;
+    int size = 2;
+    int extra = 0;
+    float *avgs = calloc(classes, sizeof(float));
+    int *inds = calloc(divs*divs, sizeof(int));
 
     for(i = 0; i < m; ++i){
         int class = -1;
@@ -163,14 +272,38 @@ void validate_attention_single(char *datacfg, char *filename, char *weightfile)
             }
         }
         image im = load_image_color(paths[i], 0, 0);
-        image resized = resize_min(im, net->w);
+        image resized = resize_min(im, net->w*divs/size);
-        image crop = crop_image(resized, (resized.w - net->w)/2, (resized.h - net->h)/2, net->w, net->h);
+        image crop = crop_image(resized, (resized.w - net->w*divs/size)/2, (resized.h - net->h*divs/size)/2, net->w*divs/size, net->h*divs/size);
+        image rcrop = resize_image(crop, net->w, net->h);
         //show_image(im, "orig");
         //show_image(crop, "cropped");
         //cvWaitKey(0);
-        float *pred = network_predict(net, crop.data);
+        float *pred = network_predict(net, rcrop.data);
+        //pred[classes + 56] = 0;
+        for(j = 0; j < divs*divs; ++j){
+            printf("%.2f ", pred[classes + j]);
+            if((j+1)%divs == 0) printf("\n");
+        }
+        printf("\n");
+        copy_cpu(classes, pred, 1, avgs, 1);
+        top_k(pred + classes, divs*divs, divs*divs, inds);
+        show_image(crop, "crop");
+        for(j = 0; j < extra; ++j){
+            int index = inds[j];
+            int row = index / divs;
+            int col = index % divs;
+            int y = row * crop.h / divs - (net->h - crop.h/divs)/2;
+            int x = col * crop.w / divs - (net->w - crop.w/divs)/2;
+            printf("%d %d %d %d\n", row, col, y, x);
+            image tile = crop_image(crop, x, y, net->w, net->h);
+            float *pred = network_predict(net, tile.data);
+            axpy_cpu(classes, 1., pred, 1, avgs, 1);
+            show_image(tile, "tile");
+            cvWaitKey(10);
+        }
         if(net->hierarchy) hierarchy_predictions(pred, net->outputs, net->hierarchy, 1, 1);
 
+        if(rcrop.data != resized.data) free_image(rcrop);
         if(resized.data != im.data) free_image(resized);
         free_image(im);
         free_image(crop);
@@ -318,7 +451,7 @@ void run_attention(int argc, char **argv)
     char *filename = (argc > 6) ? argv[6]: 0;
     char *layer_s = (argc > 7) ? argv[7]: 0;
     if(0==strcmp(argv[2], "predict")) predict_attention(data, cfg, weights, filename, top);
-    else if(0==strcmp(argv[2], "train")) train_attention(data, cfg, weights, filename, layer_s, gpus, ngpus, clear);
+    else if(0==strcmp(argv[2], "train")) train_attention(data, cfg, weights, gpus, ngpus, clear);
     else if(0==strcmp(argv[2], "valid")) validate_attention_single(data, cfg, weights);
     else if(0==strcmp(argv[2], "validmulti")) validate_attention_multi(data, cfg, weights);
 }
@@ -447,7 +447,7 @@ void validate_classifier_multi(char *datacfg, char *cfg, char *weights)
         float *pred = calloc(classes, sizeof(float));
         image im = load_image_color(paths[i], 0, 0);
         for(j = 0; j < nscales; ++j){
-            image r = resize_min(im, scales[j]);
+            image r = resize_max(im, scales[j]);
             resize_network(net, r.w, r.h);
             float *p = network_predict(net, r.data);
             if(net->hierarchy) hierarchy_predictions(p, net->outputs, net->hierarchy, 1 , 1);
@@ -12,6 +12,7 @@ extern void run_coco(int argc, char **argv);
 extern void run_captcha(int argc, char **argv);
 extern void run_nightmare(int argc, char **argv);
 extern void run_classifier(int argc, char **argv);
+extern void run_attention(int argc, char **argv);
 extern void run_regressor(int argc, char **argv);
 extern void run_segmenter(int argc, char **argv);
 extern void run_char_rnn(int argc, char **argv);
@@ -431,6 +432,8 @@ int main(int argc, char **argv)
         predict_classifier("cfg/imagenet1k.data", argv[2], argv[3], argv[4], 5);
     } else if (0 == strcmp(argv[1], "classifier")){
         run_classifier(argc, argv);
+    } else if (0 == strcmp(argv[1], "attention")){
+        run_attention(argc, argv);
     } else if (0 == strcmp(argv[1], "regressor")){
         run_regressor(argc, argv);
     } else if (0 == strcmp(argv[1], "segmenter")){
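Note: with this dispatch, `./darknet attention <subcommand> ...` now reaches run_attention, whose subcommands (per the run_attention hunk above) are predict, train, valid and validmulti. Assuming it mirrors the existing classifier convention, a training run would look something like `./darknet attention train <data-cfg> <net-cfg> [weights]`; the exact argument positions are whatever run_attention parses.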
examples/detector-scipy-opencv.py (new file, 56 lines)
@@ -0,0 +1,56 @@
+# Stupid python path shit.
+# Instead just add darknet.py to somewhere in your python path
+# OK actually that might not be a great idea, idk, work in progress
+# Use at your own risk. or don't, i don't care
+
+from scipy.misc import imread
+import cv2
+
+def array_to_image(arr):
+    arr = arr.transpose(2,0,1)
+    c = arr.shape[0]
+    h = arr.shape[1]
+    w = arr.shape[2]
+    arr = (arr/255.0).flatten()
+    data = dn.c_array(dn.c_float, arr)
+    im = dn.IMAGE(w,h,c,data)
+    return im
+
+def detect2(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
+    boxes = dn.make_boxes(net)
+    probs = dn.make_probs(net)
+    num = dn.num_boxes(net)
+    dn.network_detect(net, image, thresh, hier_thresh, nms, boxes, probs)
+    res = []
+    for j in range(num):
+        for i in range(meta.classes):
+            if probs[j][i] > 0:
+                res.append((meta.names[i], probs[j][i], (boxes[j].x, boxes[j].y, boxes[j].w, boxes[j].h)))
+    res = sorted(res, key=lambda x: -x[1])
+    dn.free_ptrs(dn.cast(probs, dn.POINTER(dn.c_void_p)), num)
+    return res
+
+import sys, os
+sys.path.append(os.path.join(os.getcwd(),'python/'))
+
+import darknet as dn
+
+# Darknet
+net = dn.load_net("cfg/tiny-yolo.cfg", "tiny-yolo.weights", 0)
+meta = dn.load_meta("cfg/coco.data")
+r = dn.detect(net, meta, "data/dog.jpg")
+print r
+
+# scipy
+arr= imread('data/dog.jpg')
+im = array_to_image(arr)
+r = detect2(net, meta, im)
+print r
+
+# OpenCV
+arr = cv2.imread('data/dog.jpg')
+im = array_to_image(arr)
+dn.rgbgr_image(im)
+r = detect2(net, meta, im)
+print r
+
@@ -609,8 +609,8 @@ void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filenam
         network_predict(net, X);
         printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now()-time);
         get_region_boxes(l, im.w, im.h, net->w, net->h, thresh, probs, boxes, masks, 0, 0, hier_thresh, 1);
+        //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
         if (nms) do_nms_sort(boxes, probs, l.w*l.h*l.n, l.classes, nms);
-        //else if (nms) do_nms_sort(boxes, probs, l.w*l.h*l.n, l.classes, nms);
         draw_detections(im, l.w*l.h*l.n, thresh, boxes, probs, masks, names, alphabet, l.classes);
         if(outfile){
             save_image(im, outfile);
examples/go.c (703 lines changed)
File diff suppressed because it is too large.
@@ -56,6 +56,10 @@ typedef enum{
     LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN
 } ACTIVATION;
 
+typedef enum{
+    MULT, ADD, SUB, DIV
+} BINARY_ACTIVATION;
+
 typedef enum {
     CONVOLUTIONAL,
     DECONVOLUTIONAL,
@@ -578,6 +582,8 @@ list *read_data_cfg(char *filename);
 list *read_cfg(char *filename);
 unsigned char *read_file(char *filename);
 data resize_data(data orig, int w, int h);
+data *tile_data(data orig, int divs, int size);
+data select_data(data *orig, int *inds);
 
 void forward_network(network *net);
 void backward_network(network *net);
@@ -588,6 +594,7 @@ void axpy_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY);
 void copy_cpu(int N, float *X, int INCX, float *Y, int INCY);
 void scal_cpu(int N, float ALPHA, float *X, int INCX);
 void normalize_cpu(float *x, float *mean, float *variance, int batch, int filters, int spatial);
+void softmax(float *input, int n, float temp, int stride, float *output);
 
 int best_3d_shift_r(image a, image b, int min, int max);
 #ifdef GPU
@@ -744,12 +751,15 @@ void top_k(float *a, int n, int k, int *index);
 int *read_map(char *filename);
 void error(const char *s);
 int max_index(float *a, int n);
+int max_int_index(int *a, int n);
 int sample_array(float *a, int n);
+int *random_index_order(int min, int max);
 void free_list(list *l);
 float mse_array(float *a, int n);
 float variance_array(float *a, int n);
 float mag_array(float *a, int n);
 float mean_array(float *a, int n);
+float sum_array(float *a, int n);
 void normalize_array(float *a, int n);
 int *read_intlist(char *s, int *n, int d);
 size_t rand_size_t();
@@ -31,6 +31,8 @@ class METADATA(Structure):
     _fields_ = [("classes", c_int),
                 ("names", POINTER(c_char_p))]
 
+
+
 #lib = CDLL("/home/pjreddie/documents/darknet/libdarknet.so", RTLD_GLOBAL)
 lib = CDLL("libdarknet.so", RTLD_GLOBAL)
 lib.network_width.argtypes = [c_void_p]
@@ -42,6 +44,10 @@ predict = lib.network_predict
 predict.argtypes = [c_void_p, POINTER(c_float)]
 predict.restype = POINTER(c_float)
 
+make_image = lib.make_image
+make_image.argtypes = [c_int, c_int, c_int]
+make_image.restype = IMAGE
+
 make_boxes = lib.make_boxes
 make_boxes.argtypes = [c_void_p]
 make_boxes.restype = POINTER(BOX)
@@ -82,6 +88,9 @@ load_image = lib.load_image_color
 load_image.argtypes = [c_char_p, c_int, c_int]
 load_image.restype = IMAGE
 
+rgbgr_image = lib.rgbgr_image
+rgbgr_image.argtypes = [IMAGE]
+
 predict_image = lib.network_predict_image
 predict_image.argtypes = [c_void_p, IMAGE]
 predict_image.restype = POINTER(c_float)
@@ -140,6 +140,41 @@ __device__ float gradient_kernel(float x, ACTIVATION a)
     return 0;
 }
 
+__global__ void binary_gradient_array_kernel(float *x, float *dy, int n, int s, BINARY_ACTIVATION a, float *dx)
+{
+    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+    int i = id % s;
+    int b = id / s;
+    float x1 = x[b*s + i];
+    float x2 = x[b*s + s/2 + i];
+    if(id < n) {
+        float de = dy[id];
+        dx[b*s + i] = x2*de;
+        dx[b*s + s/2 + i] = x1*de;
+    }
+}
+
+extern "C" void binary_gradient_array_gpu(float *x, float *dx, int n, int size, BINARY_ACTIVATION a, float *y)
+{
+    binary_gradient_array_kernel<<<cuda_gridsize(n/2), BLOCK>>>(x, dx, n/2, size, a, y);
+    check_error(cudaPeekAtLastError());
+}
+__global__ void binary_activate_array_kernel(float *x, int n, int s, BINARY_ACTIVATION a, float *y)
+{
+    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+    int i = id % s;
+    int b = id / s;
+    float x1 = x[b*s + i];
+    float x2 = x[b*s + s/2 + i];
+    if(id < n) y[id] = x1*x2;
+}
+
+extern "C" void binary_activate_array_gpu(float *x, int n, int size, BINARY_ACTIVATION a, float *y)
+{
+    binary_activate_array_kernel<<<cuda_gridsize(n/2), BLOCK>>>(x, n/2, size, a, y);
+    check_error(cudaPeekAtLastError());
+}
+
 __global__ void activate_array_kernel(float *x, int n, ACTIVATION a)
 {
     int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
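Note: the new kernels implement the MULT case of BINARY_ACTIVATION: each sample of length s is split into two halves that are multiplied element-wise, and the gradient follows the product rule. A CPU reference sketch (not from the commit; it indexes each pair with i in [0, s/2), which is how I read the intent of the GPU indexing):

#include <stdio.h>

void binary_activate_cpu(const float *x, int pairs, int s, float *y)
{
    int id;
    for(id = 0; id < pairs; ++id){
        int i = id % (s/2), b = id / (s/2);
        y[id] = x[b*s + i] * x[b*s + s/2 + i];          /* x1 * x2 */
    }
}

void binary_gradient_cpu(const float *x, const float *dy, int pairs, int s, float *dx)
{
    int id;
    for(id = 0; id < pairs; ++id){
        int i = id % (s/2), b = id / (s/2);
        dx[b*s + i]       = x[b*s + s/2 + i] * dy[id];  /* d(x1*x2)/dx1 = x2 */
        dx[b*s + s/2 + i] = x[b*s + i]       * dy[id];  /* d(x1*x2)/dx2 = x1 */
    }
}

int main()
{
    float x[4] = {2, 3, 4, 5};                          /* one sample, s = 4: halves (2,3) and (4,5) */
    float y[2], dy[2] = {1, 1}, dx[4];
    binary_activate_cpu(x, 2, 4, y);
    binary_gradient_cpu(x, dy, 2, 4, dx);
    printf("y = %g %g, dx = %g %g %g %g\n", y[0], y[1], dx[0], dx[1], dx[2], dx[3]);
    return 0;
}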
src/data.c (55 lines changed)
@@ -1172,6 +1172,56 @@ data load_data_regression(char **paths, int n, int m, int min, int max, int size
     return d;
 }
 
+data select_data(data *orig, int *inds)
+{
+    data d = {0};
+    d.shallow = 1;
+    d.w = orig[0].w;
+    d.h = orig[0].h;
+
+    d.X.rows = orig[0].X.rows;
+    d.y.rows = orig[0].X.rows;
+
+    d.X.cols = orig[0].X.cols;
+    d.y.cols = orig[0].y.cols;
+
+    d.X.vals = calloc(orig[0].X.rows, sizeof(float *));
+    d.y.vals = calloc(orig[0].y.rows, sizeof(float *));
+    int i;
+    for(i = 0; i < d.X.rows; ++i){
+        d.X.vals[i] = orig[inds[i]].X.vals[i];
+        d.y.vals[i] = orig[inds[i]].y.vals[i];
+    }
+    return d;
+}
+
+data *tile_data(data orig, int divs, int size)
+{
+    data *ds = calloc(divs*divs, sizeof(data));
+    int i, j;
+#pragma omp parallel for
+    for(i = 0; i < divs*divs; ++i){
+        data d;
+        d.shallow = 0;
+        d.w = orig.w/divs * size;
+        d.h = orig.h/divs * size;
+        d.X.rows = orig.X.rows;
+        d.X.cols = d.w*d.h*3;
+        d.X.vals = calloc(d.X.rows, sizeof(float*));
+
+        d.y = copy_matrix(orig.y);
+#pragma omp parallel for
+        for(j = 0; j < orig.X.rows; ++j){
+            int x = (i%divs) * orig.w / divs - (d.w - orig.w/divs)/2;
+            int y = (i/divs) * orig.h / divs - (d.h - orig.h/divs)/2;
+            image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[j]);
+            d.X.vals[j] = crop_image(im, x, y, d.w, d.h).data;
+        }
+        ds[i] = d;
+    }
+    return ds;
+}
+
 data resize_data(data orig, int w, int h)
 {
     data d = {0};
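Note: the crop offsets in tile_data center tile i on grid cell (i/divs, i%divs) and then shift it back by half of the extra width, so neighboring tiles overlap whenever size > 1. A standalone sketch of just that arithmetic (not from the commit; the image size is made up):

#include <stdio.h>

int main()
{
    int divs = 3, size = 2;
    int orig_w = 96, orig_h = 96;            /* hypothetical loaded image size */
    int d_w = orig_w/divs * size;            /* tile width  = 64 */
    int d_h = orig_h/divs * size;            /* tile height = 64 */
    int i;
    for(i = 0; i < divs*divs; ++i){
        int x = (i%divs) * orig_w / divs - (d_w - orig_w/divs)/2;
        int y = (i/divs) * orig_h / divs - (d_h - orig_h/divs)/2;
        printf("tile %d: crop at (%d, %d), %dx%d\n", i, x, y, d_w, d_h);
    }
    return 0;
}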
@@ -1181,9 +1231,10 @@ data resize_data(data orig, int w, int h)
     int i;
     d.X.rows = orig.X.rows;
     d.X.cols = w*h*3;
-    d.X.vals = calloc(d.X.rows, sizeof(float));
+    d.X.vals = calloc(d.X.rows, sizeof(float*));
 
     d.y = copy_matrix(orig.y);
+#pragma omp parallel for
     for(i = 0; i < orig.X.rows; ++i){
         image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[i]);
         d.X.vals[i] = resize_image(im, w, h).data;
@@ -1239,6 +1290,8 @@ data concat_data(data d1, data d2)
     d.shallow = 1;
     d.X = concat_matrix(d1.X, d2.X);
     d.y = concat_matrix(d1.y, d2.y);
+    d.w = d1.w;
+    d.h = d1.h;
     return d;
 }
 
src/utils.c (30 lines changed)
@@ -91,6 +91,22 @@ void shuffle(void *arr, size_t n, size_t size)
     }
 }
 
+int *random_index_order(int min, int max)
+{
+    int *inds = calloc(max-min, sizeof(int));
+    int i;
+    for(i = min; i < max; ++i){
+        inds[i] = i;
+    }
+    for(i = min; i < max-1; ++i){
+        int swap = inds[i];
+        int index = i + rand()%(max-i);
+        inds[i] = inds[index];
+        inds[index] = swap;
+    }
+    return inds;
+}
+
 void del_arg(int argc, char **argv, int index)
 {
     int i;
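Note: random_index_order is a Fisher-Yates-style shuffle of the integers [min, max); as written it allocates max-min slots but indexes them with i running from min to max, so it only behaves as intended when min is 0. A standalone sketch of that min = 0 case (not from the commit):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main()
{
    int n = 10, i;
    int *inds = calloc(n, sizeof(int));
    srand(time(0));
    for(i = 0; i < n; ++i) inds[i] = i;
    for(i = 0; i < n-1; ++i){                /* swap each slot with a random later one */
        int j = i + rand() % (n - i);
        int tmp = inds[i]; inds[i] = inds[j]; inds[j] = tmp;
    }
    for(i = 0; i < n; ++i) printf("%d ", inds[i]);
    printf("\n");
    free(inds);
    return 0;
}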
@@ -583,6 +599,20 @@ int sample_array(float *a, int n)
     return n-1;
 }
 
+int max_int_index(int *a, int n)
+{
+    if(n <= 0) return -1;
+    int i, max_i = 0;
+    int max = a[0];
+    for(i = 1; i < n; ++i){
+        if(a[i] > max){
+            max = a[i];
+            max_i = i;
+        }
+    }
+    return max_i;
+}
+
 int max_index(float *a, int n)
 {
     if(n <= 0) return -1;
|
|||||||
float rand_uniform(float min, float max);
|
float rand_uniform(float min, float max);
|
||||||
float rand_scale(float s);
|
float rand_scale(float s);
|
||||||
int rand_int(int min, int max);
|
int rand_int(int min, int max);
|
||||||
float sum_array(float *a, int n);
|
|
||||||
void mean_arrays(float **a, int n, int els, float *avg);
|
void mean_arrays(float **a, int n, int els, float *avg);
|
||||||
float dist_array(float *a, float *b, int n, int sub);
|
float dist_array(float *a, float *b, int n, int sub);
|
||||||
float **one_hot_encode(float *a, int n, int k);
|
float **one_hot_encode(float *a, int n, int k);
|
||||||
|