Removing even more conflicts

This commit is contained in:
Anup Rajput
2017-01-23 10:39:20 +05:30
12 changed files with 28784 additions and 301 deletions

10
cfg/combine9k.data Normal file
View File

@@ -0,0 +1,10 @@
classes= 9418
#train = /home/pjreddie/data/coco/trainvalno5k.txt
train = data/combine9k.train.list
valid = /home/pjreddie/data/imagenet/det.val.files
labels = data/9k.labels
names = data/9k.names
backup = backup/
map = data/inet9k.map
eval = imagenet
results = results

211
cfg/yolo9000.cfg Normal file
View File

@@ -0,0 +1,211 @@
[net]
batch=1
subdivisions=1
height=416
width=416
channels=3
momentum=0.9
decay=0.0005
learning_rate=0.00001
max_batches = 242200
policy=steps
steps=500,200000,240000
scales=10,.1,.1
hue=.1
saturation=.75
exposure=.75
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[convolutional]
filters=28269
size=1
stride=1
pad=1
activation=linear
[region]
anchors = 0.77871, 1.14074, 3.00525, 4.31277, 9.22725, 9.61974
bias_match=1
classes=9418
coords=4
num=3
softmax=1
jitter=.2
rescore=1
object_scale=5
noobject_scale=1
class_scale=1
coord_scale=1
thresh = .6
absolute=1
random=1
tree=data/9k.tree
map = data/coco9k.map

9418
data/9k.labels Normal file

File diff suppressed because it is too large Load Diff

9418
data/9k.names Normal file

File diff suppressed because it is too large Load Diff

9418
data/9k.tree Normal file

File diff suppressed because it is too large Load Diff

80
data/coco9k.map Normal file
View File

@@ -0,0 +1,80 @@
5177
3768
3802
3800
4107
4072
4071
3797
4097
2645
5150
2644
3257
2523
6527
6866
6912
7342
7255
7271
7217
6858
7343
7233
3704
4374
3641
5001
3899
2999
2631
5141
2015
1133
1935
1930
5144
5143
2371
3916
3745
3640
4749
4736
4735
3678
58
42
771
81
152
141
786
700
218
791
2518
2521
3637
2458
2505
2519
3499
2837
3503
2597
3430
2080
5103
5111
5102
3013
5096
1102
3218
4010
2266
1127
5122
2360

200
data/inet9k.map Normal file
View File

@@ -0,0 +1,200 @@
2687
4107
8407
7254
42
6797
127
2268
2442
3704
260
1970
58
4443
2661
2043
2039
4858
4007
6858
8408
166
2523
3768
4347
6527
2446
5005
3274
3678
4918
709
4072
8428
7223
2251
3802
3848
7271
2677
8267
2849
2518
2738
3746
5105
3430
3503
2249
1841
2032
2358
122
3984
4865
3246
5095
6912
6878
8467
2741
1973
3057
7217
1872
44
2452
3637
2704
6917
2715
6734
2325
6864
6677
2035
1949
338
2664
5122
1844
784
2223
7188
2719
2670
4830
158
4818
7228
1965
7342
786
2095
8281
8258
7406
3915
8382
2437
2837
82
6871
1876
7447
8285
5007
2740
3463
5103
3755
4910
6809
3800
118
3396
3092
2709
81
7105
4036
2366
1846
5177
2684
64
2041
3919
700
3724
1742
39
807
7184
2256
235
2778
2996
2030
3714
7167
2369
6705
6861
5096
2597
2166
2036
3228
3747
2711
8300
2226
7153
7255
2631
7109
8242
7445
3776
3803
3690
2025
2521
2316
7190
8249
3352
2639
2887
100
4219
3344
5008
7224
3351
2434
2074
2034
8304
5004
6868
5102
2645
4071
2716
2717
7420
3499
3763
5084
2676
2046
5107
5097
3944
4097
7132
3956
7343

View File

@@ -274,6 +274,34 @@ void do_nms_obj(box *boxes, float **probs, int total, int classes, float thresh)
}
/* Greedy non-maximum suppression over detection boxes.
 * Boxes are sorted (by the external nms_comparator) on the score stored in
 * column `classes` of probs -- presumably an objectness score appended after
 * the per-class probabilities (do_nms_obj above keys on the same column);
 * TODO confirm against nms_comparator.
 * For each surviving box, every lower-ranked box overlapping it with
 * IoU > thresh has all of its classes+1 probability entries zeroed in place.
 * boxes and probs are modified through the sorted index table only; their
 * storage order is left intact for the caller. */
void do_nms_sort(box *boxes, float **probs, int total, int classes, float thresh)
{
int i, j, k;
/* One sortable record per box; NOTE(review): calloc result is unchecked. */
sortable_bbox *s = calloc(total, sizeof(sortable_bbox));
for(i = 0; i < total; ++i){
s[i].index = i;
s[i].class = classes;
s[i].probs = probs;
}
qsort(s, total, sizeof(sortable_bbox), nms_comparator);
for(i = 0; i < total; ++i){
/* already suppressed (score zeroed by an earlier, higher-ranked box) */
if(probs[s[i].index][classes] == 0) continue;
box a = boxes[s[i].index];
for(j = i+1; j < total; ++j){
box b = boxes[s[j].index];
if (box_iou(a, b) > thresh){
/* suppress box j: zero every class prob plus the score column */
for(k = 0; k < classes+1; ++k){
probs[s[j].index][k] = 0;
}
}
}
}
free(s);
}
void do_nms_sort(box *boxes, float **probs, int total, int classes, float thresh)
{
int i, j, k;

View File

@@ -1,91 +0,0 @@
#include "mini_blas.h"
/* C += ALPHA * A * B, row-major, neither operand transposed.
 * A is MxK (leading dimension lda), B is KxN (ldb), C is MxN (ldc).
 * TA, TB and BETA are unused here: cpu_gemm() scales C by BETA and
 * dispatches on the transpose flags before calling this kernel. */
void cpu_gemm_nn(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    for (int row = 0; row < M; ++row) {
        for (int inner = 0; inner < K; ++inner) {
            /* hoist the loop-invariant scaled A element */
            float scaled = ALPHA * A[row*lda + inner];
            for (int col = 0; col < N; ++col) {
                C[row*ldc + col] += scaled * B[inner*ldb + col];
            }
        }
    }
}
/* C += ALPHA * A * B^T, row-major.
 * A is MxK (lda); B holds the transposed operand, indexed B[k + j*ldb].
 * TA, TB and BETA are unused here: cpu_gemm() applies BETA and dispatches. */
void cpu_gemm_nt(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    for (int row = 0; row < M; ++row) {
        for (int col = 0; col < N; ++col) {
            /* accumulate the dot product locally, then add onto C once */
            float acc = 0;
            for (int inner = 0; inner < K; ++inner) {
                acc += ALPHA * A[row*lda + inner] * B[inner + col*ldb];
            }
            C[row*ldc + col] += acc;
        }
    }
}
/* C += ALPHA * A^T * B, row-major.
 * A holds the transposed operand, indexed A[k*lda + i]; B is KxN (ldb).
 * TA, TB and BETA are unused here: cpu_gemm() applies BETA and dispatches. */
void cpu_gemm_tn(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    for (int row = 0; row < M; ++row) {
        for (int inner = 0; inner < K; ++inner) {
            /* hoist the loop-invariant scaled A^T element */
            float scaled = ALPHA * A[inner*lda + row];
            for (int col = 0; col < N; ++col) {
                C[row*ldc + col] += scaled * B[inner*ldb + col];
            }
        }
    }
}
/* C += ALPHA * A^T * B^T, row-major.
 * Both operands are stored transposed: A[i + k*lda], B[k + j*ldb].
 * TA, TB and BETA are unused here: cpu_gemm() applies BETA and dispatches. */
void cpu_gemm_tt(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    for (int row = 0; row < M; ++row) {
        for (int col = 0; col < N; ++col) {
            for (int inner = 0; inner < K; ++inner) {
                C[row*ldc + col] += ALPHA * A[row + inner*lda] * B[inner + col*ldb];
            }
        }
    }
}
/* General matrix multiply: C = ALPHA * op(A) * op(B) + BETA * C, where
 * op() transposes its operand when the corresponding TA/TB flag is set.
 * Scales C by BETA here, then dispatches to the kernel matching (TA, TB);
 * the kernels themselves only accumulate ALPHA * op(A) * op(B) into C. */
void cpu_gemm(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    /* pre-scale the destination by BETA */
    for (int row = 0; row < M; ++row) {
        for (int col = 0; col < N; ++col) {
            C[row*ldc + col] *= BETA;
        }
    }
    /* dispatch on the transpose flags */
    if (TA && TB)
        cpu_gemm_tt(TA, TB, M, N, K, ALPHA, A, lda, B, ldb, BETA, C, ldc);
    else if (TA)
        cpu_gemm_tn(TA, TB, M, N, K, ALPHA, A, lda, B, ldb, BETA, C, ldc);
    else if (TB)
        cpu_gemm_nt(TA, TB, M, N, K, ALPHA, A, lda, B, ldb, BETA, C, ldc);
    else
        cpu_gemm_nn(TA, TB, M, N, K, ALPHA, A, lda, B, ldb, BETA, C, ldc);
}

View File

@@ -11,7 +11,7 @@
layer make_region_layer(int batch, int w, int h, int n, int classes, int coords)
{
region_layer l = {0};
layer l = {0};
l.type = REGION;
l.n = n;

View File

@@ -1,205 +0,0 @@
#include <stdio.h> /* needed for sockaddr_in */
#include <stdlib.h>
#include <string.h> /* needed for sockaddr_in */
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h> /* needed for sockaddr_in */
#include <netdb.h>
#include <pthread.h>
#include <time.h>
#include "mini_blas.h"
#include "utils.h"
#include "parser.h"
#include "server.h"
#include "connected_layer.h"
#include "convolutional_layer.h"
#define SERVER_PORT 9423
#define STR(x) #x
/* Create a TCP socket; when server == 1, also bind it to SERVER_PORT on all
 * local interfaces. Returns the socket fd; on failure calls error(), which
 * does not return here. Fixes: the old comment claimed "UDP socket" although
 * SOCK_STREAM is TCP, and bzero() (removed from POSIX.1-2008) is replaced by
 * the standard memset(). */
int socket_setup(int server)
{
    int fd = 0;                 /* our socket */
    struct sockaddr_in me;      /* our address */

    /* create a TCP socket */
    if ((fd = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
        error("cannot create socket");
    }

    /* bind the socket to any valid IP address and the fixed server port */
    if (server == 1){
        memset(&me, 0, sizeof(me));
        me.sin_family = AF_INET;
        me.sin_addr.s_addr = htonl(INADDR_ANY);
        me.sin_port = htons(SERVER_PORT);
        if (bind(fd, (struct sockaddr *)&me, sizeof(me)) < 0) {
            error("bind failed");
        }
    }
    return fd;
}
typedef struct{
int fd;
int counter;
network net;
} connection_info;
/* Read n floats from socket fd into a scratch buffer and add them
 * element-wise into a (a[i] += buff[i], via axpy_cpu).
 * Fix: the calloc result was previously unchecked, so an allocation
 * failure would have made read_all write through a NULL pointer. */
void read_and_add_into(int fd, float *a, int n)
{
    float *buff = calloc(n, sizeof(float));
    if (!buff) {
        fprintf(stderr, "read_and_add_into: calloc failed\n");
        exit(1);
    }
    read_all(fd, (char*) buff, n*sizeof(float));
    axpy_cpu(n, 1, buff, 1, a, 1);
    free(buff);
}
/* Worker-thread entry for one parameter-server connection.
 * pointer is a heap-allocated connection_info whose ownership transfers
 * here (freed immediately after copying). Wire protocol, in strict order:
 * first read gradient updates for every CONVOLUTIONAL/CONNECTED layer,
 * then apply them and write the refreshed weights back on the same socket.
 * The read order below must mirror client_update's write order exactly.
 * NOTE(review): declared void, but pthread_create expects a
 * void *(*)(void *) start routine; the call site casts -- non-conforming. */
void handle_connection(void *pointer)
{
connection_info info = *(connection_info *) pointer;
free(pointer);
//printf("New Connection\n");
/* checkpoint the shared net every 100 connections */
if(info.counter%100==0){
char buff[256];
sprintf(buff, "unikitty/net_%d.part", info.counter);
save_network(info.net, buff);
}
int fd = info.fd;
network net = info.net;
int i;
/* Pass 1: accumulate the client's bias/weight gradients into the net. */
for(i = 0; i < net.n; ++i){
if(net.types[i] == CONVOLUTIONAL){
convolutional_layer layer = *(convolutional_layer *) net.layers[i];
read_and_add_into(fd, layer.bias_updates, layer.n);
int num = layer.n*layer.c*layer.size*layer.size;
read_and_add_into(fd, layer.filter_updates, num);
}
if(net.types[i] == CONNECTED){
connected_layer layer = *(connected_layer *) net.layers[i];
read_and_add_into(fd, layer.bias_updates, layer.outputs);
read_and_add_into(fd, layer.weight_updates, layer.inputs*layer.outputs);
}
}
/* Pass 2: apply the accumulated updates, then send fresh weights back. */
for(i = 0; i < net.n; ++i){
if(net.types[i] == CONVOLUTIONAL){
convolutional_layer layer = *(convolutional_layer *) net.layers[i];
update_convolutional_layer(layer);
write_all(fd, (char*) layer.biases, layer.n*sizeof(float));
int num = layer.n*layer.c*layer.size*layer.size;
write_all(fd, (char*) layer.filters, num*sizeof(float));
}
if(net.types[i] == CONNECTED){
connected_layer layer = *(connected_layer *) net.layers[i];
update_connected_layer(layer);
write_all(fd, (char *)layer.biases, layer.outputs*sizeof(float));
write_all(fd, (char *)layer.weights, layer.outputs*layer.inputs*sizeof(float));
}
}
//printf("Received updates\n");
close(fd);
}
/* Parameter-server accept loop: listens on SERVER_PORT and spawns one
 * detached worker thread (handle_connection) per incoming client.
 * Never returns under normal operation.
 * Fixes: accept() failures are now skipped instead of handing -1 to a
 * worker; calloc() and pthread_create() failures no longer leak the fd
 * and info record; spawned threads are detached (they were never joined,
 * leaking one thread's resources per connection); removed a dead time_t
 * variable that was set once and never read. */
void server_update(network net)
{
    int fd = socket_setup(1);
    int counter = 18000;          /* connection counter, also checkpoint id */
    listen(fd, 64);
    struct sockaddr_in client;                  /* remote address */
    socklen_t client_size = sizeof(client);     /* length of addresses */
    while(1){
        connection_info *info = calloc(1, sizeof(connection_info));
        if (!info) {
            fprintf(stderr, "server_update: out of memory\n");
            break;
        }
        info->net = net;
        info->counter = counter;
        int connection = accept(fd, (struct sockaddr *) &client, &client_size);
        if (connection < 0) {     /* transient accept failure: retry */
            free(info);
            continue;
        }
        info->fd = connection;
        /* NOTE(review): handle_connection is declared void, not void *;
         * the cast makes pthread_create accept it but is non-conforming. */
        pthread_t worker;
        if (pthread_create(&worker, NULL, (void *) &handle_connection, info) == 0) {
            pthread_detach(worker);   /* worker owns info and the fd now */
        } else {
            close(connection);
            free(info);
        }
        ++counter;
        printf("%d\n", counter);
        //if(counter == 1024) break;
    }
    close(fd);
}
/* Push this node's accumulated gradients to the parameter server at
 * `address`, then receive the server's refreshed weights into net.
 * Write order must mirror handle_connection's read order exactly.
 * Fixes: a failed gethostbyname() lookup previously fell through and
 * dereferenced the NULL hostent (hp->h_addr_list[0]); it now closes the
 * socket and returns. The error message also reported the literal
 * "localhost" instead of the requested address, and bzero() (removed
 * from POSIX.1-2008) is replaced with memset(). */
void client_update(network net, char *address)
{
    int fd = socket_setup(0);

    struct hostent *hp;            /* host information */
    struct sockaddr_in server;     /* server address */

    /* fill in the server's address and data */
    memset(&server, 0, sizeof(server));
    server.sin_family = AF_INET;
    server.sin_port = htons(SERVER_PORT);

    /* look up the address of the server given its name */
    hp = gethostbyname(address);
    if (!hp) {
        /* gethostbyname does not set errno; report and bail out */
        fprintf(stderr, "could not obtain address of %s\n", address);
        close(fd);
        return;
    }

    /* put the host's address into the server address structure */
    memcpy((void *)&server.sin_addr, hp->h_addr_list[0], hp->h_length);

    if (connect(fd, (struct sockaddr *) &server, sizeof(server)) < 0) {
        error("error connecting");
    }

    /* send our accumulated updates, zeroing the local accumulators */
    int i;
    //printf("Sending\n");
    for(i = 0; i < net.n; ++i){
        if(net.types[i] == CONVOLUTIONAL){
            convolutional_layer layer = *(convolutional_layer *) net.layers[i];
            write_all(fd, (char*) layer.bias_updates, layer.n*sizeof(float));
            int num = layer.n*layer.c*layer.size*layer.size;
            write_all(fd, (char*) layer.filter_updates, num*sizeof(float));
            memset(layer.bias_updates, 0, layer.n*sizeof(float));
            memset(layer.filter_updates, 0, num*sizeof(float));
        }
        if(net.types[i] == CONNECTED){
            connected_layer layer = *(connected_layer *) net.layers[i];
            write_all(fd, (char *)layer.bias_updates, layer.outputs*sizeof(float));
            write_all(fd, (char *)layer.weight_updates, layer.outputs*layer.inputs*sizeof(float));
            memset(layer.bias_updates, 0, layer.outputs*sizeof(float));
            memset(layer.weight_updates, 0, layer.inputs*layer.outputs*sizeof(float));
        }
    }
    //printf("Sent\n");

    /* receive the refreshed weights back, layer by layer */
    for(i = 0; i < net.n; ++i){
        if(net.types[i] == CONVOLUTIONAL){
            convolutional_layer layer = *(convolutional_layer *) net.layers[i];
            read_all(fd, (char*) layer.biases, layer.n*sizeof(float));
            int num = layer.n*layer.c*layer.size*layer.size;
            read_all(fd, (char*) layer.filters, num*sizeof(float));
#ifdef GPU
            push_convolutional_layer(layer);
#endif
        }
        if(net.types[i] == CONNECTED){
            connected_layer layer = *(connected_layer *) net.layers[i];
            read_all(fd, (char *)layer.biases, layer.outputs*sizeof(float));
            read_all(fd, (char *)layer.weights, layer.outputs*layer.inputs*sizeof(float));
#ifdef GPU
            push_connected_layer(layer);
#endif
        }
    }
    //printf("Updated\n");
    close(fd);
}

View File

@@ -1,4 +0,0 @@
#include "network.h"
/* Send this node's accumulated gradient updates to the parameter server at
 * `address` and receive refreshed weights back into net. */
void client_update(network net, char *address);
/* Run the parameter-server accept loop, spawning one worker thread per
 * incoming client connection; does not return under normal operation. */
void server_update(network net);