:eyeofthetiger::noseofthetiger::eyeofthetiger2:

Joseph Redmon
2017-03-26 23:42:30 -07:00
parent b61bcf544e
commit 60e952ba69
53 changed files with 3486 additions and 661 deletions

cfg/coco.data (modified)

@@ -1,7 +1,7 @@
classes= 80
train = /home/pjreddie/data/coco/trainvalno5k.txt
-#valid = coco_testdev
-valid = data/coco_val_5k.list
+valid = coco_testdev
+#valid = data/coco_val_5k.list
names = data/coco.names
backup = /home/pjreddie/backup/
eval=coco

cfg/go.cfg (new file, 135 lines)

@@ -0,0 +1,135 @@
[net]
batch=512
subdivisions=1
height=19
width=19
channels=1
momentum=0.9
decay=0.0005
burn_in=1000
learning_rate=0.1
policy=poly
power=4
max_batches=10000000
[convolutional]
filters=256
size=3
stride=1
pad=1
activation=relu
batch_normalize=1
[convolutional]
filters=256
size=3
stride=1
pad=1
activation=relu
batch_normalize=1
[convolutional]
filters=256
size=3
stride=1
pad=1
activation=relu
batch_normalize=1
[convolutional]
filters=256
size=3
stride=1
pad=1
activation=relu
batch_normalize=1
[convolutional]
filters=256
size=3
stride=1
pad=1
activation=relu
batch_normalize=1
[convolutional]
filters=256
size=3
stride=1
pad=1
activation=relu
batch_normalize=1
[convolutional]
filters=256
size=3
stride=1
pad=1
activation=relu
batch_normalize=1
[convolutional]
filters=256
size=3
stride=1
pad=1
activation=relu
batch_normalize=1
[convolutional]
filters=256
size=3
stride=1
pad=1
activation=relu
batch_normalize=1
[convolutional]
filters=256
size=3
stride=1
pad=1
activation=relu
batch_normalize=1
[convolutional]
filters=256
size=3
stride=1
pad=1
activation=relu
batch_normalize=1
[convolutional]
filters=256
size=3
stride=1
pad=1
activation=relu
batch_normalize=1
[convolutional]
filters=256
size=3
stride=1
pad=1
activation=relu
batch_normalize=1
[convolutional]
filters=1
size=1
stride=1
pad=1
activation=linear
[reorg]
extra=1
stride=1
[softmax]
[cost]
type=sse
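
Note: the new cfg/go.cfg defines a small Go policy network: a single-channel 19x19 board passes through thirteen batch-normalized 3x3 convolutions of 256 filters (ReLU), then a linear 1x1 convolution collapses the stack to one 19x19 plane that feeds a softmax trained with an SSE cost. As a rough size check, a minimal sketch assuming batch norm contributes one scale and one bias per filter (darknet's exact bookkeeping may differ):

    #include <stdio.h>

    int main(void) {
        long total = 0;
        int c = 1;                       /* [net] channels=1: single board plane */
        for (int i = 0; i < 13; ++i) {   /* thirteen 3x3, 256-filter conv layers */
            total += 3L * 3 * c * 256;   /* convolution weights */
            total += 2L * 256;           /* assumed batch-norm scale + bias per filter */
            c = 256;
        }
        total += 1L * 1 * 256 * 1 + 1;   /* final 1x1 linear conv, filters=1, plus bias */
        printf("approx. weights in cfg/go.cfg: %ld\n", total);  /* roughly 7.1M */
        return 0;
    }

That puts the network around seven million weights, dominated by the twelve 256-to-256 3x3 layers.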

(modified file)

@@ -7,13 +7,13 @@ channels=1
momentum=0.9
decay=0.0005
-learning_rate=0.1
+learning_rate=0.01
policy=poly
power=4
-max_batches=400000
+max_batches=100000
[convolutional]
-filters=192
+filters=256
size=3
stride=1
pad=1
@@ -21,7 +21,7 @@ activation=relu
batch_normalize=1
[convolutional]
-filters=192
+filters=256
size=3
stride=1
pad=1
@@ -29,7 +29,7 @@ activation=relu
batch_normalize=1
[convolutional]
-filters=192
+filters=256
size=3
stride=1
pad=1
@@ -37,7 +37,7 @@ activation=relu
batch_normalize=1
[convolutional]
-filters=192
+filters=256
size=3
stride=1
pad=1
@@ -45,7 +45,7 @@ activation=relu
batch_normalize=1
[convolutional]
-filters=192
+filters=256
size=3
stride=1
pad=1
@@ -53,7 +53,7 @@ activation=relu
batch_normalize=1
[convolutional]
-filters=192
+filters=256
size=3
stride=1
pad=1
@@ -61,7 +61,7 @@ activation=relu
batch_normalize=1
[convolutional]
-filters=192
+filters=256
size=3
stride=1
pad=1
@@ -69,7 +69,7 @@ activation=relu
batch_normalize=1
[convolutional]
-filters=192
+filters=256
size=3
stride=1
pad=1
@@ -77,7 +77,7 @@ activation=relu
batch_normalize=1
[convolutional]
-filters=192
+filters=256
size=3
stride=1
pad=1
@@ -85,7 +85,7 @@ activation=relu
batch_normalize=1
[convolutional]
-filters=192
+filters=256
size=3
stride=1
pad=1
@@ -93,7 +93,7 @@ activation=relu
batch_normalize=1
[convolutional]
-filters=192
+filters=256
size=3
stride=1
pad=1
@@ -101,7 +101,7 @@ activation=relu
batch_normalize=1
[convolutional]
-filters=192
+filters=256
size=3
stride=1
pad=1
@@ -109,14 +109,13 @@ activation=relu
batch_normalize=1
[convolutional]
-filters=192
+filters=256
size=3
stride=1
pad=1
activation=relu
batch_normalize=1
[convolutional]
filters=1
size=1
@@ -124,6 +123,10 @@ stride=1
pad=1
activation=linear
+[reorg]
+extra=1
+stride=1
[softmax]
[cost]

(modified file)

@@ -12,7 +12,7 @@ exposure = 1.5
hue=.1
learning_rate=0.001
-max_batches = 40100
+max_batches = 40200
policy=steps
steps=-1,100,20000,30000
scales=.1,10,.1,.1

cfg/yolo-voc.2.0.cfg (new file, 244 lines)

@@ -0,0 +1,244 @@
[net]
batch=64
subdivisions=8
height=416
width=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.0001
max_batches = 45000
policy=steps
steps=100,25000,35000
scales=10,.1,.1
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
#######
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[route]
layers=-9
[reorg]
stride=2
[route]
layers=-1,-3
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=125
activation=linear
[region]
anchors = 1.08,1.19, 3.42,4.41, 6.63,11.38, 9.42,5.11, 16.62,10.52
bias_match=1
classes=20
coords=4
num=5
softmax=1
jitter=.2
rescore=1
object_scale=5
noobject_scale=1
class_scale=1
coord_scale=1
absolute=1
thresh = .6
random=0
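
Note: in cfg/yolo-voc.2.0.cfg the convolution ahead of [region] has filters=125 because the region layer expects num*(coords+classes+1) = 5*(4+20+1) outputs per cell, and the anchors are width,height pairs measured in grid cells of the 13x13 output map (416/32). A minimal sketch of the usual YOLOv2-style box decoding for one anchor, with made-up raw outputs; this is illustrative, not copied from darknet's region layer:

    #include <math.h>
    #include <stdio.h>

    static float sigmoid(float x) { return 1.f / (1.f + expf(-x)); }

    int main(void) {
        /* first anchor pair from the [region] section above */
        float anchor_w = 1.08f, anchor_h = 1.19f;
        int grid = 13;                      /* 416 / 32 */
        int cx = 6, cy = 6;                 /* grid cell holding the prediction */
        float tx = 0.1f, ty = -0.2f, tw = 0.3f, th = 0.0f;  /* example raw outputs */

        /* box center and size, normalized to [0,1] of the image */
        float bx = (cx + sigmoid(tx)) / grid;
        float by = (cy + sigmoid(ty)) / grid;
        float bw = anchor_w * expf(tw) / grid;
        float bh = anchor_h * expf(th) / grid;

        printf("box: x=%.3f y=%.3f w=%.3f h=%.3f\n", bx, by, bw, bh);
        return 0;
    }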

cfg/yolo-voc.cfg (modified)

@@ -11,11 +11,12 @@ saturation = 1.5
exposure = 1.5
hue=.1
-learning_rate=0.0001
-max_batches = 45000
+learning_rate=0.001
+burn_in=1000
+max_batches = 80200
policy=steps
-steps=100,25000,35000
-scales=10,.1,.1
+steps=40000,60000
+scales=.1,.1
[convolutional]
batch_normalize=1
@@ -203,11 +204,19 @@ activation=leaky
[route]
layers=-9
+[convolutional]
+batch_normalize=1
+size=1
+stride=1
+pad=1
+filters=64
+activation=leaky
[reorg]
stride=2
[route]
-layers=-1,-3
+layers=-1,-4
[convolutional]
batch_normalize=1
@@ -224,14 +233,15 @@ pad=1
filters=125
activation=linear
[region]
-anchors = 1.08,1.19, 3.42,4.41, 6.63,11.38, 9.42,5.11, 16.62,10.52
+anchors = 1.3221, 1.73145, 3.19275, 4.00944, 5.05587, 8.09892, 9.47112, 4.84053, 11.2364, 10.0071
bias_match=1
classes=20
coords=4
num=5
softmax=1
-jitter=.2
+jitter=.3
rescore=1
object_scale=5
@@ -241,4 +251,4 @@ coord_scale=1
absolute=1
thresh = .6
-random=0
+random=1
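
Note: besides the longer schedule and the new anchors, the diff above reworks the passthrough: a batch-normalized 1x1 convolution with 64 filters is inserted before [reorg], and the final [route] is retargeted from -1,-3 to -1,-4 so it still reaches the 13x13 trunk past the inserted layer. [reorg] with stride=2 repacks the 26x26x64 map into 13x13x256 so it can be concatenated with the 1024-channel trunk. A minimal space-to-depth sketch of that repacking; the index convention is illustrative, not copied from darknet's reorg_cpu:

    #include <stdio.h>
    #include <stdlib.h>

    /* Repack (C,H,W) into (C*stride*stride, H/stride, W/stride). */
    static void reorg(const float *in, float *out, int c, int h, int w, int stride) {
        int oh = h / stride, ow = w / stride;
        for (int k = 0; k < c; ++k)
            for (int y = 0; y < h; ++y)
                for (int x = 0; x < w; ++x) {
                    int ok = k * stride * stride + (y % stride) * stride + (x % stride);
                    out[(ok * oh + y / stride) * ow + x / stride] = in[(k * h + y) * w + x];
                }
    }

    int main(void) {
        int c = 64, h = 26, w = 26;                  /* output of the new 1x1, 64-filter conv */
        float *in  = calloc((size_t)c * h * w, sizeof *in);
        float *out = calloc((size_t)c * h * w, sizeof *out);
        reorg(in, out, c, h, w, 2);                  /* 26x26x64 -> 13x13x256 */
        printf("reorg: %dx%dx%d -> %dx%dx%d\n", w, h, c, w / 2, h / 2, c * 4);
        free(in); free(out);
        return 0;
    }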

cfg/yolo.2.0.cfg (new file, 244 lines)

@@ -0,0 +1,244 @@
[net]
batch=1
subdivisions=1
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
max_batches = 120000
policy=steps
steps=-1,100,80000,100000
scales=.1,10,.1,.1
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
#######
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[route]
layers=-9
[reorg]
stride=2
[route]
layers=-1,-3
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=425
activation=linear
[region]
anchors = 0.738768,0.874946, 2.42204,2.65704, 4.30971,7.04493, 10.246,4.59428, 12.6868,11.8741
bias_match=1
classes=80
coords=4
num=5
softmax=1
jitter=.2
rescore=1
object_scale=5
noobject_scale=1
class_scale=1
coord_scale=1
absolute=1
thresh = .6
random=0

cfg/yolo.cfg (modified)

@@ -1,8 +1,8 @@
[net]
-batch=1
-subdivisions=1
-width=416
-height=416
+batch=64
+subdivisions=8
+height=608
+width=608
channels=3
momentum=0.9
decay=0.0005
@@ -12,10 +12,11 @@ exposure = 1.5
hue=.1
learning_rate=0.001
-max_batches = 120000
+burn_in=1000
+max_batches = 500200
policy=steps
-steps=-1,100,80000,100000
-scales=.1,10,.1,.1
+steps=400000,450000
+scales=.1,.1
[convolutional]
batch_normalize=1
@@ -203,11 +204,19 @@ activation=leaky
[route]
layers=-9
+[convolutional]
+batch_normalize=1
+size=1
+stride=1
+pad=1
+filters=64
+activation=leaky
[reorg]
stride=2
[route]
-layers=-1,-3
+layers=-1,-4
[convolutional]
batch_normalize=1
@@ -224,14 +233,15 @@ pad=1
filters=425
activation=linear
[region]
-anchors = 0.738768,0.874946, 2.42204,2.65704, 4.30971,7.04493, 10.246,4.59428, 12.6868,11.8741
+anchors = 0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828
bias_match=1
classes=80
coords=4
num=5
softmax=1
-jitter=.2
+jitter=.3
rescore=1
object_scale=5
@@ -241,4 +251,4 @@ coord_scale=1
absolute=1
thresh = .6
-random=0
+random=1
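
Note: both detection configs also switch their training schedules: burn_in=1000 is added, max_batches grows (80200 for the VOC model, 500200 for the COCO one), and the step policy becomes two tenfold drops (steps=40000,60000 and steps=400000,450000 respectively, scales=.1,.1), alongside jitter=.3 and random=1 for multi-scale training. A minimal sketch of the resulting learning-rate curve for the COCO config, assuming darknet's usual burn-in ramp of learning_rate*(batch/burn_in)^power with power defaulting to 4 (not set in this cfg):

    #include <math.h>
    #include <stdio.h>

    static float lr_at(long i) {
        float base = 0.001f;                       /* learning_rate=0.001 */
        long burn_in = 1000;                       /* burn_in=1000 */
        long steps[]   = {400000, 450000};         /* steps=400000,450000 */
        float scales[] = {0.1f, 0.1f};             /* scales=.1,.1 */

        if (i < burn_in) return base * powf((float)i / burn_in, 4.f);
        float lr = base;
        for (int s = 0; s < 2; ++s)                /* "steps" policy: multiply past each step */
            if (i >= steps[s]) lr *= scales[s];
        return lr;
    }

    int main(void) {
        long probes[] = {100, 1000, 100000, 400001, 450001, 500200};
        for (int k = 0; k < 6; ++k)
            printf("batch %6ld: lr = %g\n", probes[k], lr_at(probes[k]));
        return 0;
    }

So the rate climbs from near zero to 0.001 over the first 1000 batches, then drops tenfold at 400k and again at 450k.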