Updated cfg-files for new weights-files

Author: AlexeyAB
Date: 2017-04-07 18:06:56 +03:00
parent 9c63bd33ad
commit e5ab66d7a7
7 changed files with 95 additions and 39 deletions

View File

@@ -1,6 +1,6 @@
-darknet.exe detector test data/voc.data yolo-voc.cfg yolo-voc.weights -i 0 -thresh 0.1
+darknet.exe detector test data/voc.data yolo-voc.cfg yolo-voc.weights -i 0 -thresh 0.2
 pause
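The only change in this launcher script is the detection threshold passed to darknet.exe, raised from 0.1 to 0.2. As a rough illustration of what a confidence threshold does (a minimal Python sketch, not darknet's actual code; the detection tuples are made up):

# Minimal sketch of confidence-threshold filtering; the detections list
# and its (label, confidence, box) layout are hypothetical.
detections = [("dog", 0.92, (10, 20, 300, 250)),
              ("cat", 0.15, (40, 60, 120, 90))]
thresh = 0.2  # was 0.1 before this commit
kept = [d for d in detections if d[1] > thresh]
print(kept)   # only the 0.92 "dog" box survives the higher threshold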

View File

@@ -12,7 +12,7 @@ exposure = 1.5
 hue=.1
 learning_rate=0.001
-max_batches = 40100
+max_batches = 40200
 policy=steps
 steps=-1,100,20000,30000
 scales=.1,10,.1,.1

View File

@@ -1,6 +1,10 @@
 [net]
-batch=64
-subdivisions=8
+# Testing
+batch=1
+subdivisions=1
+# Training
+# batch=64
+# subdivisions=8
 height=416
 width=416
 channels=3
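The [net] header now ships in testing mode (batch=1, subdivisions=1) with the training values left commented out. Under darknet's usual meaning of these keys (an assumption here, not spelled out in the diff), subdivisions splits each batch into smaller chunks that fit in GPU memory:

# Images per GPU forward pass vs. per weight update, using the training
# values that are now commented out in the cfg (batch=64, subdivisions=8).
batch, subdivisions = 64, 8
mini_batch = batch // subdivisions
print(f"{mini_batch} images per pass, {batch} images per weight update")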
@@ -11,11 +15,12 @@ saturation = 1.5
 exposure = 1.5
 hue=.1
-learning_rate=0.0001
-max_batches = 45000
+learning_rate=0.001
+burn_in=1000
+max_batches = 80200
 policy=steps
-steps=100,25000,35000
-scales=10,.1,.1
+steps=40000,60000
+scales=.1,.1

 [convolutional]
 batch_normalize=1
@@ -203,11 +208,19 @@ activation=leaky
 [route]
 layers=-9

+[convolutional]
+batch_normalize=1
+size=1
+stride=1
+pad=1
+filters=64
+activation=leaky
+
 [reorg]
 stride=2

 [route]
-layers=-1,-3
+layers=-1,-4

 [convolutional]
 batch_normalize=1
@@ -224,14 +237,15 @@ pad=1
 filters=125
 activation=linear

 [region]
-anchors = 1.08,1.19, 3.42,4.41, 6.63,11.38, 9.42,5.11, 16.62,10.52
+anchors = 1.3221, 1.73145, 3.19275, 4.00944, 5.05587, 8.09892, 9.47112, 4.84053, 11.2364, 10.0071
 bias_match=1
 classes=20
 coords=4
 num=5
 softmax=1
-jitter=.2
+jitter=.3
 rescore=1
 object_scale=5
@@ -241,4 +255,4 @@ coord_scale=1
 absolute=1
 thresh = .6
-random=0
+random=1
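The new VOC training schedule replaces the old warm-up trick (a step at iteration 100 with scale 10) with an explicit burn_in and two decay steps. A minimal sketch of how the effective learning rate evolves under the new settings; the quartic burn-in ramp and the power=4 default are assumptions about darknet's steps policy, not something stated in this diff:

# Sketch of the "steps" learning-rate policy from the updated yolo-voc.cfg:
# learning_rate=0.001, burn_in=1000, steps=40000,60000, scales=.1,.1
def current_lr(iteration, base_lr=0.001, burn_in=1000,
               steps=(40000, 60000), scales=(0.1, 0.1), power=4):
    if iteration < burn_in:
        # ramp from ~0 up to base_lr (assumed darknet default power=4)
        return base_lr * (iteration / burn_in) ** power
    lr = base_lr
    for step, scale in zip(steps, scales):
        if iteration >= step:
            lr *= scale
    return lr

for it in (500, 1000, 40000, 60000, 80200):
    print(it, current_lr(it))
# ~0.0000625 at 500, 0.001 from 1000 to 40000, then 0.0001, then 0.00001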

View File

@@ -1,8 +1,12 @@
 [net]
+# Testing
 batch=1
 subdivisions=1
-width=416
+# Training
+# batch=64
+# subdivisions=8
 height=416
+width=416
 channels=3
 momentum=0.9
 decay=0.0005
@@ -12,10 +16,11 @@ exposure = 1.5
 hue=.1
 learning_rate=0.001
-max_batches = 120000
+burn_in=1000
+max_batches = 500200
 policy=steps
-steps=-1,100,80000,100000
-scales=.1,10,.1,.1
+steps=400000,450000
+scales=.1,.1

 [convolutional]
 batch_normalize=1
@@ -203,11 +208,19 @@ activation=leaky
 [route]
 layers=-9

+[convolutional]
+batch_normalize=1
+size=1
+stride=1
+pad=1
+filters=64
+activation=leaky
+
 [reorg]
 stride=2

 [route]
-layers=-1,-3
+layers=-1,-4

 [convolutional]
 batch_normalize=1
@@ -224,14 +237,15 @@ pad=1
 filters=425
 activation=linear

 [region]
-anchors = 0.738768,0.874946, 2.42204,2.65704, 4.30971,7.04493, 10.246,4.59428, 12.6868,11.8741
+anchors = 0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828
 bias_match=1
 classes=80
 coords=4
 num=5
 softmax=1
-jitter=.2
+jitter=.3
 rescore=1
 object_scale=5
@@ -241,4 +255,4 @@ coord_scale=1
 absolute=1
 thresh = .6
-random=0
+random=1
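Both region layers switch to a new set of five anchor priors (num=5). In YOLOv2 these values are expressed in units of the output grid, which for a 416x416 input and 32x downsampling is 13x13; the conversion below is a small illustration under that assumption, not something stated in the diff:

# Convert the new COCO anchors from grid-cell units to input pixels,
# assuming a 416x416 network input and a 32x downsampling factor (13x13 grid).
anchors = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434,
           7.88282, 3.52778, 9.77052, 9.16828]
stride = 416 // 13  # 32 pixels per grid cell
pairs = list(zip(anchors[0::2], anchors[1::2]))
for w, h in pairs:
    print(f"prior: {w * stride:6.1f} x {h * stride:6.1f} px")
# The largest prior (~313 x 293 px) covers most of the 416 px input.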

View File

@@ -12,7 +12,7 @@ exposure = 1.5
 hue=.1
 learning_rate=0.001
-max_batches = 40100
+max_batches = 40200
 policy=steps
 steps=-1,100,20000,30000
 scales=.1,10,.1,.1

View File

@@ -1,6 +1,10 @@
 [net]
-batch=64
-subdivisions=8
+# Testing
+batch=1
+subdivisions=1
+# Training
+# batch=64
+# subdivisions=8
 height=416
 width=416
 channels=3
@@ -11,11 +15,12 @@ saturation = 1.5
 exposure = 1.5
 hue=.1
-learning_rate=0.0001
-max_batches = 45000
+learning_rate=0.001
+burn_in=1000
+max_batches = 80200
 policy=steps
-steps=100,25000,35000
-scales=10,.1,.1
+steps=40000,60000
+scales=.1,.1

 [convolutional]
 batch_normalize=1
@@ -203,11 +208,19 @@ activation=leaky
 [route]
 layers=-9

+[convolutional]
+batch_normalize=1
+size=1
+stride=1
+pad=1
+filters=64
+activation=leaky
+
 [reorg]
 stride=2

 [route]
-layers=-1,-3
+layers=-1,-4

 [convolutional]
 batch_normalize=1
@@ -224,14 +237,15 @@ pad=1
 filters=125
 activation=linear

 [region]
-anchors = 1.08,1.19, 3.42,4.41, 6.63,11.38, 9.42,5.11, 16.62,10.52
+anchors = 1.3221, 1.73145, 3.19275, 4.00944, 5.05587, 8.09892, 9.47112, 4.84053, 11.2364, 10.0071
 bias_match=1
 classes=20
 coords=4
 num=5
 softmax=1
-jitter=.2
+jitter=.3
 rescore=1
 object_scale=5
@@ -241,4 +255,4 @@ coord_scale=1
 absolute=1
 thresh = .6
-random=0
+random=1

View File

@@ -1,8 +1,12 @@
 [net]
+# Testing
 batch=1
 subdivisions=1
-width=416
+# Training
+# batch=64
+# subdivisions=8
 height=416
+width=416
 channels=3
 momentum=0.9
 decay=0.0005
@@ -12,10 +16,11 @@ exposure = 1.5
 hue=.1
 learning_rate=0.001
-max_batches = 120000
+burn_in=1000
+max_batches = 500200
 policy=steps
-steps=-1,100,80000,100000
-scales=.1,10,.1,.1
+steps=400000,450000
+scales=.1,.1

 [convolutional]
 batch_normalize=1
@@ -203,11 +208,19 @@ activation=leaky
 [route]
 layers=-9

+[convolutional]
+batch_normalize=1
+size=1
+stride=1
+pad=1
+filters=64
+activation=leaky
+
 [reorg]
 stride=2

 [route]
-layers=-1,-3
+layers=-1,-4

 [convolutional]
 batch_normalize=1
@@ -224,14 +237,15 @@ pad=1
 filters=425
 activation=linear

 [region]
-anchors = 0.738768,0.874946, 2.42204,2.65704, 4.30971,7.04493, 10.246,4.59428, 12.6868,11.8741
+anchors = 0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828
 bias_match=1
 classes=80
 coords=4
 num=5
 softmax=1
-jitter=.2
+jitter=.3
 rescore=1
 object_scale=5
@@ -241,4 +255,4 @@ coord_scale=1
 absolute=1
 thresh = .6
-random=0
+random=1