working on TED demo

Joseph Redmon
2017-04-12 14:22:53 -07:00
parent 77ee1118bc
commit addcc4ef96
24 changed files with 392 additions and 110 deletions

cfg/darknet9000.cfg (new file, 205 lines added)

@@ -0,0 +1,205 @@
[net]
# Training
# batch=128
# subdivisions=4
# Testing
batch = 1
subdivisions = 1
height=448
width=448
max_crop=512
channels=3
momentum=0.9
decay=0.0005
learning_rate=0.001
policy=poly
power=4
max_batches=100000
angle=7
hue=.1
saturation=.75
exposure=.75
aspect=.75

[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky

[maxpool]
size=2
stride=2

[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky

[maxpool]
size=2
stride=2

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky

[maxpool]
size=2
stride=2

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[maxpool]
size=2
stride=2

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[maxpool]
size=2
stride=2

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

[convolutional]
filters=9418
size=1
stride=1
pad=1
activation=linear

[avgpool]

[softmax]
groups=1
tree=data/9k.tree

[cost]
type=masked
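This cfg is the Darknet-19 classifier widened to 9418 outputs, with the final [softmax] evaluated over the WordTree in data/9k.tree instead of a flat label set. As a usage sketch only, a classifier cfg like this is normally run through darknet's classifier mode together with a dataset file (such as the nine-line one added below) and a trained weights file; the dataset path cfg/imagenet9k.dataset and the weights name darknet9000.weights here are assumptions, not names taken from this commit:

./darknet classifier predict cfg/imagenet9k.dataset cfg/darknet9000.cfg darknet9000.weights data/dog.jpg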

@@ -0,0 +1,9 @@
classes=9418
train = data/9k.train.list
valid = /data/imagenet/imagenet1k.valid.list
leaves = data/imagenet1k.labels
backup = /home/pjreddie/backup/
labels = data/9k.labels
names = data/9k.names
top=5
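The labels and leaves entries tie this dataset file to the hierarchical (WordTree) softmax configured above: the network predicts a conditional probability for each class given its parent in the tree, and the absolute probability of a label is the product of the conditionals along its path to the root. The C sketch below is only a toy illustration of that idea with assumed data structures, not darknet's own tree code:

#include <stdio.h>

/* Toy WordTree sketch (not darknet's implementation): each node stores the
   index of its parent, and cond_prob[i] holds P(node i | parent of i). */
typedef struct {
    int n;        /* number of nodes */
    int *parent;  /* parent[i] = index of i's parent, -1 at the root */
} tree_sketch;

/* Multiply conditional probabilities from node i up to the root. */
float path_probability(const tree_sketch *t, const float *cond_prob, int i)
{
    float p = 1.0f;
    while (i >= 0) {
        p *= cond_prob[i];
        i = t->parent[i];
    }
    return p;
}

int main(void)
{
    /* Hypothetical 4-node path: physical object -> animal -> dog -> terrier */
    int parent[4] = { -1, 0, 1, 2 };
    float cond[4] = { 1.0f, 0.9f, 0.8f, 0.7f };  /* P(node | parent) */
    tree_sketch t = { 4, parent };
    printf("P(terrier) = %f\n", path_probability(&t, cond, 3));  /* 0.504 */
    return 0;
}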

@@ -1,17 +1,24 @@
[net]
# Testing
# batch=1
# subdivisions=1
# Training
batch=64
subdivisions=8
batch=1
subdivisions=1
height=416
width=416
height=544
width=544
channels=3
momentum=0.9
decay=0.0005
learning_rate=0.00001
max_batches = 242200
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=500,200000,240000
scales=10,.1,.1
steps=400000,450000
scales=.1,.1
hue=.1
saturation=.75