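# Darknet-53 image classifier, 448x448 input.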
[net]
# Training - start training with darknet53.weights
# batch=128
# subdivisions=8

# Testing
batch=1
subdivisions=1

height=448
width=448
channels=3
min_crop=448
max_crop=512

learning_rate=0.001
policy=poly
power=4
max_batches=100000
momentum=0.9
decay=0.0005

[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky

# Downsample
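# Each stage below halves the spatial resolution with a stride-2 3x3 convolution,
# then stacks residual blocks: 1x1 reduce, 3x3 expand, [shortcut] from=-3.
# 64-filter stage: 1 residual block.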
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

# Downsample
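# 128-filter stage: 2 residual blocks.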
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

# Downsample
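# 256-filter stage: 8 residual blocks.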
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

# Downsample
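# 512-filter stage: 8 residual blocks.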
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

# Downsample
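# 1024-filter stage: 4 residual blocks.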
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

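# Classification head: global average pooling, 1x1 convolution to 1000 classes, softmax.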
[avgpool]

[convolutional]
filters=1000
size=1
stride=1
pad=1
activation=linear

[softmax]
groups=1