new models 🐍 🐍 🐍

Joseph Redmon 2018-08-15 10:59:59 -07:00
parent 9a4b19c415
commit f86901f617
19 changed files with 4814 additions and 44 deletions

View File

@@ -1,7 +1,7 @@
-GPU=0
-CUDNN=0
-OPENCV=0
-OPENMP=0
+GPU=1
+CUDNN=1
+OPENCV=1
+OPENMP=1
 DEBUG=0
 
 ARCH= -gencode arch=compute_30,code=sm_30 \

View File

@@ -1,5 +1,9 @@
 [net]
-batch=128
+# Training
+# batch=128
+# subdivisions=1
+# Testing
+batch=1
 subdivisions=1
 height=227
 width=227
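
(These [net] headers follow the convention used throughout the cfgs in this commit: the Testing values are live and the Training values are commented out, to be swapped back for training. batch is the number of images per weight update and subdivisions splits each batch into chunks that fit on the GPU, so each forward pass sees batch/subdivisions images: 128/1 = 128 when training here, 1/1 = 1 as committed for testing.)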

View File

@ -1,7 +1,7 @@
classes= 80 classes= 80
train = /home/pjreddie/data/coco/trainvalno5k.txt train = /home/pjreddie/data/coco/trainvalno5k.txt
#valid = coco_testdev valid = coco_testdev
valid = data/coco_val_5k.list #valid = data/coco_val_5k.list
names = data/coco.names names = data/coco.names
backup = /home/pjreddie/backup/ backup = /home/pjreddie/backup/
eval=coco eval=coco

View File

@@ -1,21 +1,30 @@
 [net]
-# Train
-batch=128
-subdivisions=1
-# Test
-#batch=1
-#subdivisions=1
+# Training
+# batch=128
+# subdivisions=1
+# Testing
+batch=1
+subdivisions=1
 height=256
 width=256
+min_crop=128
+max_crop=448
 channels=3
 momentum=0.9
 decay=0.0005
-max_crop=320
+burn_in=1000
 
 learning_rate=0.1
 policy=poly
 power=4
-max_batches=1600000
+max_batches=800000
+
+angle=7
+hue=.1
+saturation=.75
+exposure=.75
+aspect=.75
 
 [convolutional]
 batch_normalize=1
@@ -97,14 +106,14 @@ stride=1
 pad=1
 activation=leaky
 
-[avgpool]
-
 [convolutional]
 filters=1000
 size=1
 stride=1
 pad=1
-activation=leaky
+activation=linear
 
+[avgpool]
+
 [softmax]
 groups=1
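
This classifier-head change does two things: the 1000-filter 1×1 convolution now comes before [avgpool] instead of after it, and its activation switches from leaky to linear. With a linear activation the two orderings give the same test-time scores, since a 1×1 linear conv (weight plus bias) commutes with average pooling; the old leaky activation did not. A quick numeric check of that claim (plain C, not darknet code):

    #include <stdio.h>

    /* avg-then-conv vs conv-then-avg for a 1x1 linear conv:
       one input channel, one filter, four pixels. */
    int main(void)
    {
        float x[4] = {1, 2, 3, 4}, w = 0.5f, b = 0.1f;
        float avg_then_conv = w * (x[0]+x[1]+x[2]+x[3]) / 4 + b;
        float conv_then_avg = 0;
        for (int k = 0; k < 4; ++k) conv_then_avg += (w * x[k] + b) / 4;
        printf("%f %f\n", avg_then_conv, conv_then_avg); /* both 1.350000 */
        return 0;
    }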

View File

@@ -1,10 +1,10 @@
 [net]
-subdivisions=1
-batch = 256
 inputs=256
 momentum=0.9
 decay=0.0
-time_steps=128
+subdivisions=1
+batch = 1
+time_steps=1
 
 learning_rate=.002
 adam=1
@@ -13,13 +13,13 @@ power=4
 max_batches=1000000
 
 [gru]
-output = 1024
+output = 256
 
 [gru]
-output = 1024
+output = 256
 
 [gru]
-output = 1024
+output = 256
 
 [connected]
 output=256
@@ -27,4 +27,3 @@ activation=linear
 [softmax]
-

990 cfg/resnet101.cfg Normal file
View File

@@ -0,0 +1,990 @@
[net]
# Training
# batch=128
# subdivisions=2
# Testing
batch=1
subdivisions=1
height=256
width=256
channels=3
min_crop=128
max_crop=448
burn_in=1000
learning_rate=0.1
policy=poly
power=4
max_batches=800000
momentum=0.9
decay=0.0005
angle=7
hue=.1
saturation=.75
exposure=.75
aspect=.75
[convolutional]
batch_normalize=1
filters=64
size=7
stride=2
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
# Conv 4
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
#Conv 5
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=2048
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=2048
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=2048
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
filters=1000
size=1
stride=1
pad=1
activation=linear
[avgpool]
[softmax]
groups=1
[cost]
type=sse
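
The [cost] type=sse at the end trains the softmax outputs against a summed squared error rather than cross-entropy. As a formula over the 1000 class probabilities it is simply the following (a sketch, not darknet's cost-layer code):

    /* sse cost over n softmax outputs: sum_i (truth_i - pred_i)^2 */
    float sse_cost(int n, const float *truth, const float *pred)
    {
        float cost = 0;
        for (int i = 0; i < n; ++i) {
            float d = truth[i] - pred[i];
            cost += d * d;
        }
        return cost;
    }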

228 cfg/resnet18.cfg Normal file
View File

@@ -0,0 +1,228 @@
[net]
# Training
# batch=128
# subdivisions=1
# Testing
batch=1
subdivisions=1
height=256
width=256
channels=3
min_crop=128
max_crop=448
burn_in=1000
learning_rate=0.1
policy=poly
power=4
max_batches=800000
momentum=0.9
decay=0.0005
angle=7
hue=.1
saturation=.75
exposure=.75
aspect=.75
[convolutional]
batch_normalize=1
filters=64
size=7
stride=2
pad=1
activation=leaky
[maxpool]
size=2
stride=2
# Residual Block
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=linear
[shortcut]
activation=leaky
from=-3
# Residual Block
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=linear
[shortcut]
activation=leaky
from=-3
# Strided Residual Block
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=linear
[shortcut]
activation=leaky
from=-3
# Residual Block
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=linear
[shortcut]
activation=leaky
from=-3
# Strided Residual Block
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=linear
[shortcut]
activation=leaky
from=-3
# Residual Block
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=linear
[shortcut]
activation=leaky
from=-3
# Strided Residual Block
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=linear
[shortcut]
activation=leaky
from=-3
# Residual Block
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=linear
[shortcut]
activation=leaky
from=-3
[avgpool]
[convolutional]
filters=1000
size=1
stride=1
pad=1
activation=linear
[softmax]
groups=1
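
Each # Residual Block above is a pair of 3×3 convolutions whose output is added back to the block's input by [shortcut] from=-3 (the layer three entries back), with the leaky activation applied after the sum; the second conv is linear so the nonlinearity is applied once, to the sum. A minimal sketch of what the shortcut computes, using flat float arrays rather than darknet's actual layer structs:

    #include <stddef.h>

    /* out = leaky(conv_out + skip), element-wise over n = w*h*c values.
       darknet's leaky slope is 0.1. */
    static float leaky(float x) { return x > 0 ? x : 0.1f * x; }

    void shortcut_add(size_t n, const float *skip, const float *conv_out, float *out)
    {
        for (size_t i = 0; i < n; ++i)
            out[i] = leaky(conv_out[i] + skip[i]);
    }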

392 cfg/resnet34.cfg Normal file
View File

@@ -0,0 +1,392 @@
[net]
# Training
# batch=128
# subdivisions=2
# Testing
batch=1
subdivisions=1
height=256
width=256
channels=3
min_crop=128
max_crop=448
burn_in=1000
learning_rate=0.1
policy=poly
power=4
max_batches=800000
momentum=0.9
decay=0.0005
angle=7
hue=.1
saturation=.75
exposure=.75
aspect=.75
[convolutional]
batch_normalize=1
filters=64
size=7
stride=2
pad=1
activation=leaky
[maxpool]
size=2
stride=2
# Residual Block
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=linear
[shortcut]
activation=leaky
from=-3
# Residual Block
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=linear
[shortcut]
activation=leaky
from=-3
# Residual Block
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=linear
[shortcut]
activation=leaky
from=-3
# Strided Residual Block
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=linear
[shortcut]
activation=leaky
from=-3
# Residual Block
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=linear
[shortcut]
activation=leaky
from=-3
# Residual Block
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=linear
[shortcut]
activation=leaky
from=-3
# Residual Block
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=linear
[shortcut]
activation=leaky
from=-3
# Strided Residual Block
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=linear
[shortcut]
activation=leaky
from=-3
# Residual Block
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=linear
[shortcut]
activation=leaky
from=-3
# Residual Block
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=linear
[shortcut]
activation=leaky
from=-3
# Residual Block
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=linear
[shortcut]
activation=leaky
from=-3
# Residual Block
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=linear
[shortcut]
activation=leaky
from=-3
# Residual Block
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=linear
[shortcut]
activation=leaky
from=-3
# Residual Block
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=linear
[shortcut]
activation=leaky
from=-3
# Residual Block
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=linear
[shortcut]
activation=leaky
from=-3
# Residual Block
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=linear
[shortcut]
activation=leaky
from=-3
[avgpool]
[convolutional]
filters=1000
size=1
stride=1
pad=1
activation=linear
[softmax]
groups=1

View File

@@ -9,16 +9,17 @@ subdivisions=1
 height=256
 width=256
-max_crop=448
 channels=3
-momentum=0.9
-decay=0.0005
+min_crop=128
+max_crop=448
 burn_in=1000
 learning_rate=0.1
 policy=poly
 power=4
-max_batches=1600000
+max_batches=800000
+momentum=0.9
+decay=0.0005
 angle=7
 hue=.1
@@ -26,6 +27,7 @@ saturation=.75
 exposure=.75
 aspect=.75
+
 [convolutional]
 batch_normalize=1
 filters=64
@@ -493,6 +495,7 @@ activation=leaky
+[avgpool]
 [convolutional]
 filters=1000
@@ -501,8 +504,6 @@ stride=1
 pad=1
 activation=linear
-[avgpool]
-
 [softmax]
 groups=1

1048 cfg/resnext101-32x4d.cfg Normal file

File diff suppressed because it is too large

1558 cfg/resnext152-32x4d.cfg Normal file

File diff suppressed because it is too large

523 cfg/resnext50.cfg Normal file
View File

@@ -0,0 +1,523 @@
[net]
# Training
# batch=128
# subdivisions=4
# Testing
batch=1
subdivisions=1
height=256
width=256
channels=3
min_crop=128
max_crop=448
burn_in=1000
learning_rate=0.1
policy=poly
power=4
max_batches=800000
momentum=0.9
decay=0.0005
angle=7
hue=.1
saturation=.75
exposure=.75
aspect=.75
[convolutional]
batch_normalize=1
filters=64
size=7
stride=2
pad=1
activation=leaky
[maxpool]
size=2
stride=2
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
groups=32
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
groups=32
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
groups=32
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
groups=32
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
groups=32
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
groups=32
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
groups=32
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
# Conv 4
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
groups=32
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
groups=32
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
groups=32
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
groups=32
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
groups=32
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
groups=32
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
#Conv 5
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
groups=32
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=2048
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
groups=32
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=2048
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
groups=32
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=2048
size=1
stride=1
pad=1
activation=linear
[shortcut]
from=-4
activation=leaky
[avgpool]
[convolutional]
filters=1000
size=1
stride=1
pad=1
activation=linear
[softmax]
groups=1
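
The groups=32 on each 3×3 convolution above is what makes this ResNeXt rather than ResNet: the input channels are split into 32 groups that are convolved independently, cutting that layer's weight count by a factor of 32 (the [softmax] groups=1 at the end is unrelated). A quick check of the parameter arithmetic for the first 128-filter stage, using darknet's count of (c/groups)*n*size*size weights per convolution:

    #include <stdio.h>

    int main(void)
    {
        int c = 128, n = 128, size = 3, groups = 32;
        long dense   = (long)c * n * size * size;            /* 147456 */
        long grouped = (long)(c / groups) * n * size * size; /*   4608 */
        printf("dense %ld vs grouped %ld weights\n", dense, grouped);
        return 0;
    }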

View File

@@ -5,8 +5,8 @@ subdivisions=1
 # Training
 # batch=64
 # subdivisions=8
-width=416
-height=416
+width=608
+height=608
 channels=3
 momentum=0.9
 decay=0.0005

View File

@@ -172,7 +172,7 @@ filters=255
 activation=linear
 
 [yolo]
-mask = 1,2,3
+mask = 0,1,2
 anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
 classes=80
 num=6

View File

@@ -1,12 +1,12 @@
 [net]
 # Testing
-batch=1
-subdivisions=1
+# batch=1
+# subdivisions=1
 # Training
-# batch=64
-# subdivisions=16
+batch=64
+subdivisions=16
-width=416
-height=416
+width=608
+height=608
 channels=3
 momentum=0.9
 decay=0.0005

View File

@@ -396,6 +396,7 @@ void validate_classifier_single(char *datacfg, char *filename, char *weightfile)
         }
         image im = load_image_color(paths[i], 0, 0);
         image crop = center_crop_image(im, net->w, net->h);
+        //grayscale_image_3c(crop);
         //show_image(im, "orig");
         //show_image(crop, "cropped");
         //cvWaitKey(0);

View File

@@ -2,6 +2,7 @@
 #include <sys/time.h>
 #include <assert.h>
 
+void normalize_image2(image p);
 void train_isegmenter(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear, int display)
 {
     int i;
@@ -26,6 +27,10 @@ void train_isegmenter(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
 
     network *net = nets[0];
     image pred = get_network_image(net);
+    image embed = pred;
+    embed.c = 3;
+    embed.data += embed.w*embed.h*80;
     int div = net->w/pred.w;
     assert(pred.w * div == net->w);
     assert(pred.h * div == net->h);
@@ -98,6 +103,11 @@ void train_isegmenter(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
     pred.c = 80;
     image mask = mask_to_rgb(tr);
     image prmask = mask_to_rgb(pred);
+    image ecopy = copy_image(embed);
+    normalize_image2(ecopy);
+    show_image(ecopy, "embed", 1);
+    free_image(ecopy);
+
     show_image(im, "input", 1);
     show_image(prmask, "pred", 1);
     show_image(mask, "truth", 100);
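
Note that embed here is a view, not a copy: it borrows pred's buffer and points data past the first 80 channel planes (darknet images are planar, one w*h float plane per channel), so channels 80..82 can be rendered as an RGB visualization of the embedding; copy_image is then used so normalization doesn't touch the live network output. A sketch of that slicing trick with a darknet-style image struct:

    #include <stddef.h>

    typedef struct { int w, h, c; float *data; } image; /* darknet-style, planar */

    /* Return an nch-channel view starting at channel `offset`, sharing m's buffer. */
    image channel_view(image m, int offset, int nch)
    {
        image v = m;                          /* share the same buffer */
        v.c = nch;
        v.data += (size_t)m.w * m.h * offset; /* skip `offset` full planes */
        return v;
    }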

View File

@@ -127,6 +127,7 @@ matrix load_image_augment_paths(char **paths, int n, int min, int max, int size,
         show_image(crop, "crop");
         cvWaitKey(0);
         */
+        //grayscale_image_3c(crop);
         free_image(im);
         X.vals[i] = crop.data;
         X.cols = crop.h*crop.w*crop.c;

View File

@@ -109,9 +109,8 @@ void forward_iseg_layer(const layer l, network net)
         }
 
-        memset(l.counts, 0, 90*sizeof(float));
+        memset(l.counts, 0, 90*sizeof(int));
         for(i = 0; i < 90; ++i){
-            l.counts[i] = 0;
             fill_cpu(ids, 0, l.sums[i], 1);
 
             int c = net.truth[b*l.truths + i*(l.w*l.h+1)];
@@ -153,7 +152,7 @@ void forward_iseg_layer(const layer l, network net)
             scal_cpu(ids, 1.f/l.counts[i], l.sums[i], 1);
             if(b == 0 && net.gpu_index == 0){
                 printf("%4d, %6.3f, ", l.counts[i], mse[i]);
-                for(j = 0; j < ids/4; ++j){
+                for(j = 0; j < ids; ++j){
                     printf("%6.3f,", l.sums[i][j]);
                 }
                 printf("\n");
@@ -180,6 +179,13 @@ void forward_iseg_layer(const layer l, network net)
                 }
             }
         }
 
+        for(i = 0; i < ids; ++i){
+            for(k = 0; k < l.w*l.h; ++k){
+                int index = b*l.outputs + (i+l.classes)*l.w*l.h + k;
+                l.delta[index] *= .01;
+            }
+        }
+
     }
     *(l.cost) = pow(mag_array(l.delta, l.outputs * l.batch), 2);
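
The loop added at the end of the forward pass scales the delta of the ids embedding channels (stored after the l.classes mask channels in each batch item's output block) by 0.01, keeping the embedding gradient from dominating the mask loss. Isolated as a standalone helper over a flat delta buffer, the indexing looks like this (a sketch, assuming the same planar channel-major layout):

    /* Damp the gradient of the `ids` embedding channels for batch item b. */
    void damp_embedding_delta(float *delta, int b, int outputs,
                              int classes, int ids, int w, int h)
    {
        int i, k;
        for (i = 0; i < ids; ++i) {
            for (k = 0; k < w * h; ++k) {
                delta[b * outputs + (i + classes) * w * h + k] *= 0.01f;
            }
        }
    }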