# TensorRT-Demo/mtcnn/det1_relu.prototxt
name: "PNet"
layer
{
name: "data"
type: "Input"
top: "data"
#
# Max allowed input image size as: 1280x720
# 'minsize' = 40
#
# Input dimension of the 1st 'scale':
# 720 * 12 / 40 = 216
# 1280 * 12 / 40 = 384
#
# H's in all scales: (scale factor = 0.709)
# Original: 216.0, 153.1, 108.6 77.0, 54.6, 38.7, 27.4, 19.5, 13.8, (9.8)
# Rounded: 216, 154, 108, 78, 54, 38, 28, 20, 14
# Offsets: 0, 216, 370, 478, 556, 610, 648, 676, 696, (710)
#
# Input dimension of the 'stacked image': 710x384
#
# Output dimension: (stride=2)
# (710 - 12) / 2 + 1 = 350
# (384 - 12) / 2 + 1 = 187
#
input_param{shape:{dim:1 dim:3 dim:710 dim:384}}
}
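#
# Layer-by-layer check of the output size (all convolutions are unpadded):
#   710x384 -> conv1 (3x3, s1): 708x382 -> pool1 (2x2, s2): 354x191
#           -> conv2 (3x3, s1): 352x189 -> conv3 (3x3, s1): 350x187
# which matches the (size - 12) / 2 + 1 formula above.
#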
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  convolution_param {
    num_output: 10
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
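#
# The ReLU/Scale/ReLU/Scale/Eltwise group below stands in for MTCNN's
# original PReLU1 layer, using only ops that plain Caffe parsers (such as
# the one in older TensorRT releases) support:
#   PReLU(x) = ReLU(x) - a * ReLU(-x)
# conv1_1 keeps the positive part; scale1_1 presumably stores a fixed -1
# per-channel weight in the converted caffemodel, ReLU1_2 isolates the
# negative part, and scale1_2 applies the (negated) per-channel slopes 'a'
# before the element-wise sum.  The same pattern repeats after conv2 and
# conv3.
#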
layer {
  name: "ReLU1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1_1"
}
layer {
  name: "scale1_1"
  bottom: "conv1"
  top: "conv1_2"
  type: "Scale"
  scale_param {
    axis: 1
    bias_term: false
  }
}
layer {
  name: "ReLU1_2"
  type: "ReLU"
  bottom: "conv1_2"
  top: "conv1_2"
}
layer {
  name: "scale1_2"
  bottom: "conv1_2"
  top: "conv1_2"
  type: "Scale"
  scale_param {
    axis: 1
    bias_term: false
  }
}
layer {
  name: "eltwise-sum1"
  type: "Eltwise"
  bottom: "conv1_1"
  bottom: "conv1_2"
  top: "conv1_3"
  eltwise_param { operation: SUM }
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1_3"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  convolution_param {
    num_output: 16
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "ReLU2"
  type: "ReLU"
  bottom: "conv2"
  top: "conv2_1"
}
layer {
  name: "scale2_1"
  bottom: "conv2"
  top: "conv2_2"
  type: "Scale"
  scale_param {
    axis: 1
    bias_term: false
  }
}
layer {
  name: "ReLU2_2"
  type: "ReLU"
  bottom: "conv2_2"
  top: "conv2_2"
}
layer {
  name: "scale2_2"
  bottom: "conv2_2"
  top: "conv2_2"
  type: "Scale"
  scale_param {
    axis: 1
    bias_term: false
  }
}
layer {
  name: "eltwise-sum2"
  type: "Eltwise"
  bottom: "conv2_1"
  bottom: "conv2_2"
  top: "conv2_3"
  eltwise_param { operation: SUM }
}
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "conv2_3"
  top: "conv3"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  convolution_param {
    num_output: 32
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "ReLU3"
  type: "ReLU"
  bottom: "conv3"
  top: "conv3_1"
}
layer {
  name: "scale3_1"
  bottom: "conv3"
  top: "conv3_2"
  type: "Scale"
  scale_param {
    axis: 1
    bias_term: false
  }
}
layer {
  name: "ReLU3_2"
  type: "ReLU"
  bottom: "conv3_2"
  top: "conv3_2"
}
layer {
  name: "scale3_2"
  bottom: "conv3_2"
  top: "conv3_2"
  type: "Scale"
  scale_param {
    axis: 1
    bias_term: false
  }
}
layer {
  name: "eltwise-sum3"
  type: "Eltwise"
  bottom: "conv3_1"
  bottom: "conv3_2"
  top: "conv3_3"
  eltwise_param { operation: SUM }
}
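#
# Two 1x1 convolution heads share the conv3_3 features:
#   conv4-1 -> 2 channels: face / non-face scores (normalized by 'prob1')
#   conv4-2 -> 4 channels: bounding-box regression offsets
#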
layer {
  name: "conv4-1"
  type: "Convolution"
  bottom: "conv3_3"
  top: "conv4-1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
  }
  convolution_param {
    num_output: 2
    kernel_size: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "conv4-2"
  type: "Convolution"
  bottom: "conv3_3"
  top: "conv4-2"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  convolution_param {
    num_output: 4
    kernel_size: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "prob1"
  type: "Softmax"
  bottom: "conv4-1"
  top: "prob1"
}