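# Deploy-style Caffe network definition (legacy input/input_dim header).
# Input blob "data": 1 x 3 x 160 x 40 (N x C x H x W).
# Structure: three conv/BN/ReLU/max-pool blocks, a 1x5 convolution that collapses
# the width axis, four parallel Kx1 convolution branches (7/5/3/1) concatenated
# along channels, a 1x1 bottleneck, and an 84-way per-position softmax classifier.
# Spatial sizes in the comments below follow standard Caffe output-size arithmetic
# (out = (in + 2*pad - kernel) / stride + 1; all divisions here are exact).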
- input: "data"
- input_dim: 1
- input_dim: 3
- input_dim: 160
- input_dim: 40
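# Block 1: conv0 (3x3/1, 32 ch) + BatchNorm/Scale + ReLU + 2x2/2 max pool.
# data 3x160x40 -> conv0 32x160x40 -> pool0 32x80x20.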
layer {
  name: "conv0"
  type: "Convolution"
  bottom: "data"
  top: "conv0"
  convolution_param {
    num_output: 32
    bias_term: true
    pad_h: 1
    pad_w: 1
    kernel_h: 3
    kernel_w: 3
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "bn0"
  type: "BatchNorm"
  bottom: "conv0"
  top: "bn0"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "bn0_scale"
  type: "Scale"
  bottom: "bn0"
  top: "bn0"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu0"
  type: "ReLU"
  bottom: "bn0"
  top: "bn0"
}
layer {
  name: "pool0"
  type: "Pooling"
  bottom: "bn0"
  top: "pool0"
  pooling_param {
    pool: MAX
    kernel_h: 2
    kernel_w: 2
    stride_h: 2
    stride_w: 2
    pad_h: 0
    pad_w: 0
  }
}
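# Block 2: conv1 (3x3/1, 64 ch) + BatchNorm/Scale + ReLU + 2x2/2 max pool.
# 32x80x20 -> conv1 64x80x20 -> pool1 64x40x10.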
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "pool0"
  top: "conv1"
  convolution_param {
    num_output: 64
    bias_term: true
    pad_h: 1
    pad_w: 1
    kernel_h: 3
    kernel_w: 3
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "bn1"
  type: "BatchNorm"
  bottom: "conv1"
  top: "bn1"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "bn1_scale"
  type: "Scale"
  bottom: "bn1"
  top: "bn1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "bn1"
  top: "bn1"
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "bn1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_h: 2
    kernel_w: 2
    stride_h: 2
    stride_w: 2
    pad_h: 0
    pad_w: 0
  }
}
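# Block 3: conv2 (3x3/1, 128 ch) + BatchNorm/Scale + ReLU + 2x2/2 max pool.
# 64x40x10 -> conv2 128x40x10 -> pool2 128x20x5.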
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  convolution_param {
    num_output: 128
    bias_term: true
    pad_h: 1
    pad_w: 1
    kernel_h: 3
    kernel_w: 3
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "bn2"
  type: "BatchNorm"
  bottom: "conv2"
  top: "bn2"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "bn2_scale"
  type: "Scale"
  bottom: "bn2"
  top: "bn2"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "bn2"
  top: "bn2"
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "bn2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_h: 2
    kernel_w: 2
    stride_h: 2
    stride_w: 2
    pad_h: 0
    pad_w: 0
  }
}
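# conv2d_1: 1x5 convolution with no padding collapses the width axis.
# 128x20x5 -> 256x20x1, followed by BatchNorm/Scale + ReLU.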
layer {
  name: "conv2d_1"
  type: "Convolution"
  bottom: "pool2"
  top: "conv2d_1"
  convolution_param {
    num_output: 256
    bias_term: true
    pad_h: 0
    pad_w: 0
    kernel_h: 1
    kernel_w: 5
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "batch_normalization_1"
  type: "BatchNorm"
  bottom: "conv2d_1"
  top: "batch_normalization_1"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "batch_normalization_1_scale"
  type: "Scale"
  bottom: "batch_normalization_1"
  top: "batch_normalization_1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "activation_1"
  type: "ReLU"
  bottom: "batch_normalization_1"
  top: "batch_normalization_1"
}
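# Four parallel branches over the shared 256x20x1 feature map: 7x1, 5x1, 3x1
# and 1x1 convolutions, each padded along H to preserve the 20x1 spatial size,
# with 256 output channels per branch.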
layer {
  name: "conv2d_2"
  type: "Convolution"
  bottom: "batch_normalization_1"
  top: "conv2d_2"
  convolution_param {
    num_output: 256
    bias_term: true
    pad_h: 3
    pad_w: 0
    kernel_h: 7
    kernel_w: 1
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "conv2d_3"
  type: "Convolution"
  bottom: "batch_normalization_1"
  top: "conv2d_3"
  convolution_param {
    num_output: 256
    bias_term: true
    pad_h: 2
    pad_w: 0
    kernel_h: 5
    kernel_w: 1
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "conv2d_4"
  type: "Convolution"
  bottom: "batch_normalization_1"
  top: "conv2d_4"
  convolution_param {
    num_output: 256
    bias_term: true
    pad_h: 1
    pad_w: 0
    kernel_h: 3
    kernel_w: 1
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "conv2d_5"
  type: "Convolution"
  bottom: "batch_normalization_1"
  top: "conv2d_5"
  convolution_param {
    num_output: 256
    bias_term: true
    pad_h: 0
    pad_w: 0
    kernel_h: 1
    kernel_w: 1
    stride_h: 1
    stride_w: 1
  }
}
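# BatchNorm/Scale followed by ReLU for each of the four branch outputs.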
layer {
  name: "batch_normalization_2"
  type: "BatchNorm"
  bottom: "conv2d_2"
  top: "batch_normalization_2"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "batch_normalization_2_scale"
  type: "Scale"
  bottom: "batch_normalization_2"
  top: "batch_normalization_2"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "batch_normalization_3"
  type: "BatchNorm"
  bottom: "conv2d_3"
  top: "batch_normalization_3"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "batch_normalization_3_scale"
  type: "Scale"
  bottom: "batch_normalization_3"
  top: "batch_normalization_3"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "batch_normalization_4"
  type: "BatchNorm"
  bottom: "conv2d_4"
  top: "batch_normalization_4"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "batch_normalization_4_scale"
  type: "Scale"
  bottom: "batch_normalization_4"
  top: "batch_normalization_4"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "batch_normalization_5"
  type: "BatchNorm"
  bottom: "conv2d_5"
  top: "batch_normalization_5"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "batch_normalization_5_scale"
  type: "Scale"
  bottom: "batch_normalization_5"
  top: "batch_normalization_5"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "activation_2"
  type: "ReLU"
  bottom: "batch_normalization_2"
  top: "batch_normalization_2"
}
layer {
  name: "activation_3"
  type: "ReLU"
  bottom: "batch_normalization_3"
  top: "batch_normalization_3"
}
layer {
  name: "activation_4"
  type: "ReLU"
  bottom: "batch_normalization_4"
  top: "batch_normalization_4"
}
layer {
  name: "activation_5"
  type: "ReLU"
  bottom: "batch_normalization_5"
  top: "batch_normalization_5"
}
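# Channel-wise concatenation of the four 256-channel branches: 1024x20x1.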
layer {
  name: "concatenate_1"
  type: "Concat"
  bottom: "batch_normalization_2"
  bottom: "batch_normalization_3"
  bottom: "batch_normalization_4"
  bottom: "batch_normalization_5"
  top: "concatenate_1"
  concat_param {
    axis: 1
  }
}
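# 1x1 convolution mixing the concatenated features (1024 -> 1024 channels),
# followed by BatchNorm/Scale + ReLU.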
layer {
  name: "conv_1024_11"
  type: "Convolution"
  bottom: "concatenate_1"
  top: "conv_1024_11"
  convolution_param {
    num_output: 1024
    bias_term: true
    pad_h: 0
    pad_w: 0
    kernel_h: 1
    kernel_w: 1
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "batch_normalization_6"
  type: "BatchNorm"
  bottom: "conv_1024_11"
  top: "batch_normalization_6"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "batch_normalization_6_scale"
  type: "Scale"
  bottom: "batch_normalization_6"
  top: "batch_normalization_6"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "activation_6"
  type: "ReLU"
  bottom: "batch_normalization_6"
  top: "batch_normalization_6"
}
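# 1x1 classifier projecting 1024 -> 84 channels: one 84-way score vector for
# each of the 20 positions along H (class semantics are not specified here).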
layer {
  name: "conv_class_11"
  type: "Convolution"
  bottom: "batch_normalization_6"
  top: "conv_class_11"
  convolution_param {
    num_output: 84
    bias_term: true
    pad_h: 0
    pad_w: 0
    kernel_h: 1
    kernel_w: 1
    stride_h: 1
    stride_w: 1
  }
}
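# Softmax over the channel axis (Caffe default axis 1): per-position class
# probabilities, output shape 84x20x1.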
layer {
  name: "prob"
  type: "Softmax"
  bottom: "conv_class_11"
  top: "prob"
}