# SegmentationFree.prototxt
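# Caffe deploy definition; input blob "data" is N x C x H x W = 1 x 3 x 160 x 40.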

input: "data"
input_dim: 1
input_dim: 3
input_dim: 160
input_dim: 40
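# Stem: three conv(3x3, pad 1) -> BatchNorm -> Scale -> ReLU -> 2x2/2 max-pool stages
# (32 -> 64 -> 128 channels); each pool halves both spatial dimensions.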
layer {
  name: "conv0"
  type: "Convolution"
  bottom: "data"
  top: "conv0"
  convolution_param {
    num_output: 32
    bias_term: true
    pad_h: 1
    pad_w: 1
    kernel_h: 3
    kernel_w: 3
    stride_h: 1
    stride_w: 1
  }
}
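# Caffe's BatchNorm layer only normalizes; the learned affine part (gamma/beta) is
# carried by the Scale layer with bias_term: true that follows each BatchNorm here.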
layer {
  name: "bn0"
  type: "BatchNorm"
  bottom: "conv0"
  top: "bn0"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "bn0_scale"
  type: "Scale"
  bottom: "bn0"
  top: "bn0"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu0"
  type: "ReLU"
  bottom: "bn0"
  top: "bn0"
}
layer {
  name: "pool0"
  type: "Pooling"
  bottom: "bn0"
  top: "pool0"
  pooling_param {
    pool: MAX
    kernel_h: 2
    kernel_w: 2
    stride_h: 2
    stride_w: 2
    pad_h: 0
    pad_w: 0
  }
}
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "pool0"
  top: "conv1"
  convolution_param {
    num_output: 64
    bias_term: true
    pad_h: 1
    pad_w: 1
    kernel_h: 3
    kernel_w: 3
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "bn1"
  type: "BatchNorm"
  bottom: "conv1"
  top: "bn1"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "bn1_scale"
  type: "Scale"
  bottom: "bn1"
  top: "bn1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "bn1"
  top: "bn1"
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "bn1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_h: 2
    kernel_w: 2
    stride_h: 2
    stride_w: 2
    pad_h: 0
    pad_w: 0
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  convolution_param {
    num_output: 128
    bias_term: true
    pad_h: 1
    pad_w: 1
    kernel_h: 3
    kernel_w: 3
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "bn2"
  type: "BatchNorm"
  bottom: "conv2"
  top: "bn2"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "bn2_scale"
  type: "Scale"
  bottom: "bn2"
  top: "bn2"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "bn2"
  top: "bn2"
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "bn2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_h: 2
    kernel_w: 2
    stride_h: 2
    stride_w: 2
    pad_h: 0
    pad_w: 0
  }
}
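# Feature-mixing block: a 1x5 convolution collapses the remaining width (5 -> 1 for
# the 1 x 3 x 160 x 40 input), then four parallel column-direction branches are
# applied to its output and concatenated.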
layer {
  name: "conv2d_1"
  type: "Convolution"
  bottom: "pool2"
  top: "conv2d_1"
  convolution_param {
    num_output: 256
    bias_term: true
    pad_h: 0
    pad_w: 0
    kernel_h: 1
    kernel_w: 5
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "batch_normalization_1"
  type: "BatchNorm"
  bottom: "conv2d_1"
  top: "batch_normalization_1"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "batch_normalization_1_scale"
  type: "Scale"
  bottom: "batch_normalization_1"
  top: "batch_normalization_1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "activation_1"
  type: "ReLU"
  bottom: "batch_normalization_1"
  top: "batch_normalization_1"
}
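# Four parallel 256-channel branches over the same input; pad_h is chosen so every
# branch preserves height (pad 3 for 7x1, pad 2 for 5x1, pad 1 for 3x1, none for 1x1).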
layer {
  name: "conv2d_2"
  type: "Convolution"
  bottom: "batch_normalization_1"
  top: "conv2d_2"
  convolution_param {
    num_output: 256
    bias_term: true
    pad_h: 3
    pad_w: 0
    kernel_h: 7
    kernel_w: 1
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "conv2d_3"
  type: "Convolution"
  bottom: "batch_normalization_1"
  top: "conv2d_3"
  convolution_param {
    num_output: 256
    bias_term: true
    pad_h: 2
    pad_w: 0
    kernel_h: 5
    kernel_w: 1
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "conv2d_4"
  type: "Convolution"
  bottom: "batch_normalization_1"
  top: "conv2d_4"
  convolution_param {
    num_output: 256
    bias_term: true
    pad_h: 1
    pad_w: 0
    kernel_h: 3
    kernel_w: 1
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "conv2d_5"
  type: "Convolution"
  bottom: "batch_normalization_1"
  top: "conv2d_5"
  convolution_param {
    num_output: 256
    bias_term: true
    pad_h: 0
    pad_w: 0
    kernel_h: 1
    kernel_w: 1
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "batch_normalization_2"
  type: "BatchNorm"
  bottom: "conv2d_2"
  top: "batch_normalization_2"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "batch_normalization_2_scale"
  type: "Scale"
  bottom: "batch_normalization_2"
  top: "batch_normalization_2"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "batch_normalization_3"
  type: "BatchNorm"
  bottom: "conv2d_3"
  top: "batch_normalization_3"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "batch_normalization_3_scale"
  type: "Scale"
  bottom: "batch_normalization_3"
  top: "batch_normalization_3"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "batch_normalization_4"
  type: "BatchNorm"
  bottom: "conv2d_4"
  top: "batch_normalization_4"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "batch_normalization_4_scale"
  type: "Scale"
  bottom: "batch_normalization_4"
  top: "batch_normalization_4"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "batch_normalization_5"
  type: "BatchNorm"
  bottom: "conv2d_5"
  top: "batch_normalization_5"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "batch_normalization_5_scale"
  type: "Scale"
  bottom: "batch_normalization_5"
  top: "batch_normalization_5"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "activation_2"
  type: "ReLU"
  bottom: "batch_normalization_2"
  top: "batch_normalization_2"
}
layer {
  name: "activation_3"
  type: "ReLU"
  bottom: "batch_normalization_3"
  top: "batch_normalization_3"
}
layer {
  name: "activation_4"
  type: "ReLU"
  bottom: "batch_normalization_4"
  top: "batch_normalization_4"
}
layer {
  name: "activation_5"
  type: "ReLU"
  bottom: "batch_normalization_5"
  top: "batch_normalization_5"
}
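# Channel-wise concatenation of the four branches: 4 x 256 = 1024 channels.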
layer {
  name: "concatenate_1"
  type: "Concat"
  bottom: "batch_normalization_2"
  bottom: "batch_normalization_3"
  bottom: "batch_normalization_4"
  bottom: "batch_normalization_5"
  top: "concatenate_1"
  concat_param {
    axis: 1
  }
}
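# Head: a 1x1 convolution re-mixes the concatenated features (1024 -> 1024 channels)
# before classification.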
layer {
  name: "conv_1024_11"
  type: "Convolution"
  bottom: "concatenate_1"
  top: "conv_1024_11"
  convolution_param {
    num_output: 1024
    bias_term: true
    pad_h: 0
    pad_w: 0
    kernel_h: 1
    kernel_w: 1
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "batch_normalization_6"
  type: "BatchNorm"
  bottom: "conv_1024_11"
  top: "batch_normalization_6"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "batch_normalization_6_scale"
  type: "Scale"
  bottom: "batch_normalization_6"
  top: "batch_normalization_6"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "activation_6"
  type: "ReLU"
  bottom: "batch_normalization_6"
  top: "batch_normalization_6"
}
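# 1x1 classifier: 84 output channels, one score per class at each spatial position
# (what the 84 classes are is not specified in this file).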
layer {
  name: "conv_class_11"
  type: "Convolution"
  bottom: "batch_normalization_6"
  top: "conv_class_11"
  convolution_param {
    num_output: 84
    bias_term: true
    pad_h: 0
    pad_w: 0
    kernel_h: 1
    kernel_w: 1
    stride_h: 1
    stride_w: 1
  }
}
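# Softmax over the class channels (Caffe's default axis: 1). With the 1 x 3 x 160 x 40
# input, "prob" comes out as 1 x 84 x 20 x 1: a class distribution at each of the
# 20 remaining positions along the height.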
layer {
  name: "prob"
  type: "Softmax"
  bottom: "conv_class_11"
  top: "prob"
}
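# Usage sketch (assumption: standard pycaffe API; the .caffemodel file name below is
# hypothetical, not given in this file):
#   import caffe
#   net = caffe.Net("SegmentationFree.prototxt", "SegmentationFree.caffemodel", caffe.TEST)
#   net.blobs["data"].data[...] = preprocessed    # array of shape (1, 3, 160, 40)
#   prob = net.forward()["prob"]                  # (1, 84, 20, 1) class probabilities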