mv3.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.nn import AdaptiveAvgPool2D, BatchNorm, Conv2D, Dropout, Linear
from paddle.nn.functional import hardswish, hardsigmoid
from paddle.regularizer import L2Decay
from paddle.utils.cpp_extension import load

# JIT-compile the custom operator from its C++/CUDA sources; the resulting
# module exposes custom_ops.custom_relu, used below when use_custom_relu=True.
custom_ops = load(
    name="custom_jit_ops",
    sources=["./custom_op/custom_relu_op.cc", "./custom_op/custom_relu_op.cu"])
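

# Rounds a channel count v to the nearest multiple of divisor (default 8),
# never going below min_value and never dropping more than 10% below v,
# e.g. make_divisible(30) -> 32.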
def make_divisible(v, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v
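

# MobileNetV3 backbone plus classification head. Each row of self.cfg is
# [kernel size k, expansion size exp, output channels c, use SE, nonlinearity
# nl, stride s]; scale multiplies all channel widths via make_divisible.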
class MobileNetV3(nn.Layer):
    def __init__(self,
                 scale=1.0,
                 model_name="small",
                 dropout_prob=0.2,
                 class_dim=1000,
                 use_custom_relu=False):
        super(MobileNetV3, self).__init__()
        self.use_custom_relu = use_custom_relu
        inplanes = 16
        if model_name == "large":
            self.cfg = [
                # k, exp, c, se, nl, s,
                [3, 16, 16, False, "relu", 1],
                [3, 64, 24, False, "relu", 2],
                [3, 72, 24, False, "relu", 1],
                [5, 72, 40, True, "relu", 2],
                [5, 120, 40, True, "relu", 1],
                [5, 120, 40, True, "relu", 1],
                [3, 240, 80, False, "hardswish", 2],
                [3, 200, 80, False, "hardswish", 1],
                [3, 184, 80, False, "hardswish", 1],
                [3, 184, 80, False, "hardswish", 1],
                [3, 480, 112, True, "hardswish", 1],
                [3, 672, 112, True, "hardswish", 1],
                [5, 672, 160, True, "hardswish", 2],
                [5, 960, 160, True, "hardswish", 1],
                [5, 960, 160, True, "hardswish", 1],
            ]
            self.cls_ch_squeeze = 960
            self.cls_ch_expand = 1280
        elif model_name == "small":
            self.cfg = [
                # k, exp, c, se, nl, s,
                [3, 16, 16, True, "relu", 2],
                [3, 72, 24, False, "relu", 2],
                [3, 88, 24, False, "relu", 1],
                [5, 96, 40, True, "hardswish", 2],
                [5, 240, 40, True, "hardswish", 1],
                [5, 240, 40, True, "hardswish", 1],
                [5, 120, 48, True, "hardswish", 1],
                [5, 144, 48, True, "hardswish", 1],
                [5, 288, 96, True, "hardswish", 2],
                [5, 576, 96, True, "hardswish", 1],
                [5, 576, 96, True, "hardswish", 1],
            ]
            self.cls_ch_squeeze = 576
            self.cls_ch_expand = 1280
        else:
            raise NotImplementedError(
                "model_name [{}] is not implemented!".format(model_name))

        self.conv1 = ConvBNLayer(
            in_c=3,
            out_c=make_divisible(inplanes * scale),
            filter_size=3,
            stride=2,
            padding=1,
            num_groups=1,
            if_act=True,
            act="hardswish",
            name="conv1",
            use_custom_relu=self.use_custom_relu)

        self.block_list = []
        i = 0
        inplanes = make_divisible(inplanes * scale)
        for (k, exp, c, se, nl, s) in self.cfg:
            block = self.add_sublayer(
                "conv" + str(i + 2),
                ResidualUnit(
                    in_c=inplanes,
                    mid_c=make_divisible(scale * exp),
                    out_c=make_divisible(scale * c),
                    filter_size=k,
                    stride=s,
                    use_se=se,
                    act=nl,
                    name="conv" + str(i + 2),
                    use_custom_relu=self.use_custom_relu))
            self.block_list.append(block)
            inplanes = make_divisible(scale * c)
            i += 1

        self.last_second_conv = ConvBNLayer(
            in_c=inplanes,
            out_c=make_divisible(scale * self.cls_ch_squeeze),
            filter_size=1,
            stride=1,
            padding=0,
            num_groups=1,
            if_act=True,
            act="hardswish",
            name="conv_last",
            use_custom_relu=self.use_custom_relu)

        self.pool = AdaptiveAvgPool2D(1)

        self.last_conv = Conv2D(
            in_channels=make_divisible(scale * self.cls_ch_squeeze),
            out_channels=self.cls_ch_expand,
            kernel_size=1,
            stride=1,
            padding=0,
            weight_attr=ParamAttr(),
            bias_attr=False)

        self.dropout = Dropout(p=dropout_prob, mode="downscale_in_infer")

        self.out = Linear(
            self.cls_ch_expand,
            class_dim,
            weight_attr=ParamAttr(),
            bias_attr=ParamAttr())

    def forward(self, inputs, label=None):
        x = self.conv1(inputs)
        for block in self.block_list:
            x = block(x)
        x = self.last_second_conv(x)
        x = self.pool(x)
        x = self.last_conv(x)
        x = hardswish(x)
        x = self.dropout(x)
        x = paddle.flatten(x, start_axis=1, stop_axis=-1)
        x = self.out(x)
        return x
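

# Conv2D followed by BatchNorm, with an optional activation ("relu" or
# "hardswish"); when use_custom_relu is set, the JIT-compiled
# custom_ops.custom_relu replaces F.relu.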
class ConvBNLayer(nn.Layer):
    def __init__(self,
                 in_c,
                 out_c,
                 filter_size,
                 stride,
                 padding,
                 num_groups=1,
                 if_act=True,
                 act=None,
                 use_cudnn=True,
                 name="",
                 use_custom_relu=False):
        super(ConvBNLayer, self).__init__()
        self.if_act = if_act
        self.act = act
        self.conv = Conv2D(
            in_channels=in_c,
            out_channels=out_c,
            kernel_size=filter_size,
            stride=stride,
            padding=padding,
            groups=num_groups,
            weight_attr=ParamAttr(),
            bias_attr=False)
        self.bn = BatchNorm(
            num_channels=out_c,
            act=None,
            param_attr=ParamAttr(regularizer=L2Decay(0.0)),
            bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
        # moving_mean_name=name + "_bn_mean",
        # moving_variance_name=name + "_bn_variance")
        self.use_custom_relu = use_custom_relu
    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        if self.if_act:
            if self.act == "relu":
                if self.use_custom_relu:
                    x = custom_ops.custom_relu(x)
                else:
                    x = F.relu(x)
            elif self.act == "hardswish":
                x = hardswish(x)
            else:
                raise NotImplementedError(
                    "act [{}] is not supported; use 'relu' or 'hardswish'.".
                    format(self.act))
        return x
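

# MobileNetV3 inverted-residual block: 1x1 expansion conv -> depthwise conv
# -> optional squeeze-and-excitation -> 1x1 linear projection, with an
# identity shortcut when stride == 1 and in_c == out_c.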
class ResidualUnit(nn.Layer):
    def __init__(self,
                 in_c,
                 mid_c,
                 out_c,
                 filter_size,
                 stride,
                 use_se,
                 act=None,
                 name='',
                 use_custom_relu=False):
        super(ResidualUnit, self).__init__()
        self.if_shortcut = stride == 1 and in_c == out_c
        self.if_se = use_se
        self.use_custom_relu = use_custom_relu

        self.expand_conv = ConvBNLayer(
            in_c=in_c,
            out_c=mid_c,
            filter_size=1,
            stride=1,
            padding=0,
            if_act=True,
            act=act,
            name=name + "_expand",
            use_custom_relu=self.use_custom_relu)
        self.bottleneck_conv = ConvBNLayer(
            in_c=mid_c,
            out_c=mid_c,
            filter_size=filter_size,
            stride=stride,
            padding=int((filter_size - 1) // 2),
            num_groups=mid_c,
            if_act=True,
            act=act,
            name=name + "_depthwise",
            use_custom_relu=self.use_custom_relu)
        if self.if_se:
            self.mid_se = SEModule(mid_c, name=name + "_se")
        self.linear_conv = ConvBNLayer(
            in_c=mid_c,
            out_c=out_c,
            filter_size=1,
            stride=1,
            padding=0,
            if_act=False,
            act=None,
            name=name + "_linear",
            use_custom_relu=self.use_custom_relu)

    def forward(self, inputs):
        x = self.expand_conv(inputs)
        x = self.bottleneck_conv(x)
        if self.if_se:
            x = self.mid_se(x)
        x = self.linear_conv(x)
        if self.if_shortcut:
            x = paddle.add(inputs, x)
        return x
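

# Squeeze-and-excitation: global average pool, a two-layer 1x1-conv
# bottleneck (reduction=4), then a hardsigmoid gate that rescales the
# input channels.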
class SEModule(nn.Layer):
    def __init__(self, channel, reduction=4, name=""):
        super(SEModule, self).__init__()
        self.avg_pool = AdaptiveAvgPool2D(1)
        self.conv1 = Conv2D(
            in_channels=channel,
            out_channels=channel // reduction,
            kernel_size=1,
            stride=1,
            padding=0,
            weight_attr=ParamAttr(),
            bias_attr=ParamAttr())
        self.conv2 = Conv2D(
            in_channels=channel // reduction,
            out_channels=channel,
            kernel_size=1,
            stride=1,
            padding=0,
            weight_attr=ParamAttr(),
            bias_attr=ParamAttr())

    def forward(self, inputs):
        outputs = self.avg_pool(inputs)
        outputs = self.conv1(outputs)
        outputs = F.relu(outputs)
        outputs = self.conv2(outputs)
        outputs = hardsigmoid(outputs, slope=0.2, offset=0.5)
        return paddle.multiply(x=inputs, y=outputs)
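

# Factory helpers: model_name selects the small/large config and scale
# multiplies every channel width.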
def MobileNetV3_small_x0_35(**args):
    model = MobileNetV3(model_name="small", scale=0.35, **args)
    return model


def MobileNetV3_small_x0_5(**args):
    model = MobileNetV3(model_name="small", scale=0.5, **args)
    return model


def MobileNetV3_small_x0_75(**args):
    model = MobileNetV3(model_name="small", scale=0.75, **args)
    return model


def MobileNetV3_small_x1_0(**args):
    model = MobileNetV3(model_name="small", scale=1.0, **args)
    return model


def MobileNetV3_small_x1_25(**args):
    model = MobileNetV3(model_name="small", scale=1.25, **args)
    return model


def MobileNetV3_large_x0_35(**args):
    model = MobileNetV3(model_name="large", scale=0.35, **args)
    return model


def MobileNetV3_large_x0_5(**args):
    model = MobileNetV3(model_name="large", scale=0.5, **args)
    return model


def MobileNetV3_large_x0_75(**args):
    model = MobileNetV3(model_name="large", scale=0.75, **args)
    return model


def MobileNetV3_large_x1_0(**args):
    model = MobileNetV3(model_name="large", scale=1.0, **args)
    return model


def MobileNetV3_large_x1_25(**args):
    model = MobileNetV3(model_name="large", scale=1.25, **args)
    return model
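

# Distillation wrapper: two independent MobileNetV3 students run on the same
# input, and forward returns a dict with both sets of logits.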
class DistillMV3(nn.Layer):
    def __init__(self,
                 scale=1.0,
                 model_name="small",
                 dropout_prob=0.2,
                 class_dim=1000,
                 args=None,
                 use_custom_relu=False):
        super(DistillMV3, self).__init__()
        self.student = MobileNetV3(
            model_name=model_name,
            scale=scale,
            class_dim=class_dim,
            use_custom_relu=use_custom_relu)
        self.student1 = MobileNetV3(
            model_name=model_name,
            scale=scale,
            class_dim=class_dim,
            use_custom_relu=use_custom_relu)

    def forward(self, inputs, label=None):
        predicts = dict()
        predicts['student'] = self.student(inputs, label)
        predicts['student1'] = self.student1(inputs, label)
        return predicts


def distillmv3_large_x0_5(**args):
    model = DistillMV3(model_name="large", scale=0.5, **args)
    return model
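

# Siamese wrapper: two MobileNetV3 backbones process the same input, their
# feature maps are summed, and the head of self.net produces the logits.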
class SiameseMV3(nn.Layer):
    def __init__(self,
                 scale=1.0,
                 model_name="small",
                 dropout_prob=0.2,
                 class_dim=1000,
                 args=None,
                 use_custom_relu=False):
        super(SiameseMV3, self).__init__()
        self.net = MobileNetV3(
            model_name=model_name,
            scale=scale,
            class_dim=class_dim,
            use_custom_relu=use_custom_relu)
        self.net1 = MobileNetV3(
            model_name=model_name,
            scale=scale,
            class_dim=class_dim,
            use_custom_relu=use_custom_relu)

    def forward(self, inputs, label=None):
        # net
        x = self.net.conv1(inputs)
        for block in self.net.block_list:
            x = block(x)
        # net1
        x1 = self.net1.conv1(inputs)
        for block in self.net1.block_list:
            x1 = block(x1)
        # add
        x = x + x1
        x = self.net.last_second_conv(x)
        x = self.net.pool(x)
        x = self.net.last_conv(x)
        x = hardswish(x)
        x = self.net.dropout(x)
        x = paddle.flatten(x, start_axis=1, stop_axis=-1)
        x = self.net.out(x)
        return x


def siamese_mv3(class_dim, use_custom_relu):
    model = SiameseMV3(
        scale=0.5,
        model_name="large",
        class_dim=class_dim,
        use_custom_relu=use_custom_relu)
    return model
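

# Builds a model from a config dict. Expects config['model_type'] to be one
# of "cls", "cls_distill", or "cls_distill_multiopt", and reads class_dim,
# use_custom_relu and the optional 'siamese' flag from config['MODEL'].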
def build_model(config):
    model_type = config['model_type']
    if model_type == "cls":
        class_dim = config['MODEL']['class_dim']
        use_custom_relu = config['MODEL']['use_custom_relu']
        if 'siamese' in config['MODEL'] and config['MODEL']['siamese'] is True:
            model = siamese_mv3(
                class_dim=class_dim, use_custom_relu=use_custom_relu)
        else:
            model = MobileNetV3_large_x0_5(
                class_dim=class_dim, use_custom_relu=use_custom_relu)
    elif model_type == "cls_distill":
        class_dim = config['MODEL']['class_dim']
        use_custom_relu = config['MODEL']['use_custom_relu']
        model = distillmv3_large_x0_5(
            class_dim=class_dim, use_custom_relu=use_custom_relu)
    elif model_type == "cls_distill_multiopt":
        class_dim = config['MODEL']['class_dim']
        use_custom_relu = config['MODEL']['use_custom_relu']
        model = distillmv3_large_x0_5(
            class_dim=100, use_custom_relu=use_custom_relu)
    else:
        raise ValueError(
            "model_type should be one of "
            "['cls', 'cls_distill', 'cls_distill_multiopt'], but got {}.".
            format(model_type))
    return model
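

# Minimal usage sketch (illustrative, not part of the original training
# pipeline): builds the small variant at scale 1.0 and runs a forward pass
# on random input. Assumes paddle is installed and the custom-op sources
# above compiled successfully at import time.
if __name__ == "__main__":
    model = MobileNetV3_small_x1_0(class_dim=1000, use_custom_relu=False)
    model.eval()
    x = paddle.rand([1, 3, 224, 224])
    logits = model(x)
    print(logits.shape)  # expect [1, 1000]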