# sast_fpn.py

# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import paddle
from paddle import nn
import paddle.nn.functional as F
from paddle import ParamAttr
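

# ConvBNLayer: Conv2D followed by BatchNorm; the optional activation is
# applied inside BatchNorm via its `act` argument (`if_act` is stored but
# not used in forward).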
class ConvBNLayer(nn.Layer):
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 groups=1,
                 if_act=True,
                 act=None,
                 name=None):
        super(ConvBNLayer, self).__init__()
        self.if_act = if_act
        self.act = act
        self.conv = nn.Conv2D(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=(kernel_size - 1) // 2,
            groups=groups,
            weight_attr=ParamAttr(name=name + '_weights'),
            bias_attr=False)
        self.bn = nn.BatchNorm(
            num_channels=out_channels,
            act=act,
            param_attr=ParamAttr(name="bn_" + name + "_scale"),
            bias_attr=ParamAttr(name="bn_" + name + "_offset"),
            moving_mean_name="bn_" + name + "_mean",
            moving_variance_name="bn_" + name + "_variance")

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        return x
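

# DeConvBNLayer: Conv2DTranspose followed by BatchNorm; with kernel_size=4 and
# stride=2 it performs the 2x upsampling used in the top-down fusion branch.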
class DeConvBNLayer(nn.Layer):
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 groups=1,
                 if_act=True,
                 act=None,
                 name=None):
        super(DeConvBNLayer, self).__init__()
        self.if_act = if_act
        self.act = act
        self.deconv = nn.Conv2DTranspose(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=(kernel_size - 1) // 2,
            groups=groups,
            weight_attr=ParamAttr(name=name + '_weights'),
            bias_attr=False)
        self.bn = nn.BatchNorm(
            num_channels=out_channels,
            act=act,
            param_attr=ParamAttr(name="bn_" + name + "_scale"),
            bias_attr=ParamAttr(name="bn_" + name + "_offset"),
            moving_mean_name="bn_" + name + "_mean",
            moving_variance_name="bn_" + name + "_variance")

    def forward(self, x):
        x = self.deconv(x)
        x = self.bn(x)
        return x
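

# FPN_Up_Fusion: top-down branch. Reduces the five deepest backbone features
# (x[2:]) with 1x1 convs, then repeatedly upsamples, adds, and applies ReLU
# until the result reaches the resolution of x[2] with 128 channels.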
class FPN_Up_Fusion(nn.Layer):
    def __init__(self, in_channels):
        super(FPN_Up_Fusion, self).__init__()
        in_channels = in_channels[::-1]
        out_channels = [256, 256, 192, 192, 128]

        self.h0_conv = ConvBNLayer(in_channels[0], out_channels[0], 1, 1, act=None, name='fpn_up_h0')
        self.h1_conv = ConvBNLayer(in_channels[1], out_channels[1], 1, 1, act=None, name='fpn_up_h1')
        self.h2_conv = ConvBNLayer(in_channels[2], out_channels[2], 1, 1, act=None, name='fpn_up_h2')
        self.h3_conv = ConvBNLayer(in_channels[3], out_channels[3], 1, 1, act=None, name='fpn_up_h3')
        self.h4_conv = ConvBNLayer(in_channels[4], out_channels[4], 1, 1, act=None, name='fpn_up_h4')

        self.g0_conv = DeConvBNLayer(out_channels[0], out_channels[1], 4, 2, act=None, name='fpn_up_g0')
        self.g1_conv = nn.Sequential(
            ConvBNLayer(out_channels[1], out_channels[1], 3, 1, act='relu', name='fpn_up_g1_1'),
            DeConvBNLayer(out_channels[1], out_channels[2], 4, 2, act=None, name='fpn_up_g1_2'))
        self.g2_conv = nn.Sequential(
            ConvBNLayer(out_channels[2], out_channels[2], 3, 1, act='relu', name='fpn_up_g2_1'),
            DeConvBNLayer(out_channels[2], out_channels[3], 4, 2, act=None, name='fpn_up_g2_2'))
        self.g3_conv = nn.Sequential(
            ConvBNLayer(out_channels[3], out_channels[3], 3, 1, act='relu', name='fpn_up_g3_1'),
            DeConvBNLayer(out_channels[3], out_channels[4], 4, 2, act=None, name='fpn_up_g3_2'))
        self.g4_conv = nn.Sequential(
            ConvBNLayer(out_channels[4], out_channels[4], 3, 1, act='relu', name='fpn_up_fusion_1'),
            ConvBNLayer(out_channels[4], out_channels[4], 1, 1, act=None, name='fpn_up_fusion_2'))

    def _add_relu(self, x1, x2):
        x = paddle.add(x=x1, y=x2)
        x = F.relu(x)
        return x

    def forward(self, x):
        f = x[2:][::-1]
        h0 = self.h0_conv(f[0])
        h1 = self.h1_conv(f[1])
        h2 = self.h2_conv(f[2])
        h3 = self.h3_conv(f[3])
        h4 = self.h4_conv(f[4])

        g0 = self.g0_conv(h0)
        g1 = self._add_relu(g0, h1)
        g1 = self.g1_conv(g1)
        g2 = self.g2_conv(self._add_relu(g1, h2))
        g3 = self.g3_conv(self._add_relu(g2, h3))
        g4 = self.g4_conv(self._add_relu(g3, h4))
        return g4
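

# FPN_Down_Fusion: bottom-up branch. Fuses the three shallowest backbone
# features (x[:3]) with strided convs plus add + ReLU, ending at the
# resolution of x[2] with 128 channels.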
class FPN_Down_Fusion(nn.Layer):
    def __init__(self, in_channels):
        super(FPN_Down_Fusion, self).__init__()
        out_channels = [32, 64, 128]

        self.h0_conv = ConvBNLayer(in_channels[0], out_channels[0], 3, 1, act=None, name='fpn_down_h0')
        self.h1_conv = ConvBNLayer(in_channels[1], out_channels[1], 3, 1, act=None, name='fpn_down_h1')
        self.h2_conv = ConvBNLayer(in_channels[2], out_channels[2], 3, 1, act=None, name='fpn_down_h2')

        self.g0_conv = ConvBNLayer(out_channels[0], out_channels[1], 3, 2, act=None, name='fpn_down_g0')
        self.g1_conv = nn.Sequential(
            ConvBNLayer(out_channels[1], out_channels[1], 3, 1, act='relu', name='fpn_down_g1_1'),
            ConvBNLayer(out_channels[1], out_channels[2], 3, 2, act=None, name='fpn_down_g1_2'))
        self.g2_conv = nn.Sequential(
            ConvBNLayer(out_channels[2], out_channels[2], 3, 1, act='relu', name='fpn_down_fusion_1'),
            ConvBNLayer(out_channels[2], out_channels[2], 1, 1, act=None, name='fpn_down_fusion_2'))

    def forward(self, x):
        f = x[:3]
        h0 = self.h0_conv(f[0])
        h1 = self.h1_conv(f[1])
        h2 = self.h2_conv(f[2])

        g0 = self.g0_conv(h0)
        g1 = paddle.add(x=g0, y=h1)
        g1 = F.relu(g1)
        g1 = self.g1_conv(g1)

        g2 = paddle.add(x=g1, y=h2)
        g2 = F.relu(g2)
        g2 = self.g2_conv(g2)
        return g2
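

# Cross_Attention: the cross attention block (CAB). Scaled dot-product
# self-attention is computed separately along the horizontal and vertical
# axes (128 channels are hard-coded in _cal_fweight), each result is added to
# a 1x1-conv shortcut, and the two outputs are concatenated and projected
# back to in_channels.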
class Cross_Attention(nn.Layer):
    def __init__(self, in_channels):
        super(Cross_Attention, self).__init__()
        self.theta_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act='relu', name='f_theta')
        self.phi_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act='relu', name='f_phi')
        self.g_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act='relu', name='f_g')

        self.fh_weight_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act=None, name='fh_weight')
        self.fh_sc_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act=None, name='fh_sc')

        self.fv_weight_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act=None, name='fv_weight')
        self.fv_sc_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act=None, name='fv_sc')

        self.f_attn_conv = ConvBNLayer(in_channels * 2, in_channels, 1, 1, act='relu', name='f_attn')

    def _cal_fweight(self, f, shape):
        f_theta, f_phi, f_g = f
        # flatten
        f_theta = paddle.transpose(f_theta, [0, 2, 3, 1])
        f_theta = paddle.reshape(f_theta, [shape[0] * shape[1], shape[2], 128])
        f_phi = paddle.transpose(f_phi, [0, 2, 3, 1])
        f_phi = paddle.reshape(f_phi, [shape[0] * shape[1], shape[2], 128])
        f_g = paddle.transpose(f_g, [0, 2, 3, 1])
        f_g = paddle.reshape(f_g, [shape[0] * shape[1], shape[2], 128])
        # correlation
        f_attn = paddle.matmul(f_theta, paddle.transpose(f_phi, [0, 2, 1]))
        # scale
        f_attn = f_attn / (128**0.5)
        f_attn = F.softmax(f_attn)
        # weighted sum
        f_weight = paddle.matmul(f_attn, f_g)
        f_weight = paddle.reshape(f_weight, [shape[0], shape[1], shape[2], 128])
        return f_weight

    def forward(self, f_common):
        f_shape = paddle.shape(f_common)
        f_theta = self.theta_conv(f_common)
        f_phi = self.phi_conv(f_common)
        f_g = self.g_conv(f_common)

        ######## horizontal ########
        fh_weight = self._cal_fweight([f_theta, f_phi, f_g],
                                      [f_shape[0], f_shape[2], f_shape[3]])
        fh_weight = paddle.transpose(fh_weight, [0, 3, 1, 2])
        fh_weight = self.fh_weight_conv(fh_weight)
        # short cut
        fh_sc = self.fh_sc_conv(f_common)
        f_h = F.relu(fh_weight + fh_sc)

        ######## vertical ########
        fv_theta = paddle.transpose(f_theta, [0, 1, 3, 2])
        fv_phi = paddle.transpose(f_phi, [0, 1, 3, 2])
        fv_g = paddle.transpose(f_g, [0, 1, 3, 2])
        fv_weight = self._cal_fweight([fv_theta, fv_phi, fv_g],
                                      [f_shape[0], f_shape[3], f_shape[2]])
        fv_weight = paddle.transpose(fv_weight, [0, 3, 2, 1])
        fv_weight = self.fv_weight_conv(fv_weight)
        # short cut
        fv_sc = self.fv_sc_conv(f_common)
        f_v = F.relu(fv_weight + fv_sc)

        ######## merge ########
        f_attn = paddle.concat([f_h, f_v], axis=1)
        f_attn = self.f_attn_conv(f_attn)
        return f_attn
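

# SASTFPN: the SAST detection neck. Runs the down and up fusion branches on
# the backbone feature list, fuses the two 128-channel results with add +
# ReLU, and optionally refines them with Cross_Attention when with_cab=True.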
class SASTFPN(nn.Layer):
    def __init__(self, in_channels, with_cab=False, **kwargs):
        super(SASTFPN, self).__init__()
        self.in_channels = in_channels
        self.with_cab = with_cab
        self.FPN_Down_Fusion = FPN_Down_Fusion(self.in_channels)
        self.FPN_Up_Fusion = FPN_Up_Fusion(self.in_channels)
        self.out_channels = 128
        self.cross_attention = Cross_Attention(self.out_channels)

    def forward(self, x):
        # down fpn
        f_down = self.FPN_Down_Fusion(x)
        # up fpn
        f_up = self.FPN_Up_Fusion(x)
        # fusion
        f_common = paddle.add(x=f_down, y=f_up)
        f_common = F.relu(f_common)
        if self.with_cab:
            # enhance f_common with the cross attention block (CAB)
            f_common = self.cross_attention(f_common)
        return f_common
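

# A minimal smoke-test sketch (not part of the upstream module): it feeds
# seven randomly generated feature maps, each at half the resolution of the
# previous one, through SASTFPN. The channel counts below are hypothetical
# placeholders for a backbone output, chosen only so the shapes line up.
if __name__ == '__main__':
    backbone_channels = [3, 64, 128, 256, 512, 512, 512]  # illustrative only
    feats = [
        paddle.rand([1, c, 512 // (2 ** i), 512 // (2 ** i)])
        for i, c in enumerate(backbone_channels)
    ]
    fpn = SASTFPN(in_channels=backbone_channels, with_cab=True)
    out = fpn(feats)
    # The fused map has 128 channels at 1/4 of the input resolution,
    # i.e. [1, 128, 128, 128] for a 512 x 512 input.
    print(out.shape)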