rec_can_head.py

# copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is adapted from:
https://github.com/LBH1024/CAN/models/can.py
https://github.com/LBH1024/CAN/models/counting.py
https://github.com/LBH1024/CAN/models/decoder.py
https://github.com/LBH1024/CAN/models/attention.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math

import paddle
import paddle.nn as nn

'''
Counting Module
'''


class ChannelAtt(nn.Layer):
    def __init__(self, channel, reduction):
        super(ChannelAtt, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2D(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction),
            nn.ReLU(),
            nn.Linear(channel // reduction, channel),
            nn.Sigmoid())

    def forward(self, x):
        b, c, _, _ = x.shape
        y = paddle.reshape(self.avg_pool(x), [b, c])
        y = paddle.reshape(self.fc(y), [b, c, 1, 1])
        return x * y
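
# A shape sketch for ChannelAtt (illustrative, not part of the original
# source): a squeeze-and-excitation style gate that rescales every channel
# of a feature map by a learned weight in (0, 1).
#
#   att = ChannelAtt(channel=512, reduction=16)
#   feat = paddle.rand([2, 512, 8, 32])   # B x C x H x W
#   out = att(feat)                       # same shape, channels rescaled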


class CountingDecoder(nn.Layer):
    def __init__(self, in_channel, out_channel, kernel_size):
        super(CountingDecoder, self).__init__()
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.trans_layer = nn.Sequential(
            nn.Conv2D(
                self.in_channel,
                512,
                kernel_size=kernel_size,
                padding=kernel_size // 2,
                bias_attr=False),
            nn.BatchNorm2D(512))
        self.channel_att = ChannelAtt(512, 16)
        self.pred_layer = nn.Sequential(
            nn.Conv2D(
                512, self.out_channel, kernel_size=1, bias_attr=False),
            nn.Sigmoid())

    def forward(self, x, mask):
        b, _, h, w = x.shape
        x = self.trans_layer(x)
        x = self.channel_att(x)
        x = self.pred_layer(x)
        if mask is not None:
            x = x * mask
        x = paddle.reshape(x, [b, self.out_channel, -1])
        x1 = paddle.sum(x, axis=-1)
        return x1, paddle.reshape(x, [b, self.out_channel, h, w])
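
# Usage sketch for CountingDecoder (illustrative): it predicts, for each of
# out_channel symbol classes, how many times the symbol appears, plus a
# spatial counting map. The 684/111 sizes below are assumed example values.
#
#   mscm = CountingDecoder(in_channel=684, out_channel=111, kernel_size=3)
#   feat = paddle.rand([2, 684, 4, 16])   # encoder features, B x C x H x W
#   mask = paddle.ones([2, 1, 4, 16])     # valid-pixel mask at feature scale
#   counts, count_map = mscm(feat, mask)
#   # counts: [2, 111], count_map: [2, 111, 4, 16]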


'''
Attention Decoder
'''


class PositionEmbeddingSine(nn.Layer):
    def __init__(self,
                 num_pos_feats=64,
                 temperature=10000,
                 normalize=False,
                 scale=None):
        super().__init__()
        self.num_pos_feats = num_pos_feats
        self.temperature = temperature
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        if scale is None:
            scale = 2 * math.pi
        self.scale = scale

    def forward(self, x, mask):
        y_embed = paddle.cumsum(mask, 1, dtype='float32')
        x_embed = paddle.cumsum(mask, 2, dtype='float32')

        if self.normalize:
            eps = 1e-6
            y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
            x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale

        dim_t = paddle.arange(self.num_pos_feats, dtype='float32')
        dim_d = paddle.expand(paddle.to_tensor(2), dim_t.shape)
        dim_t = self.temperature**(2 * (dim_t / dim_d).astype('int64') /
                                   self.num_pos_feats)

        pos_x = paddle.unsqueeze(x_embed, [3]) / dim_t
        pos_y = paddle.unsqueeze(y_embed, [3]) / dim_t

        pos_x = paddle.flatten(
            paddle.stack(
                [
                    paddle.sin(pos_x[:, :, :, 0::2]),
                    paddle.cos(pos_x[:, :, :, 1::2])
                ],
                axis=4),
            3)
        pos_y = paddle.flatten(
            paddle.stack(
                [
                    paddle.sin(pos_y[:, :, :, 0::2]),
                    paddle.cos(pos_y[:, :, :, 1::2])
                ],
                axis=4),
            3)

        pos = paddle.transpose(
            paddle.concat(
                [pos_y, pos_x], axis=3), [0, 3, 1, 2])
        return pos
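
# Shape sketch for PositionEmbeddingSine (illustrative): given a B x H x W
# mask it returns a B x (2 * num_pos_feats) x H x W sinusoidal encoding, so
# num_pos_feats=256 yields the 512 channels the attention branch expects.
# The first argument `x` is not used by forward().
#
#   pe = PositionEmbeddingSine(256, normalize=True)
#   mask = paddle.ones([2, 4, 16])
#   pos = pe(None, mask)                  # pos: [2, 512, 4, 16]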


class AttDecoder(nn.Layer):
    def __init__(self, ratio, is_train, input_size, hidden_size,
                 encoder_out_channel, dropout, dropout_ratio, word_num,
                 counting_decoder_out_channel, attention):
        super(AttDecoder, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.out_channel = encoder_out_channel
        self.attention_dim = attention['attention_dim']
        self.dropout_prob = dropout
        self.ratio = ratio
        self.word_num = word_num
        self.counting_num = counting_decoder_out_channel
        self.is_train = is_train

        self.init_weight = nn.Linear(self.out_channel, self.hidden_size)
        self.embedding = nn.Embedding(self.word_num, self.input_size)
        self.word_input_gru = nn.GRUCell(self.input_size, self.hidden_size)
        self.word_attention = Attention(hidden_size, attention['attention_dim'])

        self.encoder_feature_conv = nn.Conv2D(
            self.out_channel,
            self.attention_dim,
            kernel_size=attention['word_conv_kernel'],
            padding=attention['word_conv_kernel'] // 2)

        self.word_state_weight = nn.Linear(self.hidden_size, self.hidden_size)
        self.word_embedding_weight = nn.Linear(self.input_size,
                                               self.hidden_size)
        self.word_context_weight = nn.Linear(self.out_channel, self.hidden_size)
        self.counting_context_weight = nn.Linear(self.counting_num,
                                                 self.hidden_size)
        self.word_convert = nn.Linear(self.hidden_size, self.word_num)

        if dropout:
            self.dropout = nn.Dropout(dropout_ratio)

    def forward(self, cnn_features, labels, counting_preds, images_mask):
        if self.is_train:
            _, num_steps = labels.shape
        else:
            num_steps = 36

        batch_size, _, height, width = cnn_features.shape
        images_mask = images_mask[:, :, ::self.ratio, ::self.ratio]

        word_probs = paddle.zeros((batch_size, num_steps, self.word_num))
        word_alpha_sum = paddle.zeros((batch_size, 1, height, width))

        hidden = self.init_hidden(cnn_features, images_mask)
        counting_context_weighted = self.counting_context_weight(counting_preds)
        cnn_features_trans = self.encoder_feature_conv(cnn_features)

        position_embedding = PositionEmbeddingSine(256, normalize=True)
        pos = position_embedding(cnn_features_trans, images_mask[:, 0, :, :])
        cnn_features_trans = cnn_features_trans + pos

        word = paddle.ones([batch_size, 1], dtype='int64')  # init word as sos
        word = word.squeeze(axis=1)
        for i in range(num_steps):
            word_embedding = self.embedding(word)
            _, hidden = self.word_input_gru(word_embedding, hidden)
            word_context_vec, _, word_alpha_sum = self.word_attention(
                cnn_features, cnn_features_trans, hidden, word_alpha_sum,
                images_mask)

            current_state = self.word_state_weight(hidden)
            word_weighted_embedding = self.word_embedding_weight(word_embedding)
            word_context_weighted = self.word_context_weight(word_context_vec)

            if self.dropout_prob:
                word_out_state = self.dropout(
                    current_state + word_weighted_embedding +
                    word_context_weighted + counting_context_weighted)
            else:
                word_out_state = current_state + word_weighted_embedding + \
                    word_context_weighted + counting_context_weighted

            word_prob = self.word_convert(word_out_state)
            word_probs[:, i] = word_prob

            if self.is_train:
                word = labels[:, i]
            else:
                word = word_prob.argmax(1)
                word = paddle.multiply(
                    word, labels[:, i]
                )  # labels are ones-like tensor in infer/predict mode
        return word_probs

    def init_hidden(self, features, feature_mask):
        average = paddle.sum(paddle.sum(features * feature_mask, axis=-1),
                             axis=-1) / paddle.sum(
                                 (paddle.sum(feature_mask, axis=-1)), axis=-1)
        average = self.init_weight(average)
        return paddle.tanh(average)
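
# Configuration sketch for AttDecoder (illustrative, example values only):
# the `attention` dict must supply 'attention_dim' and 'word_conv_kernel',
# and attention_dim should equal 2 * num_pos_feats of the position
# embedding above (512 here) so `cnn_features_trans + pos` matches.
#
#   decoder = AttDecoder(
#       ratio=16, is_train=False, input_size=256, hidden_size=256,
#       encoder_out_channel=684, dropout=True, dropout_ratio=0.5,
#       word_num=111, counting_decoder_out_channel=111,
#       attention={'attention_dim': 512, 'word_conv_kernel': 1})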


'''
Attention Module
'''


class Attention(nn.Layer):
    def __init__(self, hidden_size, attention_dim):
        super(Attention, self).__init__()
        self.hidden = hidden_size
        self.attention_dim = attention_dim
        self.hidden_weight = nn.Linear(self.hidden, self.attention_dim)
        self.attention_conv = nn.Conv2D(
            1, 512, kernel_size=11, padding=5, bias_attr=False)
        self.attention_weight = nn.Linear(
            512, self.attention_dim, bias_attr=False)
        self.alpha_convert = nn.Linear(self.attention_dim, 1)

    def forward(self,
                cnn_features,
                cnn_features_trans,
                hidden,
                alpha_sum,
                image_mask=None):
        query = self.hidden_weight(hidden)
        alpha_sum_trans = self.attention_conv(alpha_sum)
        coverage_alpha = self.attention_weight(
            paddle.transpose(alpha_sum_trans, [0, 2, 3, 1]))
        alpha_score = paddle.tanh(
            paddle.unsqueeze(query, [1, 2]) + coverage_alpha + paddle.transpose(
                cnn_features_trans, [0, 2, 3, 1]))
        energy = self.alpha_convert(alpha_score)
        energy = energy - energy.max()
        energy_exp = paddle.exp(paddle.squeeze(energy, -1))

        if image_mask is not None:
            energy_exp = energy_exp * paddle.squeeze(image_mask, 1)
        alpha = energy_exp / (paddle.unsqueeze(
            paddle.sum(paddle.sum(energy_exp, -1), -1), [1, 2]) + 1e-10)
        alpha_sum = paddle.unsqueeze(alpha, 1) + alpha_sum
        context_vector = paddle.sum(
            paddle.sum((paddle.unsqueeze(alpha, 1) * cnn_features), -1), -1)
        return context_vector, alpha, alpha_sum
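
# Call sketch for Attention (illustrative): coverage-based additive
# attention. `alpha_sum` accumulates past attention maps so regions that
# were already attended to are down-weighted; callers thread it through
# successive decoding steps.
#
#   att = Attention(hidden_size=256, attention_dim=512)
#   ctx, alpha, alpha_sum = att(
#       cnn_features, cnn_features_trans, hidden, alpha_sum, image_mask)
#   # ctx: [B, C_enc], alpha: [B, H, W], alpha_sum: [B, 1, H, W]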


class CANHead(nn.Layer):
    def __init__(self, in_channel, out_channel, ratio, attdecoder, **kwargs):
        super(CANHead, self).__init__()

        self.in_channel = in_channel
        self.out_channel = out_channel

        self.counting_decoder1 = CountingDecoder(self.in_channel,
                                                 self.out_channel, 3)  # mscm
        self.counting_decoder2 = CountingDecoder(self.in_channel,
                                                 self.out_channel, 5)

        self.decoder = AttDecoder(ratio, **attdecoder)
        self.ratio = ratio

    def forward(self, inputs, targets=None):
        cnn_features, images_mask, labels = inputs

        counting_mask = images_mask[:, :, ::self.ratio, ::self.ratio]
        counting_preds1, _ = self.counting_decoder1(cnn_features, counting_mask)
        counting_preds2, _ = self.counting_decoder2(cnn_features, counting_mask)
        counting_preds = (counting_preds1 + counting_preds2) / 2

        word_probs = self.decoder(cnn_features, labels, counting_preds,
                                  images_mask)
        return word_probs, counting_preds, counting_preds1, counting_preds2
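

if __name__ == "__main__":
    # Minimal smoke test (illustrative, not part of the original file).
    # The sizes below assume a 684-channel encoder feature map, 111 symbol
    # classes and a downsampling ratio of 16; they mirror a typical CAN
    # configuration but are only example values.
    attdecoder_cfg = {
        'is_train': False,
        'input_size': 256,
        'hidden_size': 256,
        'encoder_out_channel': 684,
        'dropout': True,
        'dropout_ratio': 0.5,
        'word_num': 111,
        'counting_decoder_out_channel': 111,
        'attention': {'attention_dim': 512,
                      'word_conv_kernel': 1},
    }
    head = CANHead(
        in_channel=684, out_channel=111, ratio=16, attdecoder=attdecoder_cfg)
    head.eval()

    cnn_features = paddle.rand([1, 684, 4, 16])   # B x C x H/16 x W/16
    images_mask = paddle.ones([1, 1, 64, 256])    # B x 1 x H x W
    labels = paddle.ones([1, 36], dtype='int64')  # ones-like labels at inference

    word_probs, counting_preds, _, _ = head((cnn_features, images_mask, labels))
    print(word_probs.shape)      # [1, 36, 111]
    print(counting_preds.shape)  # [1, 111]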