# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is referred from:
https://github.com/ayumiymk/aster.pytorch/blob/master/lib/models/attention_recognition_head.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import paddle
from paddle import nn
from paddle.nn import functional as F


class AsterHead(nn.Layer):
    def __init__(self,
                 in_channels,
                 out_channels,
                 sDim,
                 attDim,
                 max_len_labels,
                 time_step=25,
                 beam_width=5,
                 **kwargs):
        super(AsterHead, self).__init__()
        self.num_classes = out_channels
        self.in_planes = in_channels
        self.sDim = sDim
        self.attDim = attDim
        self.max_len_labels = max_len_labels
        self.decoder = AttentionRecognitionHead(in_channels, out_channels,
                                                sDim, attDim, max_len_labels)
        self.time_step = time_step
        self.embeder = Embedding(self.time_step, in_channels)
        self.beam_width = beam_width
        self.eos = self.num_classes - 3

    def forward(self, x, targets=None, embed=None):
        return_dict = {}
        embedding_vectors = self.embeder(x)

        if self.training:
            rec_targets, rec_lengths, _ = targets
            rec_pred = self.decoder([x, rec_targets, rec_lengths],
                                    embedding_vectors)
            return_dict['rec_pred'] = rec_pred
            return_dict['embedding_vectors'] = embedding_vectors
        else:
            rec_pred, rec_pred_scores = self.decoder.beam_search(
                x, self.beam_width, self.eos, embedding_vectors)
            return_dict['rec_pred'] = rec_pred
            return_dict['rec_pred_scores'] = rec_pred_scores
            return_dict['embedding_vectors'] = embedding_vectors

        return return_dict
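

# A minimal inference sketch, not part of the original file: it assumes a
# backbone that emits 25 time steps of 512-dim features and a 100-class
# charset; all sizes here are illustrative. Wrapped in a function so nothing
# runs on import.
def _demo_aster_head():
    head = AsterHead(
        in_channels=512,
        out_channels=100,
        sDim=512,
        attDim=512,
        max_len_labels=25)
    head.eval()  # eval mode takes the beam-search branch of forward()
    feats = paddle.rand([2, 25, 512])  # [b x T x in_channels]
    out = head(feats)
    # top-beam symbol ids per step, plus placeholder scores from beam_search
    print(out['rec_pred'].shape, out['rec_pred_scores'].shape)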


class Embedding(nn.Layer):
    def __init__(self, in_timestep, in_planes, mid_dim=4096, embed_dim=300):
        super(Embedding, self).__init__()
        self.in_timestep = in_timestep
        self.in_planes = in_planes
        self.embed_dim = embed_dim
        self.mid_dim = mid_dim
        self.eEmbed = nn.Linear(
            in_timestep * in_planes,
            self.embed_dim)  # Embed encoder output to a word-embedding like

    def forward(self, x):
        x = paddle.reshape(x, [paddle.shape(x)[0], -1])
        x = self.eEmbed(x)
        return x
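

# Shape sketch for the embedding branch (illustrative sizes, not from the
# original file): a [b x T x C] feature map is flattened to [b x T*C] and
# projected to a 300-dim word-embedding-like vector, the dimensionality that
# DecoderUnit.get_initial_state asserts on.
def _demo_embedding():
    embeder = Embedding(in_timestep=25, in_planes=512)
    feats = paddle.rand([2, 25, 512])
    print(embeder(feats).shape)  # [2, 300]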


class AttentionRecognitionHead(nn.Layer):
    """
    input: feature sequence [b x T x in_planes]
    output: probability sequence: [b x T x num_classes]
    """

    def __init__(self, in_channels, out_channels, sDim, attDim,
                 max_len_labels):
        super(AttentionRecognitionHead, self).__init__()
        self.num_classes = out_channels  # this is the output classes. So it includes the <EOS>.
        self.in_planes = in_channels
        self.sDim = sDim
        self.attDim = attDim
        self.max_len_labels = max_len_labels

        self.decoder = DecoderUnit(
            sDim=sDim, xDim=in_channels, yDim=self.num_classes, attDim=attDim)

    def forward(self, x, embed):
        x, targets, lengths = x
        batch_size = paddle.shape(x)[0]
        # Decoder: teacher forcing, feeding the ground-truth previous symbol
        state = self.decoder.get_initial_state(embed)
        outputs = []
        for i in range(max(lengths)):
            if i == 0:
                y_prev = paddle.full(
                    shape=[batch_size], fill_value=self.num_classes)  # <BOS>
            else:
                y_prev = targets[:, i - 1]
            output, state = self.decoder(x, state, y_prev)
            outputs.append(output)
        outputs = paddle.concat([_.unsqueeze(1) for _ in outputs], 1)
        return outputs

    # inference stage.
    def sample(self, x):
        x, _, _ = x
        batch_size = paddle.shape(x)[0]
        # Decoder
        state = paddle.zeros([1, batch_size, self.sDim])

        predicted_ids, predicted_scores = [], []
        predicted = None
        for i in range(self.max_len_labels):
            if i == 0:
                y_prev = paddle.full(
                    shape=[batch_size], fill_value=self.num_classes)  # <BOS>
            else:
                y_prev = predicted  # greedy: feed back the previous argmax
            output, state = self.decoder(x, state, y_prev)
            output = F.softmax(output, axis=1)
            score = paddle.max(output, axis=1)
            predicted = paddle.argmax(output, axis=1)
            predicted_ids.append(predicted.unsqueeze(1))
            predicted_scores.append(score.unsqueeze(1))
        predicted_ids = paddle.concat(predicted_ids, 1)
        predicted_scores = paddle.concat(predicted_scores, 1)
        # return predicted_ids.squeeze(), predicted_scores.squeeze()
        return predicted_ids, predicted_scores

    def beam_search(self, x, beam_width, eos, embed):
        def _inflate(tensor, times, dim):
            repeat_dims = [1] * tensor.dim()
            repeat_dims[dim] = times
            output = paddle.tile(tensor, repeat_dims)
            return output

        # https://github.com/IBM/pytorch-seq2seq/blob/fede87655ddce6c94b38886089e05321dc9802af/seq2seq/models/TopKDecoder.py
        batch_size, l, d = x.shape
        x = paddle.tile(
            paddle.transpose(
                x.unsqueeze(1), perm=[1, 0, 2, 3]), [beam_width, 1, 1, 1])
        inflated_encoder_feats = paddle.reshape(
            paddle.transpose(
                x, perm=[1, 0, 2, 3]), [-1, l, d])

        # Initialize the decoder
        state = self.decoder.get_initial_state(embed, tile_times=beam_width)
        pos_index = paddle.reshape(
            paddle.arange(batch_size) * beam_width, shape=[-1, 1])

        # Initialize the scores
        sequence_scores = paddle.full(
            shape=[batch_size * beam_width, 1], fill_value=-float('Inf'))
        index = [i * beam_width for i in range(0, batch_size)]
        sequence_scores[index] = 0.0

        # Initialize the input vector
        y_prev = paddle.full(
            shape=[batch_size * beam_width], fill_value=self.num_classes)

        # Store decisions for backtracking
        stored_scores = list()
        stored_predecessors = list()
        stored_emitted_symbols = list()

        for i in range(self.max_len_labels):
            output, state = self.decoder(inflated_encoder_feats, state, y_prev)
            state = paddle.unsqueeze(state, axis=0)
            log_softmax_output = paddle.nn.functional.log_softmax(
                output, axis=1)

            sequence_scores = _inflate(sequence_scores, self.num_classes, 1)
            sequence_scores += log_softmax_output
            scores, candidates = paddle.topk(
                paddle.reshape(sequence_scores, [batch_size, -1]),
                beam_width,
                axis=1)

            # Each flat candidate index c encodes (source beam, symbol):
            # symbol = c % num_classes, source beam = c // num_classes.
            # Reshape input = (bk, 1) and sequence_scores = (bk, 1)
            y_prev = paddle.reshape(
                candidates % self.num_classes, shape=[batch_size * beam_width])
            sequence_scores = paddle.reshape(
                scores, shape=[batch_size * beam_width, 1])

            # Update fields for next timestep
            pos_index = paddle.expand_as(pos_index, candidates)
            predecessors = paddle.cast(
                candidates / self.num_classes + pos_index, dtype='int64')
            predecessors = paddle.reshape(
                predecessors, shape=[batch_size * beam_width, 1])
            state = paddle.index_select(
                state, index=predecessors.squeeze(), axis=1)

            # Update sequence scores and erase scores for <eos> symbol so that they aren't expanded
            stored_scores.append(sequence_scores.clone())
            y_prev = paddle.reshape(y_prev, shape=[-1, 1])
            eos_prev = paddle.full_like(y_prev, fill_value=eos)
            mask = eos_prev == y_prev
            mask = paddle.nonzero(mask)
            if mask.dim() > 0:
                sequence_scores = sequence_scores.numpy()
                mask = mask.numpy()
                sequence_scores[mask] = -float('inf')
                sequence_scores = paddle.to_tensor(sequence_scores)

            # Cache results for backtracking
            stored_predecessors.append(predecessors)
            y_prev = paddle.squeeze(y_prev)
            stored_emitted_symbols.append(y_prev)

        # Do backtracking to return the optimal values
        #====== backtrack ======#
        # Initialize return variables given different types
        p = list()
        l = [[self.max_len_labels] * beam_width
             for _ in range(batch_size)]  # Placeholder for lengths of top-k sequences

        # the last step output of the beams are not sorted
        # thus they are sorted here
        sorted_score, sorted_idx = paddle.topk(
            paddle.reshape(
                stored_scores[-1], shape=[batch_size, beam_width]),
            beam_width)
        # initialize the sequence scores with the sorted last step beam scores
        s = sorted_score.clone()

        batch_eos_found = [0] * batch_size  # the number of EOS found
        # in the backward loop below for each batch

        t = self.max_len_labels - 1
        # initialize the back pointer with the sorted order of the last step beams.
        # add pos_index for indexing variable with b*k as the first dimension.
        t_predecessors = paddle.reshape(
            sorted_idx + pos_index.expand_as(sorted_idx),
            shape=[batch_size * beam_width])
        while t >= 0:
            # Re-order the variables with the back pointer
            current_symbol = paddle.index_select(
                stored_emitted_symbols[t], index=t_predecessors, axis=0)
            t_predecessors = paddle.index_select(
                stored_predecessors[t].squeeze(), index=t_predecessors, axis=0)
            eos_indices = stored_emitted_symbols[t] == eos
            eos_indices = paddle.nonzero(eos_indices)
            if eos_indices.dim() > 0:
                for i in range(eos_indices.shape[0] - 1, -1, -1):
                    # Indices of the EOS symbol for both variables
                    # with b*k as the first dimension, and b, k for
                    # the first two dimensions
                    idx = eos_indices[i]
                    b_idx = int(idx[0] / beam_width)
                    # The indices of the replacing position
                    # according to the replacement strategy noted above
                    res_k_idx = beam_width - (batch_eos_found[b_idx] %
                                              beam_width) - 1
                    batch_eos_found[b_idx] += 1
                    res_idx = b_idx * beam_width + res_k_idx

                    # Replace the old information in return variables
                    # with the new ended sequence information
                    t_predecessors[res_idx] = stored_predecessors[t][idx[0]]
                    current_symbol[res_idx] = stored_emitted_symbols[t][idx[0]]
                    s[b_idx, res_k_idx] = stored_scores[t][idx[0], 0]
                    l[b_idx][res_k_idx] = t + 1

            # record the back tracked results
            p.append(current_symbol)
            t -= 1

        # Sort and re-order again as the added ended sequences may change
        # the order (very unlikely)
        s, re_sorted_idx = s.topk(beam_width)
        for b_idx in range(batch_size):
            l[b_idx] = [
                l[b_idx][k_idx.item()] for k_idx in re_sorted_idx[b_idx, :]
            ]

        re_sorted_idx = paddle.reshape(
            re_sorted_idx + pos_index.expand_as(re_sorted_idx),
            [batch_size * beam_width])

        # Reverse the sequences and re-order at the same time
        # It is reversed because the backtracking happens in reverse time order
        p = [
            paddle.reshape(
                paddle.index_select(step, re_sorted_idx, 0),
                shape=[batch_size, beam_width, -1]) for step in reversed(p)
        ]
        p = paddle.concat(p, -1)[:, 0, :]
        return p, paddle.ones_like(p)
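

# A teacher-forcing sketch for the training-time forward pass (illustrative
# sizes, not from the original file): targets supply the previous symbol at
# each step, and the output stacks per-step logits into [b x T x num_classes].
def _demo_teacher_forcing():
    head = AttentionRecognitionHead(
        in_channels=512,
        out_channels=100,
        sDim=512,
        attDim=512,
        max_len_labels=25)
    x = paddle.rand([2, 25, 512])
    targets = paddle.randint(0, 100, shape=[2, 10])
    lengths = [10, 8]
    embed = paddle.rand([2, 300])  # stand-in for the semantic embedding
    logits = head([x, targets, lengths], embed)
    print(logits.shape)  # [2, 10, 100]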


class AttentionUnit(nn.Layer):
    def __init__(self, sDim, xDim, attDim):
        super(AttentionUnit, self).__init__()

        self.sDim = sDim
        self.xDim = xDim
        self.attDim = attDim

        self.sEmbed = nn.Linear(sDim, attDim)
        self.xEmbed = nn.Linear(xDim, attDim)
        self.wEmbed = nn.Linear(attDim, 1)

    def forward(self, x, sPrev):
        batch_size, T, _ = x.shape  # [b x T x xDim]
        x = paddle.reshape(x, [-1, self.xDim])  # [(b x T) x xDim]
        xProj = self.xEmbed(x)  # [(b x T) x attDim]
        xProj = paddle.reshape(xProj, [batch_size, T, -1])  # [b x T x attDim]
        sPrev = sPrev.squeeze(0)
        sProj = self.sEmbed(sPrev)  # [b x attDim]
        sProj = paddle.unsqueeze(sProj, 1)  # [b x 1 x attDim]
        sProj = paddle.expand(sProj,
                              [batch_size, T, self.attDim])  # [b x T x attDim]

        sumTanh = paddle.tanh(sProj + xProj)
        sumTanh = paddle.reshape(sumTanh, [-1, self.attDim])

        vProj = self.wEmbed(sumTanh)  # [(b x T) x 1]
        vProj = paddle.reshape(vProj, [batch_size, T])
        alpha = F.softmax(
            vProj, axis=1)  # attention weights for each sample in the minibatch
        return alpha
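

# A shape sketch for the additive (Bahdanau-style) attention above
# (illustrative sizes, not from the original file): alpha sums to 1 over the
# T encoder positions for every sample.
def _demo_attention_unit():
    att = AttentionUnit(sDim=512, xDim=512, attDim=512)
    x = paddle.rand([2, 25, 512])  # [b x T x xDim]
    sPrev = paddle.rand([1, 2, 512])  # [1 x b x sDim]
    alpha = att(x, sPrev)
    print(alpha.shape, float(alpha.sum(axis=1)[0]))  # [2, 25], ~1.0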


class DecoderUnit(nn.Layer):
    def __init__(self, sDim, xDim, yDim, attDim):
        super(DecoderUnit, self).__init__()
        self.sDim = sDim
        self.xDim = xDim
        self.yDim = yDim
        self.attDim = attDim
        self.emdDim = attDim

        self.attention_unit = AttentionUnit(sDim, xDim, attDim)
        self.tgt_embedding = nn.Embedding(
            yDim + 1, self.emdDim, weight_attr=nn.initializer.Normal(
                std=0.01))  # the last is used for <BOS>
        self.gru = nn.GRUCell(input_size=xDim + self.emdDim, hidden_size=sDim)
        self.fc = nn.Linear(
            sDim,
            yDim,
            weight_attr=nn.initializer.Normal(std=0.01),
            bias_attr=nn.initializer.Constant(value=0))
        self.embed_fc = nn.Linear(300, self.sDim)

    def get_initial_state(self, embed, tile_times=1):
        assert embed.shape[1] == 300
        state = self.embed_fc(embed)  # N * sDim
        if tile_times != 1:
            state = state.unsqueeze(1)
            trans_state = paddle.transpose(state, perm=[1, 0, 2])
            state = paddle.tile(trans_state, repeat_times=[tile_times, 1, 1])
            trans_state = paddle.transpose(state, perm=[1, 0, 2])
            state = paddle.reshape(trans_state, shape=[-1, self.sDim])
        state = state.unsqueeze(0)  # 1 * N * sDim
        return state

    def forward(self, x, sPrev, yPrev):
        # x: feature sequence from the image decoder.
        batch_size, T, _ = x.shape
        alpha = self.attention_unit(x, sPrev)
        context = paddle.squeeze(paddle.matmul(alpha.unsqueeze(1), x), axis=1)
        yPrev = paddle.cast(yPrev, dtype="int64")
        yProj = self.tgt_embedding(yPrev)

        concat_context = paddle.concat([yProj, context], 1)
        concat_context = paddle.squeeze(concat_context, 1)
        sPrev = paddle.squeeze(sPrev, 0)
        output, state = self.gru(concat_context, sPrev)
        output = paddle.squeeze(output, axis=1)
        output = self.fc(output)
        return output, state
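

# A single-step decode sketch (illustrative sizes, not from the original
# file): one GRU step attends over the encoder features and emits logits over
# the yDim character classes; index yDim is the extra <BOS> embedding slot.
def _demo_decoder_unit():
    decoder = DecoderUnit(sDim=512, xDim=512, yDim=100, attDim=512)
    x = paddle.rand([2, 25, 512])  # encoder features
    state = decoder.get_initial_state(paddle.rand([2, 300]))
    y_prev = paddle.full(shape=[2], fill_value=100)  # <BOS>
    logits, state = decoder(x, state, y_prev)
    print(logits.shape)  # [2, 100]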