gaspin_transformer.py

# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import functools
import math

import numpy as np
import paddle
from paddle import nn, ParamAttr
from paddle.nn import functional as F

from .tps import GridGenerator

'''This code is adapted from:
https://github.com/hikopensource/DAVAR-Lab-OCR/davarocr/davar_rcg/models/transformations/gaspin_transformation.py
'''


class SP_TransformerNetwork(nn.Layer):
    """
    Structure-Preserving Transformation (SPT) as Eq. (2) in Ref. [1]

    Ref: [1] SPIN: Structure-Preserving Inner Offset Network
             for Scene Text Recognition. AAAI-2021.
    """

    def __init__(self, nc=1, default_type=5):
        """Based on SPIN

        Args:
            nc (int): number of input channels (usually 1 or 3)
            default_type (int): the complexity K of the transformation
                intensities (the paper defaults to 6)
        """
        super(SP_TransformerNetwork, self).__init__()
        self.power_list = self.cal_K(default_type)
        self.sigmoid = nn.Sigmoid()
        self.bn = nn.InstanceNorm2D(nc)

    def cal_K(self, k=5):
        r"""
        Compute the exponents applied to the normalized pixel intensities.

        Args:
            k (int): the complexity K of the transformation intensities
                (the paper defaults to 6)

        Returns:
            List: the (2K+1) exponents \beta applied to the normalized
                intensities in [0, 1], shaped [1 x (2K+1)]
        """
        x = []
        if k != 0:
            for i in range(1, k + 1):
                lower = round(math.log(1 - (0.5 / (k + 1)) * i) /
                              math.log((0.5 / (k + 1)) * i), 2)
                upper = round(1 / lower, 2)
                x.append(lower)
                x.append(upper)
        x.append(1.00)
        return x
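
    # For example, cal_K(6) yields the 13 exponents (rounded to two
    # decimals by the code above):
    #   [0.03, 33.33, 0.08, 12.5, 0.16, 6.25, 0.27, 3.7, 0.43, 2.33,
    #    0.66, 1.52, 1.0]
    # i.e. K reciprocal exponent pairs plus the identity exponent 1.0.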

    def forward(self, batch_I, weights, offsets, lambda_color=None):
        r"""
        Args:
            batch_I (Tensor): batch of input images
                [batch_size x nc x I_height x I_width]
            weights (Tensor): predicted weights of the (2K+1) power terms,
                [batch_size x (2K+1) x 1]
            offsets (Tensor): the color offsets x_{offsets} predicted by AIN
                (None when AIN is disabled)
            lambda_color (Tensor): the learnable update gate \alpha in
                Eq. (5): g(x) = (1 - \alpha) \odot x + \alpha \odot x_{offsets}

        Returns:
            Tensor: images transformed by SPN as Eq. (4) in Ref. [1]
                [batch_size x I_channel_num x I_r_height x I_r_width]
        """
        # Map inputs from [-1, 1] to [0, 1] before the power transforms.
        batch_I = (batch_I + 1) * 0.5
        if offsets is not None:
            # AIN update gate, Eq. (5).
            batch_I = batch_I * (1 - lambda_color) + offsets * lambda_color
        batch_weight_params = paddle.unsqueeze(
            paddle.unsqueeze(weights, -1), -1)
        # Weighted sum of the (2K+1) power terms, Eq. (4).
        batch_I_power = paddle.stack(
            [batch_I.pow(p) for p in self.power_list], axis=1)
        batch_weight_sum = paddle.sum(
            batch_I_power * batch_weight_params, axis=1)
        batch_weight_sum = self.bn(batch_weight_sum)
        batch_weight_sum = self.sigmoid(batch_weight_sum)
        # Map back to [-1, 1].
        batch_weight_sum = batch_weight_sum * 2 - 1
        return batch_weight_sum
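
    # Shape walk-through (illustrative, with nc=1, default_type=6 and a
    # 32x100 input): weights [B, 13, 1] is unsqueezed to [B, 13, 1, 1, 1],
    # batch_I_power is stacked to [B, 13, 1, 32, 100], and the sum over
    # axis=1 reduces it back to [B, 1, 32, 100].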


class GA_SPIN_Transformer(nn.Layer):
    """
    Geometric-Absorbed SPIN Transformation (GA-SPIN) proposed in Ref. [1]

    Ref: [1] SPIN: Structure-Preserving Inner Offset Network
             for Scene Text Recognition. AAAI-2021.
    """

    def __init__(self,
                 in_channels=1,
                 I_r_size=(32, 100),
                 offsets=False,
                 norm_type='BN',
                 default_type=6,
                 loc_lr=1,
                 stn=True):
        """
        Args:
            in_channels (int): channel count of the input features,
                1 for grayscale images and 3 for RGB input
            I_r_size (tuple): size of the rectified images
                (used in the STN transformations)
            offsets (bool): set to False to use SPN without AIN,
                True to use full SPIN (both SPN and AIN)
            norm_type (str): normalization type of the module,
                'BN' by default, 'IN' optionally
            default_type (int): the K of the chromatic space,
                set to 3/5/6 depending on the complexity of the
                transformation intensities
            loc_lr (float): learning rate of the localization network
            stn (bool): whether to use the STN
        """
        super(GA_SPIN_Transformer, self).__init__()
        self.nc = in_channels
        self.spt = True
        self.offsets = offsets
        self.stn = stn  # True in GA-SPIN, False in plain SPIN
        self.I_r_size = I_r_size
        self.out_channels = in_channels
        if norm_type == 'BN':
            norm_layer = functools.partial(
                nn.BatchNorm2D, use_global_stats=True)
        elif norm_type == 'IN':
            norm_layer = functools.partial(
                nn.InstanceNorm2D, weight_attr=False, use_global_stats=False)
        else:
            raise NotImplementedError(
                'normalization layer [%s] is not found' % norm_type)

        if self.spt:
            self.sp_net = SP_TransformerNetwork(in_channels, default_type)
            self.spt_convnet = nn.Sequential(
                # 32x100
                nn.Conv2D(in_channels, 32, 3, 1, 1, bias_attr=False),
                norm_layer(32), nn.ReLU(),
                nn.MaxPool2D(kernel_size=2, stride=2),
                # 16x50
                nn.Conv2D(32, 64, 3, 1, 1, bias_attr=False),
                norm_layer(64), nn.ReLU(),
                nn.MaxPool2D(kernel_size=2, stride=2),
                # 8x25
                nn.Conv2D(64, 128, 3, 1, 1, bias_attr=False),
                norm_layer(128), nn.ReLU(),
                nn.MaxPool2D(kernel_size=2, stride=2),
                # 4x12
            )
            self.stucture_fc1 = nn.Sequential(
                nn.Conv2D(128, 256, 3, 1, 1, bias_attr=False),
                norm_layer(256), nn.ReLU(),
                nn.MaxPool2D(kernel_size=2, stride=2),
                nn.Conv2D(256, 256, 3, 1, 1, bias_attr=False),
                norm_layer(256), nn.ReLU(),  # 2x6
                nn.MaxPool2D(kernel_size=2, stride=2),
                nn.Conv2D(256, 512, 3, 1, 1, bias_attr=False),
                norm_layer(512), nn.ReLU(),  # 1x3
                nn.AdaptiveAvgPool2D(1),
                nn.Flatten(1, -1),  # batch_size x 512
                nn.Linear(512, 256,
                          weight_attr=nn.initializer.Normal(0.001)),
                nn.BatchNorm1D(256), nn.ReLU()
            )
            self.out_weight = 2 * default_type + 1
            self.spt_length = 2 * default_type + 1
            if offsets:
                self.out_weight += 1
            if self.stn:
                self.F = 20
                self.out_weight += self.F * 2
                self.GridGenerator = GridGenerator(self.F * 2, self.F)

            # Init stucture_fc2 in the localization network.
            initial_bias = self.init_spin(default_type * 2)
            initial_bias = initial_bias.reshape(-1)
            param_attr = ParamAttr(
                learning_rate=loc_lr,
                initializer=nn.initializer.Assign(
                    np.zeros([256, self.out_weight])))
            bias_attr = ParamAttr(
                learning_rate=loc_lr,
                initializer=nn.initializer.Assign(initial_bias))
            self.stucture_fc2 = nn.Linear(256, self.out_weight,
                                          weight_attr=param_attr,
                                          bias_attr=bias_attr)
            self.sigmoid = nn.Sigmoid()

            if offsets:
                self.offset_fc1 = nn.Sequential(
                    nn.Conv2D(128, 16, 3, 1, 1, bias_attr=False),
                    norm_layer(16),
                    nn.ReLU(), )
                self.offset_fc2 = nn.Conv2D(16, in_channels, 3, 1, 1)
                self.pool = nn.MaxPool2D(2, 2)

    def init_spin(self, nz):
        r"""
        Build the initial bias for stucture_fc2.

        Args:
            nz (int): number of paired \beta exponents, i.e. K x 2
        """
        init_id = [0.00] * nz + [5.00]
        if self.offsets:
            init_id += [-5.00]
        init = np.array(init_id)
        if self.stn:
            F = self.F
            ctrl_pts_x = np.linspace(-1.0, 1.0, int(F / 2))
            ctrl_pts_y_top = np.linspace(0.0, -1.0, num=int(F / 2))
            ctrl_pts_y_bottom = np.linspace(1.0, 0.0, num=int(F / 2))
            ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)
            ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)
            initial_bias = np.concatenate([ctrl_pts_top, ctrl_pts_bottom],
                                          axis=0)
            initial_bias = initial_bias.reshape(-1)
            init = np.concatenate([init, initial_bias], axis=0)
        return init
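
    # Bias layout sanity check (illustrative): with default_type=6,
    # offsets=True and stn=True, init_spin(12) returns
    #   12 zeros + [5.0]          -> 13 SPT weights (the identity exponent
    #                                1.0 starts dominant),
    #   + [-5.0]                  -> AIN gate; sigmoid(-5) ~ 0.007, so the
    #                                color offsets are nearly off at start,
    #   + 40 fiducial coordinates -> F=20 control points along the top and
    #                                bottom edges,
    # for 54 entries in total, matching self.out_weight = 13 + 1 + 40.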

    def forward(self, x, return_weight=False):
        """
        Args:
            x (Tensor): input image batch
            return_weight (bool): False by default; if True, return the
                offsets x_{offsets} predicted by AIN instead

        Returns:
            Tensor: rectified images
                [batch_size x I_channel_num x I_height x I_width],
                the same size as the input
        """
        if self.spt:
            feat = self.spt_convnet(x)
            fc1 = self.stucture_fc1(feat)
            sp_weight_fusion = self.stucture_fc2(fc1)
            sp_weight_fusion = sp_weight_fusion.reshape(
                [x.shape[0], self.out_weight, 1])
            if self.offsets:  # SPIN with AIN
                lambda_color = sp_weight_fusion[:, self.spt_length, 0]
                lambda_color = self.sigmoid(lambda_color).unsqueeze(
                    -1).unsqueeze(-1).unsqueeze(-1)
                sp_weight = sp_weight_fusion[:, :self.spt_length, :]
                offsets = self.pool(self.offset_fc2(self.offset_fc1(feat)))

                # With a 32x100 input the offset map is pooled down to 2x6.
                assert offsets.shape[2] == 2
                assert offsets.shape[3] == 6
                offsets = self.sigmoid(offsets)

                if return_weight:
                    return offsets
                offsets = nn.functional.upsample(
                    offsets, size=(x.shape[2], x.shape[3]), mode='bilinear')

                if self.stn:
                    batch_C_prime = sp_weight_fusion[
                        :, (self.spt_length + 1):, :].reshape(
                            [x.shape[0], self.F, 2])
                    build_P_prime = self.GridGenerator(batch_C_prime,
                                                       self.I_r_size)
                    build_P_prime_reshape = build_P_prime.reshape(
                        [build_P_prime.shape[0], self.I_r_size[0],
                         self.I_r_size[1], 2])
            else:  # SPIN without AIN
                sp_weight = sp_weight_fusion[:, :self.spt_length, :]
                lambda_color, offsets = None, None

                if self.stn:
                    batch_C_prime = sp_weight_fusion[
                        :, self.spt_length:, :].reshape(
                            [x.shape[0], self.F, 2])
                    build_P_prime = self.GridGenerator(batch_C_prime,
                                                       self.I_r_size)
                    build_P_prime_reshape = build_P_prime.reshape(
                        [build_P_prime.shape[0], self.I_r_size[0],
                         self.I_r_size[1], 2])

            x = self.sp_net(x, sp_weight, offsets, lambda_color)
            if self.stn:
                x = F.grid_sample(x=x, grid=build_P_prime_reshape,
                                  padding_mode='border')
        return x
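

# A minimal smoke-test sketch (not part of the original module). It assumes
# paddle is installed and that GridGenerator is importable from .tps, so it
# must be run from within the package (e.g. via python -m), not as a
# standalone script. Inputs are expected in [-1, 1].
if __name__ == "__main__":
    net = GA_SPIN_Transformer(in_channels=1,
                              I_r_size=(32, 100),
                              offsets=True,
                              norm_type='BN',
                              default_type=6,
                              stn=True)
    net.eval()
    dummy = paddle.rand([2, 1, 32, 100]) * 2 - 1  # two 32x100 gray images
    with paddle.no_grad():
        rectified = net(dummy)
    print(rectified.shape)  # expected: [2, 1, 32, 100]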