# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. """
This code is adapted from:
  16. https://github.com/ayumiymk/aster.pytorch/blob/master/lib/models/stn_head.py
  17. """
  18. from __future__ import absolute_import
  19. from __future__ import division
  20. from __future__ import print_function
  21. import math
  22. import paddle
  23. from paddle import nn, ParamAttr
  24. from paddle.nn import functional as F
  25. import numpy as np
  26. from .tps_spatial_transformer import TPSSpatialTransformer
  27. def conv3x3_block(in_channels, out_channels, stride=1):
  28. n = 3 * 3 * out_channels
  29. w = math.sqrt(2. / n)
  30. conv_layer = nn.Conv2D(
  31. in_channels,
  32. out_channels,
  33. kernel_size=3,
  34. stride=stride,
  35. padding=1,
  36. weight_attr=nn.initializer.Normal(
  37. mean=0.0, std=w),
  38. bias_attr=nn.initializer.Constant(0))
  39. block = nn.Sequential(conv_layer, nn.BatchNorm2D(out_channels), nn.ReLU())
  40. return block
  41. class STN(nn.Layer):
  42. def __init__(self, in_channels, num_ctrlpoints, activation='none'):
  43. super(STN, self).__init__()
  44. self.in_channels = in_channels
  45. self.num_ctrlpoints = num_ctrlpoints
  46. self.activation = activation
  47. self.stn_convnet = nn.Sequential(
  48. conv3x3_block(in_channels, 32), #32x64
  49. nn.MaxPool2D(
  50. kernel_size=2, stride=2),
  51. conv3x3_block(32, 64), #16x32
  52. nn.MaxPool2D(
  53. kernel_size=2, stride=2),
  54. conv3x3_block(64, 128), # 8*16
  55. nn.MaxPool2D(
  56. kernel_size=2, stride=2),
  57. conv3x3_block(128, 256), # 4*8
  58. nn.MaxPool2D(
  59. kernel_size=2, stride=2),
  60. conv3x3_block(256, 256), # 2*4,
  61. nn.MaxPool2D(
  62. kernel_size=2, stride=2),
  63. conv3x3_block(256, 256)) # 1*2
  64. self.stn_fc1 = nn.Sequential(
  65. nn.Linear(
  66. 2 * 256,
  67. 512,
  68. weight_attr=nn.initializer.Normal(0, 0.001),
  69. bias_attr=nn.initializer.Constant(0)),
  70. nn.BatchNorm1D(512),
  71. nn.ReLU())
  72. fc2_bias = self.init_stn()
  73. self.stn_fc2 = nn.Linear(
  74. 512,
  75. num_ctrlpoints * 2,
  76. weight_attr=nn.initializer.Constant(0.0),
  77. bias_attr=nn.initializer.Assign(fc2_bias))
  78. def init_stn(self):
  79. margin = 0.01
  80. sampling_num_per_side = int(self.num_ctrlpoints / 2)
  81. ctrl_pts_x = np.linspace(margin, 1. - margin, sampling_num_per_side)
  82. ctrl_pts_y_top = np.ones(sampling_num_per_side) * margin
  83. ctrl_pts_y_bottom = np.ones(sampling_num_per_side) * (1 - margin)
  84. ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)
  85. ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)
  86. ctrl_points = np.concatenate(
  87. [ctrl_pts_top, ctrl_pts_bottom], axis=0).astype(np.float32)
  88. if self.activation == 'none':
  89. pass
  90. elif self.activation == 'sigmoid':
  91. ctrl_points = -np.log(1. / ctrl_points - 1.)
  92. ctrl_points = paddle.to_tensor(ctrl_points)
  93. fc2_bias = paddle.reshape(
  94. ctrl_points, shape=[ctrl_points.shape[0] * ctrl_points.shape[1]])
  95. return fc2_bias
  96. def forward(self, x):
  97. x = self.stn_convnet(x)
  98. batch_size, _, h, w = x.shape
  99. x = paddle.reshape(x, shape=(batch_size, -1))
  100. img_feat = self.stn_fc1(x)
  101. x = self.stn_fc2(0.1 * img_feat)
  102. if self.activation == 'sigmoid':
  103. x = F.sigmoid(x)
  104. x = paddle.reshape(x, shape=[-1, self.num_ctrlpoints, 2])
  105. return img_feat, x
  106. class STN_ON(nn.Layer):
  107. def __init__(self, in_channels, tps_inputsize, tps_outputsize,
  108. num_control_points, tps_margins, stn_activation):
  109. super(STN_ON, self).__init__()
  110. self.tps = TPSSpatialTransformer(
  111. output_image_size=tuple(tps_outputsize),
  112. num_control_points=num_control_points,
  113. margins=tuple(tps_margins))
  114. self.stn_head = STN(in_channels=in_channels,
  115. num_ctrlpoints=num_control_points,
  116. activation=stn_activation)
  117. self.tps_inputsize = tps_inputsize
  118. self.out_channels = in_channels
  119. def forward(self, image):
  120. stn_input = paddle.nn.functional.interpolate(
  121. image, self.tps_inputsize, mode="bilinear", align_corners=True)
  122. stn_img_feat, ctrl_points = self.stn_head(stn_input)
  123. x, _ = self.tps(image, ctrl_points)
  124. return x