east_fpn.py

# copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import paddle
from paddle import nn
import paddle.nn.functional as F
from paddle import ParamAttr


class ConvBNLayer(nn.Layer):
    """Conv2D (no bias) followed by BatchNorm; the activation, when given,
    is applied inside BatchNorm through its `act` argument."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 groups=1,
                 if_act=True,
                 act=None,
                 name=None):
        super(ConvBNLayer, self).__init__()
        self.if_act = if_act
        self.act = act
        self.conv = nn.Conv2D(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            weight_attr=ParamAttr(name=name + '_weights'),
            bias_attr=False)
        self.bn = nn.BatchNorm(
            num_channels=out_channels,
            act=act,
            param_attr=ParamAttr(name="bn_" + name + "_scale"),
            bias_attr=ParamAttr(name="bn_" + name + "_offset"),
            moving_mean_name="bn_" + name + "_mean",
            moving_variance_name="bn_" + name + "_variance")

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        return x


class DeConvBNLayer(nn.Layer):
    """Conv2DTranspose (no bias) followed by BatchNorm; the activation, when
    given, is applied inside BatchNorm through its `act` argument."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 groups=1,
                 if_act=True,
                 act=None,
                 name=None):
        super(DeConvBNLayer, self).__init__()
        self.if_act = if_act
        self.act = act
        self.deconv = nn.Conv2DTranspose(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            weight_attr=ParamAttr(name=name + '_weights'),
            bias_attr=False)
        self.bn = nn.BatchNorm(
            num_channels=out_channels,
            act=act,
            param_attr=ParamAttr(name="bn_" + name + "_scale"),
            bias_attr=ParamAttr(name="bn_" + name + "_offset"),
            moving_mean_name="bn_" + name + "_mean",
            moving_variance_name="bn_" + name + "_variance")

    def forward(self, x):
        x = self.deconv(x)
        x = self.bn(x)
        return x
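

# With kernel_size=4, stride=2, padding=1 (the setting used for every g*_deconv
# below), Conv2DTranspose doubles the spatial resolution exactly:
#     H_out = (H_in - 1) * stride - 2 * padding + kernel_size
#           = (H_in - 1) * 2 - 2 + 4
#           = 2 * H_in
# which is what lets each upsampled map be concatenated with the next,
# higher-resolution backbone feature in EASTFPN.forward.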


class EASTFPN(nn.Layer):
    """U-Net style feature-merging branch for EAST: the deepest backbone
    feature map is progressively upsampled with DeConvBNLayer, concatenated
    with the next shallower feature map, and fused by a 3x3 ConvBNLayer."""

    def __init__(self, in_channels, model_name, **kwargs):
        super(EASTFPN, self).__init__()
        self.model_name = model_name
        if self.model_name == "large":
            self.out_channels = 128
        else:
            self.out_channels = 64
        # Reverse so that index 0 refers to the deepest (lowest-resolution) map.
        self.in_channels = in_channels[::-1]
        self.h1_conv = ConvBNLayer(
            in_channels=self.out_channels + self.in_channels[1],
            out_channels=self.out_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            if_act=True,
            act='relu',
            name="unet_h_1")
        self.h2_conv = ConvBNLayer(
            in_channels=self.out_channels + self.in_channels[2],
            out_channels=self.out_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            if_act=True,
            act='relu',
            name="unet_h_2")
        self.h3_conv = ConvBNLayer(
            in_channels=self.out_channels + self.in_channels[3],
            out_channels=self.out_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            if_act=True,
            act='relu',
            name="unet_h_3")
        self.g0_deconv = DeConvBNLayer(
            in_channels=self.in_channels[0],
            out_channels=self.out_channels,
            kernel_size=4,
            stride=2,
            padding=1,
            if_act=True,
            act='relu',
            name="unet_g_0")
        self.g1_deconv = DeConvBNLayer(
            in_channels=self.out_channels,
            out_channels=self.out_channels,
            kernel_size=4,
            stride=2,
            padding=1,
            if_act=True,
            act='relu',
            name="unet_g_1")
        self.g2_deconv = DeConvBNLayer(
            in_channels=self.out_channels,
            out_channels=self.out_channels,
            kernel_size=4,
            stride=2,
            padding=1,
            if_act=True,
            act='relu',
            name="unet_g_2")
        self.g3_conv = ConvBNLayer(
            in_channels=self.out_channels,
            out_channels=self.out_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            if_act=True,
            act='relu',
            name="unet_g_3")

    def forward(self, x):
        # x is the list of backbone feature maps ordered shallow -> deep;
        # reverse it so that f[0] is the deepest, lowest-resolution map.
        f = x[::-1]
        h = f[0]
        g = self.g0_deconv(h)
        h = paddle.concat([g, f[1]], axis=1)
        h = self.h1_conv(h)
        g = self.g1_deconv(h)
        h = paddle.concat([g, f[2]], axis=1)
        h = self.h2_conv(h)
        g = self.g2_deconv(h)
        h = paddle.concat([g, f[3]], axis=1)
        h = self.h3_conv(h)
        g = self.g3_conv(h)
        return g
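

if __name__ == "__main__":
    # Quick shape check with illustrative values: the channel counts
    # [64, 128, 256, 512] and the stride-4/8/16/32 feature-map sizes below are
    # placeholders, not the channels of any particular backbone; the real
    # values depend on the network that feeds this FPN.
    fpn = EASTFPN(in_channels=[64, 128, 256, 512], model_name="small")
    fpn.eval()  # use BatchNorm running statistics for a deterministic pass
    feats = [
        paddle.randn([1, 64, 32, 32]),   # stride 4
        paddle.randn([1, 128, 16, 16]),  # stride 8
        paddle.randn([1, 256, 8, 8]),    # stride 16
        paddle.randn([1, 512, 4, 4]),    # stride 32
    ]
    out = fpn(feats)
    # Expected: [1, 64, 32, 32] -- out_channels is 64 for any non-"large"
    # model_name, at the resolution of the shallowest input map.
    print(out.shape)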