# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Code is adapted from:
https://github.com/RuijieJ/pren/blob/main/Nets/Aggregation.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import paddle
from paddle import nn
import paddle.nn.functional as F
  24. class PoolAggregate(nn.Layer):
  25. def __init__(self, n_r, d_in, d_middle=None, d_out=None):
  26. super(PoolAggregate, self).__init__()
  27. if not d_middle:
  28. d_middle = d_in
  29. if not d_out:
  30. d_out = d_in
  31. self.d_in = d_in
  32. self.d_middle = d_middle
  33. self.d_out = d_out
  34. self.act = nn.Swish()
  35. self.n_r = n_r
  36. self.aggs = self._build_aggs()
  37. def _build_aggs(self):
  38. aggs = []
  39. for i in range(self.n_r):
  40. aggs.append(
  41. self.add_sublayer(
  42. '{}'.format(i),
  43. nn.Sequential(
  44. ('conv1', nn.Conv2D(
  45. self.d_in, self.d_middle, 3, 2, 1, bias_attr=False)
  46. ), ('bn1', nn.BatchNorm(self.d_middle)),
  47. ('act', self.act), ('conv2', nn.Conv2D(
  48. self.d_middle, self.d_out, 3, 2, 1, bias_attr=False
  49. )), ('bn2', nn.BatchNorm(self.d_out)))))
  50. return aggs
  51. def forward(self, x):
  52. b = x.shape[0]
  53. outs = []
  54. for agg in self.aggs:
  55. y = agg(x)
  56. p = F.adaptive_avg_pool2d(y, 1)
  57. outs.append(p.reshape((b, 1, self.d_out)))
  58. out = paddle.concat(outs, 1)
  59. return out
  60. class WeightAggregate(nn.Layer):
  61. def __init__(self, n_r, d_in, d_middle=None, d_out=None):
  62. super(WeightAggregate, self).__init__()
  63. if not d_middle:
  64. d_middle = d_in
  65. if not d_out:
  66. d_out = d_in
  67. self.n_r = n_r
  68. self.d_out = d_out
  69. self.act = nn.Swish()
  70. self.conv_n = nn.Sequential(
  71. ('conv1', nn.Conv2D(
  72. d_in, d_in, 3, 1, 1,
  73. bias_attr=False)), ('bn1', nn.BatchNorm(d_in)),
  74. ('act1', self.act), ('conv2', nn.Conv2D(
  75. d_in, n_r, 1, bias_attr=False)), ('bn2', nn.BatchNorm(n_r)),
  76. ('act2', nn.Sigmoid()))
  77. self.conv_d = nn.Sequential(
  78. ('conv1', nn.Conv2D(
  79. d_in, d_middle, 3, 1, 1,
  80. bias_attr=False)), ('bn1', nn.BatchNorm(d_middle)),
  81. ('act1', self.act), ('conv2', nn.Conv2D(
  82. d_middle, d_out, 1,
  83. bias_attr=False)), ('bn2', nn.BatchNorm(d_out)))
  84. def forward(self, x):
  85. b, _, h, w = x.shape
  86. hmaps = self.conv_n(x)
  87. fmaps = self.conv_d(x)
  88. r = paddle.bmm(
  89. hmaps.reshape((b, self.n_r, h * w)),
  90. fmaps.reshape((b, self.d_out, h * w)).transpose((0, 2, 1)))
  91. return r
  92. class GCN(nn.Layer):
  93. def __init__(self, d_in, n_in, d_out=None, n_out=None, dropout=0.1):
  94. super(GCN, self).__init__()
  95. if not d_out:
  96. d_out = d_in
  97. if not n_out:
  98. n_out = d_in
  99. self.conv_n = nn.Conv1D(n_in, n_out, 1)
  100. self.linear = nn.Linear(d_in, d_out)
  101. self.dropout = nn.Dropout(dropout)
  102. self.act = nn.Swish()
  103. def forward(self, x):
  104. x = self.conv_n(x)
  105. x = self.dropout(self.linear(x))
  106. return self.act(x)
  107. class PRENFPN(nn.Layer):
  108. def __init__(self, in_channels, n_r, d_model, max_len, dropout):
  109. super(PRENFPN, self).__init__()
  110. assert len(in_channels) == 3, "in_channels' length must be 3."
  111. c1, c2, c3 = in_channels # the depths are from big to small
  112. # build fpn
  113. assert d_model % 3 == 0, "{} can't be divided by 3.".format(d_model)
  114. self.agg_p1 = PoolAggregate(n_r, c1, d_out=d_model // 3)
  115. self.agg_p2 = PoolAggregate(n_r, c2, d_out=d_model // 3)
  116. self.agg_p3 = PoolAggregate(n_r, c3, d_out=d_model // 3)
  117. self.agg_w1 = WeightAggregate(n_r, c1, 4 * c1, d_model // 3)
  118. self.agg_w2 = WeightAggregate(n_r, c2, 4 * c2, d_model // 3)
  119. self.agg_w3 = WeightAggregate(n_r, c3, 4 * c3, d_model // 3)
  120. self.gcn_pool = GCN(d_model, n_r, d_model, max_len, dropout)
  121. self.gcn_weight = GCN(d_model, n_r, d_model, max_len, dropout)
  122. self.out_channels = d_model
  123. def forward(self, inputs):
  124. f3, f5, f7 = inputs
  125. rp1 = self.agg_p1(f3)
  126. rp2 = self.agg_p2(f5)
  127. rp3 = self.agg_p3(f7)
  128. rp = paddle.concat([rp1, rp2, rp3], 2) # [b,nr,d]
  129. rw1 = self.agg_w1(f3)
  130. rw2 = self.agg_w2(f5)
  131. rw3 = self.agg_w3(f7)
  132. rw = paddle.concat([rw1, rw2, rw3], 2) # [b,nr,d]
  133. y1 = self.gcn_pool(rp)
  134. y2 = self.gcn_weight(rw)
  135. y = 0.5 * (y1 + y2)
  136. return y # [b,max_len,d]