gcn.py

# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is adapted from:
https://github.com/open-mmlab/mmocr/blob/main/mmocr/models/textdet/modules/gcn.py
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import paddle
import paddle.nn as nn
import paddle.nn.functional as F


class BatchNorm1D(nn.BatchNorm1D):
    """BatchNorm1D exposing a PyTorch-style interface on top of Paddle's layer."""

    def __init__(self,
                 num_features,
                 eps=1e-05,
                 momentum=0.1,
                 affine=True,
                 track_running_stats=True):
        # Paddle's momentum is the complement of PyTorch's:
        # running_stat = momentum * running_stat + (1 - momentum) * batch_stat.
        momentum = 1 - momentum
        weight_attr = None
        bias_attr = None
        if not affine:
            # Freeze scale and shift to emulate affine=False.
            weight_attr = paddle.ParamAttr(learning_rate=0.0)
            bias_attr = paddle.ParamAttr(learning_rate=0.0)
        super().__init__(
            num_features,
            momentum=momentum,
            epsilon=eps,
            weight_attr=weight_attr,
            bias_attr=bias_attr,
            use_global_stats=track_running_stats)


class MeanAggregator(nn.Layer):
    """Mixes neighbor features via bmm(A, features); a mean when the rows
    of the adjacency matrix A are normalized."""

    def forward(self, features, A):
        x = paddle.bmm(A, features)
        return x


class GraphConv(nn.Layer):
    """GraphSAGE-style graph convolution: concatenate each node's own feature
    with its aggregated neighborhood feature, then apply a linear map + ReLU."""

    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.weight = self.create_parameter(
            [in_dim * 2, out_dim],
            default_initializer=nn.initializer.XavierUniform())
        self.bias = self.create_parameter(
            [out_dim],
            is_bias=True,
            default_initializer=nn.initializer.Assign([0] * out_dim))
        self.aggregator = MeanAggregator()

    def forward(self, features, A):
        b, n, d = features.shape
        assert d == self.in_dim
        agg_feats = self.aggregator(features, A)
        # [b, n, 2 * in_dim]: self features concatenated with neighborhood features.
        cat_feats = paddle.concat([features, agg_feats], axis=2)
        out = paddle.einsum('bnd,df->bnf', cat_feats, self.weight)
        out = F.relu(out + self.bias)
        return out


class GCN(nn.Layer):
    """Stacked GraphConv layers followed by a two-way edge classifier over
    each local graph's k nearest-neighbor nodes."""

    def __init__(self, feat_len):
        super(GCN, self).__init__()
        self.bn0 = BatchNorm1D(feat_len, affine=False)
        self.conv1 = GraphConv(feat_len, 512)
        self.conv2 = GraphConv(512, 256)
        self.conv3 = GraphConv(256, 128)
        self.conv4 = GraphConv(128, 64)
        self.classifier = nn.Sequential(
            nn.Linear(64, 32), nn.PReLU(32), nn.Linear(32, 2))

    def forward(self, x, A, knn_inds):
        num_local_graphs, num_max_nodes, feat_len = x.shape

        # Normalize node features across all local graphs.
        x = x.reshape([-1, feat_len])
        x = self.bn0(x)
        x = x.reshape([num_local_graphs, num_max_nodes, feat_len])

        x = self.conv1(x, A)
        x = self.conv2(x, A)
        x = self.conv3(x, A)
        x = self.conv4(x, A)

        # Gather the features of each graph's k nearest-neighbor nodes.
        k = knn_inds.shape[-1]
        mid_feat_len = x.shape[-1]
        edge_feat = paddle.zeros([num_local_graphs, k, mid_feat_len])
        for graph_ind in range(num_local_graphs):
            edge_feat[graph_ind, :, :] = x[graph_ind][paddle.to_tensor(
                knn_inds[graph_ind])]
        edge_feat = edge_feat.reshape([-1, mid_feat_len])

        # Two-way (link / no-link) logits for each pivot-neighbor pair.
        pred = self.classifier(edge_feat)
        return pred
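

# A minimal smoke test, not part of the original file: it runs a GCN forward
# pass on random tensors. All sizes below are assumptions picked only to
# exercise the code; forward()'s signature dictates just the tensor ranks.
if __name__ == '__main__':
    num_graphs, num_nodes, feat_len, k = 4, 10, 120, 3
    model = GCN(feat_len)
    x = paddle.randn([num_graphs, num_nodes, feat_len])
    # Row-normalize a random adjacency so bmm(A, x) is a neighborhood mean.
    A = paddle.rand([num_graphs, num_nodes, num_nodes])
    A = A / A.sum(axis=-1, keepdim=True)
    knn_inds = paddle.randint(0, num_nodes, [num_graphs, k])
    pred = model(x, A, knn_inds)
    print(pred.shape)  # [num_graphs * k, 2]: link / no-link logits per edge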