det_pse_loss.py
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
  14. """
  15. This code is refer from:
  16. https://github.com/whai362/PSENet/blob/python3/models/head/psenet_head.py
  17. """
import paddle
from paddle import nn
from paddle.nn import functional as F
import numpy as np

from ppocr.utils.iou import iou


class PSELoss(nn.Layer):
    def __init__(self,
                 alpha,
                 ohem_ratio=3,
                 kernel_sample_mask='pred',
                 reduction='sum',
                 eps=1e-6,
                 **kwargs):
        """Implement PSE Loss.

        Args:
            alpha: weight of the text-map loss; the kernel loss is weighted by (1 - alpha).
            ohem_ratio: maximum ratio of hard negative to positive pixels kept by OHEM.
            kernel_sample_mask: 'gt' or 'pred', source of the mask used for the kernel loss.
            reduction: 'sum', 'mean' or 'none', applied to every returned loss.
            eps: small constant keeping the dice denominator non-zero.
        """
        super(PSELoss, self).__init__()
        assert reduction in ['sum', 'mean', 'none']
        self.alpha = alpha
        self.ohem_ratio = ohem_ratio
        self.kernel_sample_mask = kernel_sample_mask
        self.reduction = reduction
        self.eps = eps
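
    # forward expects:
    #   outputs['maps'] : [N, 1 + K, H/4, W/4] raw logits, channel 0 for the
    #                     text region and channels 1..K for the kernels;
    #   labels          : [_, gt_texts, gt_kernels, training_masks] with shapes
    #                     [N, H, W], [N, K, H, W] and [N, H, W] (the first
    #                     entry is not used by the loss).
    # It returns a dict with loss_text, loss_kernels, iou_text, iou_kernel and
    # the combined loss.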
    def forward(self, outputs, labels):
        predicts = outputs['maps']
        # the head predicts at 1/4 of the label resolution; upsample back
        # before comparing with the ground-truth maps
        predicts = F.interpolate(predicts, scale_factor=4)

        # channel 0 is the full text map, the remaining channels are the
        # shrunk kernel maps
        texts = predicts[:, 0, :, :]
        kernels = predicts[:, 1:, :, :]
        gt_texts, gt_kernels, training_masks = labels[1:]

        # text loss
        selected_masks = self.ohem_batch(texts, gt_texts, training_masks,
                                         self.ohem_ratio)
        loss_text = self.dice_loss(texts, gt_texts, selected_masks)
        iou_text = iou((texts > 0).astype('int64'),
                       gt_texts,
                       training_masks,
                       reduce=False)
        losses = dict(loss_text=loss_text, iou_text=iou_text)

        # kernel loss
        loss_kernels = []
        if self.kernel_sample_mask == 'gt':
            selected_masks = gt_texts * training_masks
        elif self.kernel_sample_mask == 'pred':
            selected_masks = (
                F.sigmoid(texts) > 0.5).astype('float32') * training_masks

        for i in range(kernels.shape[1]):
            kernel_i = kernels[:, i, :, :]
            gt_kernel_i = gt_kernels[:, i, :, :]
            loss_kernel_i = self.dice_loss(kernel_i, gt_kernel_i,
                                           selected_masks)
            loss_kernels.append(loss_kernel_i)
        loss_kernels = paddle.mean(paddle.stack(loss_kernels, axis=1), axis=1)
        iou_kernel = iou((kernels[:, -1, :, :] > 0).astype('int64'),
                         gt_kernels[:, -1, :, :],
                         training_masks * gt_texts,
                         reduce=False)
        losses.update(dict(loss_kernels=loss_kernels, iou_kernel=iou_kernel))

        loss = self.alpha * loss_text + (1 - self.alpha) * loss_kernels
        losses['loss'] = loss
        if self.reduction == 'sum':
            losses = {x: paddle.sum(v) for x, v in losses.items()}
        elif self.reduction == 'mean':
            losses = {x: paddle.mean(v) for x, v in losses.items()}
        return losses
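
    # Per-sample dice loss restricted to the selected mask:
    #   dice = 2 * sum(p * g) / (sum(p^2) + sum(g^2) + 2 * eps),  loss = 1 - dice
    # where p = sigmoid(input) * mask and g = target * mask.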
    def dice_loss(self, input, target, mask):
        input = F.sigmoid(input)

        input = input.reshape([input.shape[0], -1])
        target = target.reshape([target.shape[0], -1])
        mask = mask.reshape([mask.shape[0], -1])

        input = input * mask
        target = target * mask

        a = paddle.sum(input * target, 1)
        b = paddle.sum(input * input, 1) + self.eps
        c = paddle.sum(target * target, 1) + self.eps
        d = (2 * a) / (b + c)
        return 1 - d
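
    # OHEM mask for a single image: keep every positive pixel and, among the
    # pixels outside the ground-truth text, only the ohem_ratio * pos_num with
    # the highest predicted score; the result is restricted to training_mask.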
    def ohem_single(self, score, gt_text, training_mask, ohem_ratio=3):
        # positives: text pixels that are not excluded by the training mask
        pos_num = int(paddle.sum((gt_text > 0.5).astype('float32'))) - int(
            paddle.sum(
                paddle.logical_and((gt_text > 0.5), (training_mask <= 0.5))
                .astype('float32')))

        if pos_num == 0:
            selected_mask = training_mask
            selected_mask = selected_mask.reshape(
                [1, selected_mask.shape[0], selected_mask.shape[1]]).astype(
                    'float32')
            return selected_mask

        neg_num = int(paddle.sum((gt_text <= 0.5).astype('float32')))
        neg_num = int(min(pos_num * ohem_ratio, neg_num))

        if neg_num == 0:
            selected_mask = training_mask
            selected_mask = selected_mask.reshape(
                [1, selected_mask.shape[0], selected_mask.shape[1]]).astype(
                    'float32')
            return selected_mask

        # keep the neg_num highest-scoring negatives (hardest for the model)
        neg_score = paddle.masked_select(score, gt_text <= 0.5)
        neg_score_sorted = paddle.sort(-neg_score)
        threshold = -neg_score_sorted[neg_num - 1]

        selected_mask = paddle.logical_and(
            paddle.logical_or((score >= threshold), (gt_text > 0.5)),
            (training_mask > 0.5))
        selected_mask = selected_mask.reshape(
            [1, selected_mask.shape[0], selected_mask.shape[1]]).astype(
                'float32')
        return selected_mask
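
    # Apply ohem_single image by image and stack the per-image masks back into
    # an [N, H, W] batch.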
    def ohem_batch(self, scores, gt_texts, training_masks, ohem_ratio=3):
        selected_masks = []
        for i in range(scores.shape[0]):
            selected_masks.append(
                self.ohem_single(scores[i, :, :], gt_texts[i, :, :],
                                 training_masks[i, :, :], ohem_ratio))

        selected_masks = paddle.concat(selected_masks, 0).astype('float32')
        return selected_masks
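

if __name__ == "__main__":
    # A minimal smoke-test sketch. Assumptions: the detection head emits a
    # 7-channel map (1 text map + 6 kernels, the usual PSENet setting) at 1/4
    # of the label resolution, and labels are packed as
    # [images, gt_texts, gt_kernels, training_masks]; the shapes and the alpha
    # value below are illustrative only.
    paddle.seed(0)
    n, num_kernels, h, w = 2, 6, 64, 64
    outputs = {'maps': paddle.randn([n, 1 + num_kernels, h // 4, w // 4])}
    labels = [
        None,  # images, not used by the loss
        paddle.randint(0, 2, [n, h, w]).astype('float32'),  # gt_texts
        paddle.randint(0, 2, [n, num_kernels, h, w]).astype('float32'),  # gt_kernels
        paddle.ones([n, h, w], dtype='float32'),  # training_masks
    ]
    loss_fn = PSELoss(alpha=0.7)
    losses = loss_fn(outputs, labels)
    print({k: float(v) for k, v in losses.items()})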