# rec_att_loss.py
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import paddle
from paddle import nn
  19. class AttentionLoss(nn.Layer):
  20. def __init__(self, **kwargs):
  21. super(AttentionLoss, self).__init__()
  22. self.loss_func = nn.CrossEntropyLoss(weight=None, reduction='none')
  23. def forward(self, predicts, batch):
  24. targets = batch[1].astype("int64")
  25. label_lengths = batch[2].astype('int64')
  26. batch_size, num_steps, num_classes = predicts.shape[0], predicts.shape[
  27. 1], predicts.shape[2]
  28. assert len(targets.shape) == len(list(predicts.shape)) - 1, \
  29. "The target's shape and inputs's shape is [N, d] and [N, num_steps]"
  30. inputs = paddle.reshape(predicts, [-1, predicts.shape[-1]])
  31. targets = paddle.reshape(targets, [-1])
  32. return {'loss': paddle.sum(self.loss_func(inputs, targets))}