# metric.py
from collections import OrderedDict

import paddle
import paddle.nn.functional as F
  4. def create_metric(out,
  5. label,
  6. architecture=None,
  7. topk=5,
  8. classes_num=1000,
  9. use_distillation=False,
  10. mode="train"):
  11. """
  12. Create measures of model accuracy, such as top1 and top5
  13. Args:
  14. out(variable): model output variable
  15. feeds(dict): dict of model input variables(included label)
  16. topk(int): usually top5
  17. classes_num(int): num of classes
  18. use_distillation(bool): whether to use distillation training
  19. mode(str): mode, train/valid
  20. Returns:
  21. fetchs(dict): dict of measures
  22. """
  23. # if architecture["name"] == "GoogLeNet":
  24. # assert len(out) == 3, "GoogLeNet should have 3 outputs"
  25. # out = out[0]
  26. # else:
  27. # # just need student label to get metrics
  28. # if use_distillation:
  29. # out = out[1]
  30. softmax_out = F.softmax(out)
  31. fetchs = OrderedDict()
  32. # set top1 to fetchs
  33. top1 = paddle.metric.accuracy(softmax_out, label=label, k=1)
  34. # set topk to fetchs
  35. k = min(topk, classes_num)
  36. topk = paddle.metric.accuracy(softmax_out, label=label, k=k)
  37. # multi cards' eval
  38. if mode != "train" and paddle.distributed.get_world_size() > 1:
  39. top1 = paddle.distributed.all_reduce(
  40. top1, op=paddle.distributed.ReduceOp.
  41. SUM) / paddle.distributed.get_world_size()
  42. topk = paddle.distributed.all_reduce(
  43. topk, op=paddle.distributed.ReduceOp.
  44. SUM) / paddle.distributed.get_world_size()
  45. fetchs['top1'] = top1
  46. topk_name = 'top{}'.format(k)
  47. fetchs[topk_name] = topk
  48. return fetchs