infer_kie_token_ser.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

import os
import sys

__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.insert(0, os.path.abspath(os.path.join(__dir__, '..')))

os.environ["FLAGS_allocator_strategy"] = 'auto_growth'

import cv2
import json
import paddle

from ppocr.data import create_operators, transform
from ppocr.modeling.architectures import build_model
from ppocr.postprocess import build_post_process
from ppocr.utils.save_load import load_model
from ppocr.utils.visual import draw_ser_results
from ppocr.utils.utility import get_image_file_list, load_vqa_bio_label_maps
import tools.program as program
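

# to_tensor() below regroups the flat sample list produced by the eval
# transforms: every ndarray / Tensor / number field is wrapped in a batch
# dimension via paddle.to_tensor, while list-like fields (ocr_info, entities,
# ...) stay as plain one-element lists. A minimal usage sketch (variable names
# and the image path are hypothetical; the field order is fixed by the
# KeepKeys op configured in SerPredictor below):
#
#     sample = transform({'img_path': 'demo.jpg', 'image': raw_bytes}, ops)
#     batch = to_tensor(sample)
#     # batch[0] ~ input_ids, batch[1] ~ bbox, ...,
#     # batch[6] ~ segment_offset_id, batch[7] ~ ocr_info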
def to_tensor(data):
    import numbers
    from collections import defaultdict
    data_dict = defaultdict(list)
    to_tensor_idxs = []

    for idx, v in enumerate(data):
        if isinstance(v, (np.ndarray, paddle.Tensor, numbers.Number)):
            if idx not in to_tensor_idxs:
                to_tensor_idxs.append(idx)
        data_dict[idx].append(v)
    for idx in to_tensor_idxs:
        data_dict[idx] = paddle.to_tensor(data_dict[idx])
    return list(data_dict.values())
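

# SerPredictor below wires together the SER model, a PaddleOCR det/rec engine
# (used to produce the OCR input), and the Eval dataset transforms. It reads at
# least the config entries sketched here; the values are illustrative
# placeholders, not taken from this file:
#
#     Global:
#       use_gpu: True
#       kie_rec_model_dir: null      # optional custom rec model for the OCR engine
#       kie_det_model_dir: null      # optional custom det model for the OCR engine
#     Architecture:
#       model_type: kie
#       algorithm: LayoutXLM
#     Eval:
#       dataset:
#         transforms: [...]          # must include a *Label* op and a KeepKeys op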
class SerPredictor(object):
    def __init__(self, config):
        global_config = config['Global']
        self.algorithm = config['Architecture']["algorithm"]

        # build post process
        self.post_process_class = build_post_process(config['PostProcess'],
                                                     global_config)

        # build model
        self.model = build_model(config['Architecture'])

        load_model(
            config, self.model, model_type=config['Architecture']["model_type"])

        from paddleocr import PaddleOCR

        self.ocr_engine = PaddleOCR(
            use_angle_cls=False,
            show_log=False,
            rec_model_dir=global_config.get("kie_rec_model_dir", None),
            det_model_dir=global_config.get("kie_det_model_dir", None),
            use_gpu=global_config['use_gpu'])

        # create data ops
        transforms = []
        for op in config['Eval']['dataset']['transforms']:
            op_name = list(op)[0]
            if 'Label' in op_name:
                op[op_name]['ocr_engine'] = self.ocr_engine
            elif op_name == 'KeepKeys':
                op[op_name]['keep_keys'] = [
                    'input_ids', 'bbox', 'attention_mask', 'token_type_ids',
                    'image', 'labels', 'segment_offset_id', 'ocr_info',
                    'entities'
                ]
            transforms.append(op)
        if config["Global"].get("infer_mode", None) is None:
            global_config['infer_mode'] = True
        self.ops = create_operators(config['Eval']['dataset']['transforms'],
                                    global_config)
        self.model.eval()
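
    # __call__ below runs a single image end to end. A minimal usage sketch
    # (the image path is hypothetical):
    #
    #     predictor = SerPredictor(config)
    #     post_result, batch = predictor({'img_path': 'imgs/demo.jpg'})
    #     ocr_info = post_result[0]   # per-image OCR info with predicted labels attached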
    def __call__(self, data):
        with open(data["img_path"], 'rb') as f:
            img = f.read()
        data["image"] = img
        batch = transform(data, self.ops)
        batch = to_tensor(batch)
        preds = self.model(batch)

        post_result = self.post_process_class(
            preds, segment_offset_ids=batch[6], ocr_infos=batch[7])
        return post_result, batch
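

# The main block supports two modes, selected by Global.infer_mode:
#   * infer_mode is False: Global.infer_img points to an annotation file whose
#     lines are "<relative_image_path>\t<label>" (tab separated); image paths
#     are resolved against Eval.dataset.data_dir.
#   * otherwise: Global.infer_img is a single image file or a directory of images.
# Illustrative annotation line (the label payload is dataset specific):
#
#     zh_val_42.jpg<TAB>{... json label ...}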
if __name__ == '__main__':
    config, device, logger, vdl_writer = program.preprocess()
    os.makedirs(config['Global']['save_res_path'], exist_ok=True)

    ser_engine = SerPredictor(config)

    if config["Global"].get("infer_mode", None) is False:
        data_dir = config['Eval']['dataset']['data_dir']
        with open(config['Global']['infer_img'], "rb") as f:
            infer_imgs = f.readlines()
    else:
        infer_imgs = get_image_file_list(config['Global']['infer_img'])

    with open(
            os.path.join(config['Global']['save_res_path'],
                         "infer_results.txt"),
            "w",
            encoding='utf-8') as fout:
        for idx, info in enumerate(infer_imgs):
            if config["Global"].get("infer_mode", None) is False:
                data_line = info.decode('utf-8')
                substr = data_line.strip("\n").split("\t")
                img_path = os.path.join(data_dir, substr[0])
                data = {'img_path': img_path, 'label': substr[1]}
            else:
                img_path = info
                data = {'img_path': img_path}

            save_img_path = os.path.join(
                config['Global']['save_res_path'],
                os.path.splitext(os.path.basename(img_path))[0] + "_ser.jpg")

            result, _ = ser_engine(data)
            result = result[0]
            fout.write(img_path + "\t" + json.dumps(
                {
                    "ocr_info": result,
                }, ensure_ascii=False) + "\n")
            img_res = draw_ser_results(img_path, result)
            cv2.imwrite(save_img_path, img_res)

            logger.info("process: [{}/{}], save result to {}".format(
                idx, len(infer_imgs), save_img_path))
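
# Example invocation (a sketch following the standard PaddleOCR KIE docs; the
# config path, checkpoint directory, and image path are placeholders for your
# own files):
#
#     python3 tools/infer_kie_token_ser.py \
#         -c configs/kie/vi_layoutxlm/ser_vi_layoutxlm_xfund_zh.yml \
#         -o Architecture.Backbone.checkpoints=./pretrained_model/ser_vi_layoutxlm_xfund_pretrained/best_accuracy \
#            Global.infer_img=./ppstructure/docs/kie/input/zh_val_42.jpg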