# predictors.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
  14. import numpy as np
  15. import cv2
  16. import math
  17. import paddle
  18. from arch import style_text_rec
  19. from utils.sys_funcs import check_gpu
  20. from utils.logging import get_logger
  21. class StyleTextRecPredictor(object):
  22. def __init__(self, config):
  23. algorithm = config['Predictor']['algorithm']
  24. assert algorithm in ["StyleTextRec"
  25. ], "Generator {} not supported.".format(algorithm)
  26. use_gpu = config["Global"]['use_gpu']
  27. check_gpu(use_gpu)
  28. paddle.set_device('gpu' if use_gpu else 'cpu')
  29. self.logger = get_logger()
  30. self.generator = getattr(style_text_rec, algorithm)(config)
  31. self.height = config["Global"]["image_height"]
  32. self.width = config["Global"]["image_width"]
  33. self.scale = config["Predictor"]["scale"]
  34. self.mean = config["Predictor"]["mean"]
  35. self.std = config["Predictor"]["std"]
  36. self.expand_result = config["Predictor"]["expand_result"]
  37. def reshape_to_same_height(self, img_list):
  38. h = img_list[0].shape[0]
  39. for idx in range(1, len(img_list)):
  40. new_w = round(1.0 * img_list[idx].shape[1] /
  41. img_list[idx].shape[0] * h)
  42. img_list[idx] = cv2.resize(img_list[idx], (new_w, h))
  43. return img_list
  44. def predict_single_image(self, style_input, text_input):
  45. style_input = self.rep_style_input(style_input, text_input)
  46. tensor_style_input = self.preprocess(style_input)
  47. tensor_text_input = self.preprocess(text_input)
  48. style_text_result = self.generator.forward(tensor_style_input,
  49. tensor_text_input)
  50. fake_fusion = self.postprocess(style_text_result["fake_fusion"])
  51. fake_text = self.postprocess(style_text_result["fake_text"])
  52. fake_sk = self.postprocess(style_text_result["fake_sk"])
  53. fake_bg = self.postprocess(style_text_result["fake_bg"])
  54. bbox = self.get_text_boundary(fake_text)
  55. if bbox:
  56. left, right, top, bottom = bbox
  57. fake_fusion = fake_fusion[top:bottom, left:right, :]
  58. fake_text = fake_text[top:bottom, left:right, :]
  59. fake_sk = fake_sk[top:bottom, left:right, :]
  60. fake_bg = fake_bg[top:bottom, left:right, :]
  61. # fake_fusion = self.crop_by_text(img_fake_fusion, img_fake_text)
  62. return {
  63. "fake_fusion": fake_fusion,
  64. "fake_text": fake_text,
  65. "fake_sk": fake_sk,
  66. "fake_bg": fake_bg,
  67. }
  68. def predict(self, style_input, text_input_list):
  69. if not isinstance(text_input_list, (tuple, list)):
  70. return self.predict_single_image(style_input, text_input_list)
  71. synth_result_list = []
  72. for text_input in text_input_list:
  73. synth_result = self.predict_single_image(style_input, text_input)
  74. synth_result_list.append(synth_result)
  75. for key in synth_result:
  76. res = [r[key] for r in synth_result_list]
  77. res = self.reshape_to_same_height(res)
  78. synth_result[key] = np.concatenate(res, axis=1)
  79. return synth_result
  80. def preprocess(self, img):
  81. img = (img.astype('float32') * self.scale - self.mean) / self.std
  82. img_height, img_width, channel = img.shape
  83. assert channel == 3, "Please use an rgb image."
  84. ratio = img_width / float(img_height)
  85. if math.ceil(self.height * ratio) > self.width:
  86. resized_w = self.width
  87. else:
  88. resized_w = int(math.ceil(self.height * ratio))
  89. img = cv2.resize(img, (resized_w, self.height))
  90. new_img = np.zeros([self.height, self.width, 3]).astype('float32')
  91. new_img[:, 0:resized_w, :] = img
  92. img = new_img.transpose((2, 0, 1))
  93. img = img[np.newaxis, :, :, :]
  94. return paddle.to_tensor(img)
  95. def postprocess(self, tensor):
  96. img = tensor.numpy()[0]
  97. img = img.transpose((1, 2, 0))
  98. img = (img * self.std + self.mean) / self.scale
  99. img = np.maximum(img, 0.0)
  100. img = np.minimum(img, 255.0)
  101. img = img.astype('uint8')
  102. return img
  103. def rep_style_input(self, style_input, text_input):
  104. rep_num = int(1.2 * (text_input.shape[1] / text_input.shape[0]) /
  105. (style_input.shape[1] / style_input.shape[0])) + 1
  106. style_input = np.tile(style_input, reps=[1, rep_num, 1])
  107. max_width = int(self.width / self.height * style_input.shape[0])
  108. style_input = style_input[:, :max_width, :]
  109. return style_input
  110. def get_text_boundary(self, text_img):
  111. img_height = text_img.shape[0]
  112. img_width = text_img.shape[1]
  113. bounder = 3
  114. text_canny_img = cv2.Canny(text_img, 10, 20)
  115. edge_num_h = text_canny_img.sum(axis=0)
  116. no_zero_list_h = np.where(edge_num_h > 0)[0]
  117. edge_num_w = text_canny_img.sum(axis=1)
  118. no_zero_list_w = np.where(edge_num_w > 0)[0]
  119. if len(no_zero_list_h) == 0 or len(no_zero_list_w) == 0:
  120. return None
  121. left = max(no_zero_list_h[0] - bounder, 0)
  122. right = min(no_zero_list_h[-1] + bounder, img_width)
  123. top = max(no_zero_list_w[0] - bounder, 0)
  124. bottom = min(no_zero_list_w[-1] + bounder, img_height)
  125. return [left, right, top, bottom]