- """
- # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """
- from __future__ import absolute_import
- from __future__ import division
- from __future__ import print_function
- from __future__ import unicode_literals
- import sys
- import six
- import cv2
- import numpy as np
- import math
- from PIL import Image


class DecodeImage(object):
    """ decode image """

    def __init__(self,
                 img_mode='RGB',
                 channel_first=False,
                 ignore_orientation=False,
                 **kwargs):
        self.img_mode = img_mode
        self.channel_first = channel_first
        self.ignore_orientation = ignore_orientation

    def __call__(self, data):
        img = data['image']
        if six.PY2:
            assert type(img) is str and len(
                img) > 0, "invalid input 'img' in DecodeImage"
        else:
            assert type(img) is bytes and len(
                img) > 0, "invalid input 'img' in DecodeImage"
        # raw bytes -> 1-D uint8 buffer -> decoded image (BGR by default)
        img = np.frombuffer(img, dtype='uint8')
        if self.ignore_orientation:
            img = cv2.imdecode(img, cv2.IMREAD_IGNORE_ORIENTATION |
                               cv2.IMREAD_COLOR)
        else:
            img = cv2.imdecode(img, 1)
        if img is None:
            return None
        if self.img_mode == 'GRAY':
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        elif self.img_mode == 'RGB':
            assert img.shape[2] == 3, 'invalid shape of image[%s]' % (img.shape)
            # OpenCV decodes to BGR; reverse the channel order to get RGB
            img = img[:, :, ::-1]
        if self.channel_first:
            img = img.transpose((2, 0, 1))

        data['image'] = img
        return data
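
# Minimal usage sketch for DecodeImage (illustrative only; the file name
# "sample.jpg" is a placeholder assumption, not part of this module):
#
#   with open("sample.jpg", "rb") as f:
#       data = {"image": f.read()}
#   data = DecodeImage(img_mode="RGB")(data)
#   # data["image"] is now an (h, w, 3) uint8 RGB array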


class NormalizeImage(object):
    """ normalize image, e.g. subtract mean, divide by std
    """

    def __init__(self, scale=None, mean=None, std=None, order='chw', **kwargs):
        if isinstance(scale, str):
            scale = eval(scale)
        self.scale = np.float32(scale if scale is not None else 1.0 / 255.0)
        mean = mean if mean is not None else [0.485, 0.456, 0.406]
        std = std if std is not None else [0.229, 0.224, 0.225]

        shape = (3, 1, 1) if order == 'chw' else (1, 1, 3)
        self.mean = np.array(mean).reshape(shape).astype('float32')
        self.std = np.array(std).reshape(shape).astype('float32')

    def __call__(self, data):
        img = data['image']
        from PIL import Image
        if isinstance(img, Image.Image):
            img = np.array(img)
        assert isinstance(img,
                          np.ndarray), "invalid input 'img' in NormalizeImage"
        data['image'] = (
            img.astype('float32') * self.scale - self.mean) / self.std
        return data
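
# Usage sketch for NormalizeImage with the ImageNet-style defaults above
# (scale 1/255, per-channel mean/std); the input array is a made-up example:
#
#   img = np.zeros((32, 100, 3), dtype="uint8")
#   data = NormalizeImage(order="hwc")(dict(image=img))
#   # data["image"] is a float32 array with shape (32, 100, 3)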


class ToCHWImage(object):
    """ convert hwc image to chw image
    """

    def __init__(self, **kwargs):
        pass

    def __call__(self, data):
        img = data['image']
        from PIL import Image
        if isinstance(img, Image.Image):
            img = np.array(img)
        data['image'] = img.transpose((2, 0, 1))
        return data


class Fasttext(object):
    def __init__(self, path="None", **kwargs):
        import fasttext
        self.fast_model = fasttext.load_model(path)

    def __call__(self, data):
        label = data['label']
        fast_label = self.fast_model[label]
        data['fast_label'] = fast_label
        return data


class KeepKeys(object):
    def __init__(self, keep_keys, **kwargs):
        self.keep_keys = keep_keys

    def __call__(self, data):
        data_list = []
        for key in self.keep_keys:
            data_list.append(data[key])
        return data_list
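
# Usage sketch for KeepKeys: it turns the data dict into a plain list in the
# requested key order (the keys and values below are illustrative):
#
#   out = KeepKeys(keep_keys=["image", "shape"])({"image": 1, "shape": 2})
#   # out == [1, 2]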


class Pad(object):
    def __init__(self, size=None, size_div=32, **kwargs):
        if size is not None and not isinstance(size, (int, list, tuple)):
            raise TypeError("Type of target_size is invalid. Now is {}".format(
                type(size)))
        if isinstance(size, int):
            size = [size, size]
        self.size = size
        self.size_div = size_div

    def __call__(self, data):
        img = data['image']
        img_h, img_w = img.shape[0], img.shape[1]
        if self.size:
            resize_h2, resize_w2 = self.size
            assert (
                img_h < resize_h2 and img_w < resize_w2
            ), '(h, w) of target size should be greater than (img_h, img_w)'
        else:
            # pad height and width up to the nearest multiple of size_div
            resize_h2 = max(
                int(math.ceil(img.shape[0] / self.size_div) * self.size_div),
                self.size_div)
            resize_w2 = max(
                int(math.ceil(img.shape[1] / self.size_div) * self.size_div),
                self.size_div)
        img = cv2.copyMakeBorder(
            img,
            0,
            resize_h2 - img_h,
            0,
            resize_w2 - img_w,
            cv2.BORDER_CONSTANT,
            value=0)
        data['image'] = img
        return data
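
# Usage sketch for Pad with the default size_div=32: a 30 x 50 image is
# padded on the bottom/right with zeros up to 32 x 64 (values are examples):
#
#   img = np.ones((30, 50, 3), dtype=np.uint8)
#   padded = Pad()(dict(image=img))["image"]
#   # padded.shape == (32, 64, 3)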


class Resize(object):
    def __init__(self, size=(640, 640), **kwargs):
        self.size = size

    def resize_image(self, img):
        resize_h, resize_w = self.size
        ori_h, ori_w = img.shape[:2]  # (h, w, c)
        ratio_h = float(resize_h) / ori_h
        ratio_w = float(resize_w) / ori_w
        img = cv2.resize(img, (int(resize_w), int(resize_h)))
        return img, [ratio_h, ratio_w]

    def __call__(self, data):
        img = data['image']
        if 'polys' in data:
            text_polys = data['polys']

        img_resize, [ratio_h, ratio_w] = self.resize_image(img)
        if 'polys' in data:
            new_boxes = []
            for box in text_polys:
                new_box = []
                for cord in box:
                    new_box.append([cord[0] * ratio_w, cord[1] * ratio_h])
                new_boxes.append(new_box)
            data['polys'] = np.array(new_boxes, dtype=np.float32)
        data['image'] = img_resize
        return data
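
# Usage sketch for Resize: the image is stretched to `size` and any polygons
# under data["polys"] are rescaled with the same ratios (shapes are examples):
#
#   img = np.zeros((320, 320, 3), dtype=np.uint8)
#   polys = np.array([[[0, 0], [10, 0], [10, 10], [0, 10]]], dtype=np.float32)
#   out = Resize(size=(640, 640))(dict(image=img, polys=polys))
#   # out["image"].shape == (640, 640, 3); out["polys"] is scaled by 2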


class DetResizeForTest(object):
    def __init__(self, **kwargs):
        super(DetResizeForTest, self).__init__()
        self.resize_type = 0
        self.keep_ratio = False
        if 'image_shape' in kwargs:
            self.image_shape = kwargs['image_shape']
            self.resize_type = 1
            if 'keep_ratio' in kwargs:
                self.keep_ratio = kwargs['keep_ratio']
        elif 'limit_side_len' in kwargs:
            self.limit_side_len = kwargs['limit_side_len']
            self.limit_type = kwargs.get('limit_type', 'min')
        elif 'resize_long' in kwargs:
            self.resize_type = 2
            self.resize_long = kwargs.get('resize_long', 960)
        else:
            self.limit_side_len = 736
            self.limit_type = 'min'

    def __call__(self, data):
        img = data['image']
        src_h, src_w, _ = img.shape
        if sum([src_h, src_w]) < 64:
            img = self.image_padding(img)

        if self.resize_type == 0:
            img, [ratio_h, ratio_w] = self.resize_image_type0(img)
        elif self.resize_type == 2:
            img, [ratio_h, ratio_w] = self.resize_image_type2(img)
        else:
            img, [ratio_h, ratio_w] = self.resize_image_type1(img)
        data['image'] = img
        data['shape'] = np.array([src_h, src_w, ratio_h, ratio_w])
        return data

    def image_padding(self, im, value=0):
        h, w, c = im.shape
        im_pad = np.zeros((max(32, h), max(32, w), c), np.uint8) + value
        im_pad[:h, :w, :] = im
        return im_pad

    def resize_image_type1(self, img):
        resize_h, resize_w = self.image_shape
        ori_h, ori_w = img.shape[:2]  # (h, w, c)
        if self.keep_ratio is True:
            resize_w = ori_w * resize_h / ori_h
            N = math.ceil(resize_w / 32)
            resize_w = N * 32
        ratio_h = float(resize_h) / ori_h
        ratio_w = float(resize_w) / ori_w
        img = cv2.resize(img, (int(resize_w), int(resize_h)))
        return img, [ratio_h, ratio_w]

    def resize_image_type0(self, img):
        """
        resize image to a size multiple of 32 which is required by the network
        args:
            img(array): array with shape [h, w, c]
        return(tuple):
            img, (ratio_h, ratio_w)
        """
        limit_side_len = self.limit_side_len
        h, w, c = img.shape

        # scale the image so that its sides respect limit_side_len,
        # according to limit_type
        if self.limit_type == 'max':
            if max(h, w) > limit_side_len:
                if h > w:
                    ratio = float(limit_side_len) / h
                else:
                    ratio = float(limit_side_len) / w
            else:
                ratio = 1.
        elif self.limit_type == 'min':
            if min(h, w) < limit_side_len:
                if h < w:
                    ratio = float(limit_side_len) / h
                else:
                    ratio = float(limit_side_len) / w
            else:
                ratio = 1.
        elif self.limit_type == 'resize_long':
            ratio = float(limit_side_len) / max(h, w)
        else:
            raise Exception("unsupported limit type '{}'".format(
                self.limit_type))
        resize_h = int(h * ratio)
        resize_w = int(w * ratio)

        # round both sides to multiples of 32
        resize_h = max(int(round(resize_h / 32) * 32), 32)
        resize_w = max(int(round(resize_w / 32) * 32), 32)

        try:
            if int(resize_w) <= 0 or int(resize_h) <= 0:
                return None, (None, None)
            img = cv2.resize(img, (int(resize_w), int(resize_h)))
        except:
            print(img.shape, resize_w, resize_h)
            sys.exit(0)
        ratio_h = resize_h / float(h)
        ratio_w = resize_w / float(w)
        return img, [ratio_h, ratio_w]

    def resize_image_type2(self, img):
        h, w, _ = img.shape

        resize_w = w
        resize_h = h

        if resize_h > resize_w:
            ratio = float(self.resize_long) / resize_h
        else:
            ratio = float(self.resize_long) / resize_w

        resize_h = int(resize_h * ratio)
        resize_w = int(resize_w * ratio)

        max_stride = 128
        resize_h = (resize_h + max_stride - 1) // max_stride * max_stride
        resize_w = (resize_w + max_stride - 1) // max_stride * max_stride
        img = cv2.resize(img, (int(resize_w), int(resize_h)))
        ratio_h = resize_h / float(h)
        ratio_w = resize_w / float(w)

        return img, [ratio_h, ratio_w]
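
# Usage sketch for DetResizeForTest with limit_type="min": the shorter side is
# scaled up to limit_side_len and both sides are rounded to multiples of 32
# (the 400 x 600 input below is an arbitrary example):
#
#   op = DetResizeForTest(limit_side_len=736, limit_type="min")
#   out = op(dict(image=np.zeros((400, 600, 3), dtype=np.uint8)))
#   # out["shape"] holds [src_h, src_w, ratio_h, ratio_w]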


class E2EResizeForTest(object):
    def __init__(self, **kwargs):
        super(E2EResizeForTest, self).__init__()
        self.max_side_len = kwargs['max_side_len']
        self.valid_set = kwargs['valid_set']

    def __call__(self, data):
        img = data['image']
        src_h, src_w, _ = img.shape
        if self.valid_set == 'totaltext':
            im_resized, [ratio_h, ratio_w] = self.resize_image_for_totaltext(
                img, max_side_len=self.max_side_len)
        else:
            im_resized, (ratio_h, ratio_w) = self.resize_image(
                img, max_side_len=self.max_side_len)
        data['image'] = im_resized
        data['shape'] = np.array([src_h, src_w, ratio_h, ratio_w])
        return data

    def resize_image_for_totaltext(self, im, max_side_len=512):
        h, w, _ = im.shape
        resize_w = w
        resize_h = h
        ratio = 1.25
        if h * ratio > max_side_len:
            ratio = float(max_side_len) / resize_h
        resize_h = int(resize_h * ratio)
        resize_w = int(resize_w * ratio)

        max_stride = 128
        resize_h = (resize_h + max_stride - 1) // max_stride * max_stride
        resize_w = (resize_w + max_stride - 1) // max_stride * max_stride
        im = cv2.resize(im, (int(resize_w), int(resize_h)))
        ratio_h = resize_h / float(h)
        ratio_w = resize_w / float(w)
        return im, (ratio_h, ratio_w)

    def resize_image(self, im, max_side_len=512):
        """
        resize image to a size multiple of max_stride which is required by the network
        :param im: the input image
        :param max_side_len: limit of max image size to avoid out of memory in gpu
        :return: the resized image and the resize ratio
        """
        h, w, _ = im.shape

        resize_w = w
        resize_h = h

        # Fix the longer side
        if resize_h > resize_w:
            ratio = float(max_side_len) / resize_h
        else:
            ratio = float(max_side_len) / resize_w

        resize_h = int(resize_h * ratio)
        resize_w = int(resize_w * ratio)

        max_stride = 128
        resize_h = (resize_h + max_stride - 1) // max_stride * max_stride
        resize_w = (resize_w + max_stride - 1) // max_stride * max_stride
        im = cv2.resize(im, (int(resize_w), int(resize_h)))
        ratio_h = resize_h / float(h)
        ratio_w = resize_w / float(w)
        return im, (ratio_h, ratio_w)
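
# Usage sketch for E2EResizeForTest: the image is scaled according to
# max_side_len and both sides are rounded up to multiples of 128 (the config
# values below are illustrative, using the totaltext branch handled above):
#
#   op = E2EResizeForTest(max_side_len=768, valid_set="totaltext")
#   out = op(dict(image=np.zeros((512, 640, 3), dtype=np.uint8)))
#   # out["shape"] holds [src_h, src_w, ratio_h, ratio_w]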


class KieResize(object):
    def __init__(self, **kwargs):
        super(KieResize, self).__init__()
        self.max_side, self.min_side = kwargs['img_scale'][0], kwargs[
            'img_scale'][1]

    def __call__(self, data):
        img = data['image']
        points = data['points']
        src_h, src_w, _ = img.shape
        im_resized, scale_factor, [ratio_h, ratio_w], [new_h, new_w] = \
            self.resize_image(img)
        resize_points = self.resize_boxes(img, points, scale_factor)
        data['ori_image'] = img
        data['ori_boxes'] = points
        data['points'] = resize_points
        data['image'] = im_resized
        data['shape'] = np.array([new_h, new_w])
        return data

    def resize_image(self, img):
        norm_img = np.zeros([1024, 1024, 3], dtype='float32')
        scale = [512, 1024]
        h, w = img.shape[:2]
        max_long_edge = max(scale)
        max_short_edge = min(scale)
        scale_factor = min(max_long_edge / max(h, w),
                           max_short_edge / min(h, w))
        resize_w, resize_h = int(w * float(scale_factor) + 0.5), int(h * float(
            scale_factor) + 0.5)
        max_stride = 32
        resize_h = (resize_h + max_stride - 1) // max_stride * max_stride
        resize_w = (resize_w + max_stride - 1) // max_stride * max_stride
        im = cv2.resize(img, (resize_w, resize_h))
        new_h, new_w = im.shape[:2]
        w_scale = new_w / w
        h_scale = new_h / h
        scale_factor = np.array(
            [w_scale, h_scale, w_scale, h_scale], dtype=np.float32)
        # paste the resized image into a fixed 1024 x 1024 canvas
        norm_img[:new_h, :new_w, :] = im
        return norm_img, scale_factor, [h_scale, w_scale], [new_h, new_w]

    def resize_boxes(self, im, points, scale_factor):
        points = points * scale_factor
        img_shape = im.shape[:2]
        points[:, 0::2] = np.clip(points[:, 0::2], 0, img_shape[1])
        points[:, 1::2] = np.clip(points[:, 1::2], 0, img_shape[0])
        return points
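
# Usage sketch for KieResize: the image is resized so its sides fit the
# hard-coded 512/1024 range above, pasted into a fixed 1024 x 1024 canvas,
# and the point boxes are rescaled and clipped (inputs are arbitrary examples):
#
#   op = KieResize(img_scale=(1024, 512))
#   pts = np.array([[10., 10., 50., 50.]], dtype=np.float32)
#   out = op(dict(image=np.zeros((200, 400, 3), dtype=np.uint8), points=pts))
#   # out["image"].shape == (1024, 1024, 3); out["shape"] == [new_h, new_w]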


class SRResize(object):
    def __init__(self,
                 imgH=32,
                 imgW=128,
                 down_sample_scale=4,
                 keep_ratio=False,
                 min_ratio=1,
                 mask=False,
                 infer_mode=False,
                 **kwargs):
        self.imgH = imgH
        self.imgW = imgW
        self.keep_ratio = keep_ratio
        self.min_ratio = min_ratio
        self.down_sample_scale = down_sample_scale
        self.mask = mask
        self.infer_mode = infer_mode

    def __call__(self, data):
        imgH = self.imgH
        imgW = self.imgW
        images_lr = data["image_lr"]
        transform2 = ResizeNormalize(
            (imgW // self.down_sample_scale, imgH // self.down_sample_scale))
        images_lr = transform2(images_lr)
        data["img_lr"] = images_lr
        if self.infer_mode:
            return data

        images_HR = data["image_hr"]
        label_strs = data["label"]
        transform = ResizeNormalize((imgW, imgH))
        images_HR = transform(images_HR)
        data["img_hr"] = images_HR
        return data


class ResizeNormalize(object):
    def __init__(self, size, interpolation=Image.BICUBIC):
        self.size = size
        self.interpolation = interpolation

    def __call__(self, img):
        img = img.resize(self.size, self.interpolation)
        img_numpy = np.array(img).astype("float32")
        img_numpy = img_numpy.transpose((2, 0, 1)) / 255
        return img_numpy
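
# Usage sketch for ResizeNormalize (used by SRResize above): a PIL image is
# resized with bicubic interpolation and returned as a float32 CHW array in
# [0, 1] (the 64 x 16 source size below is arbitrary):
#
#   pil_img = Image.new("RGB", (64, 16))
#   arr = ResizeNormalize((128, 32))(pil_img)
#   # arr.shape == (3, 32, 128), values scaled by 1/255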


class GrayImageChannelFormat(object):
    """
    convert a BGR image (h, w, 3) into a single-channel image (1, h, w)
    Args:
        inverse: invert the gray image values
    """

    def __init__(self, inverse=False, **kwargs):
        self.inverse = inverse

    def __call__(self, data):
        img = data['image']
        img_single_channel = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_expanded = np.expand_dims(img_single_channel, 0)

        if self.inverse:
            data['image'] = np.abs(img_expanded - 1)
        else:
            data['image'] = img_expanded

        data['src_image'] = img
        return data
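
# Usage sketch for GrayImageChannelFormat: a BGR image becomes a single-channel
# array with a leading channel axis; the original image is kept under
# data["src_image"] (the input shape below is an example):
#
#   img = np.zeros((48, 160, 3), dtype=np.uint8)
#   out = GrayImageChannelFormat()(dict(image=img))
#   # out["image"].shape == (1, 48, 160)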