You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

functional_pil.py 18 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554
  1. #!/usr/bin/env python
  2. # -*- coding: utf-8 -*-
  3. import PIL
  4. from PIL import Image, ImageOps, ImageEnhance
  5. import numpy as np
  6. import colorsys
  7. import random
  8. import math
  9. from numpy import sin, cos, tan
  10. import numbers
# Map human-readable interpolation-method names to PIL resampling filters.
# Used by resize/rotate/random_shear to translate the string API into PIL constants.
_pil_interp_from_str = {
    'nearest': Image.NEAREST,
    'bilinear': Image.BILINEAR,
    'bicubic': Image.BICUBIC,
    'box': Image.BOX,
    'lanczos': Image.LANCZOS,
    'hamming': Image.HAMMING
}
  19. def crop(image, offset_height, offset_width, target_height, target_width):
  20. image_width, image_height = image.size
  21. if offset_width < 0:
  22. raise ValueError('offset_width must be >0.')
  23. if offset_height < 0:
  24. raise ValueError('offset_height must be >0.')
  25. if target_height < 0:
  26. raise ValueError('target_height must be >0.')
  27. if target_width < 0:
  28. raise ValueError('target_width must be >0.')
  29. if offset_width + target_width > image_width:
  30. raise ValueError('offset_width + target_width must be <= image width.')
  31. if offset_height + target_height > image_height:
  32. raise ValueError('offset_height + target_height must be <= image height.')
  33. return image.crop((offset_width, offset_height, offset_width + target_width, offset_height + target_height))
  34. def center_crop(image, size, central_fraction):
  35. image_width, image_height = image.size
  36. if size is not None:
  37. if not isinstance(size, (int, list, tuple)) or (isinstance(size, (list, tuple)) and len(size) != 2):
  38. raise TypeError(
  39. "Size should be a single integer or a list/tuple (h, w) of length 2.But"
  40. "got {}.".format(size)
  41. )
  42. if isinstance(size, int):
  43. target_height = size
  44. target_width = size
  45. else:
  46. target_height = size[0]
  47. target_width = size[1]
  48. elif central_fraction is not None:
  49. if central_fraction <= 0.0 or central_fraction > 1.0:
  50. raise ValueError('central_fraction must be within (0, 1]')
  51. target_height = int(central_fraction * image_height)
  52. target_width = int(central_fraction * image_width)
  53. crop_top = int(round((image_height - target_height) / 2.))
  54. crop_left = int(round((image_width - target_width) / 2.))
  55. return crop(image, crop_top, crop_left, target_height, target_width)
  56. def pad(image, padding, padding_value, mode):
  57. if isinstance(padding, int):
  58. top = bottom = left = right = padding
  59. elif isinstance(padding, (tuple, list)):
  60. if len(padding) == 2:
  61. left = right = padding[0]
  62. top = bottom = padding[1]
  63. elif len(padding) == 4:
  64. left = padding[0]
  65. top = padding[1]
  66. right = padding[2]
  67. bottom = padding[3]
  68. else:
  69. raise TypeError("The size of the padding list or tuple should be 2 or 4." "But got {}".format(padding))
  70. else:
  71. raise TypeError("Padding can be any of: a number, a tuple or list of size 2 or 4." "But got {}".format(padding))
  72. if mode not in ['constant', 'edge', 'reflect', 'symmetric']:
  73. raise TypeError("Padding mode should be 'constant', 'edge', 'reflect', or 'symmetric'.")
  74. if mode == 'constant':
  75. if image.mode == 'P':
  76. palette = image.getpalette()
  77. image = ImageOps.expand(image, border=padding, fill=padding_value)
  78. image.putpalette(palette)
  79. return image
  80. return ImageOps.expand(image, border=padding, fill=padding_value)
  81. if image.mode == 'P':
  82. palette = image.getpalette()
  83. image = np.asarray(image)
  84. image = np.pad(image, ((top, bottom), (left, right)), mode)
  85. image = Image.fromarray(image)
  86. image.putpalette(palette)
  87. return image
  88. image = np.asarray(image)
  89. # RGB image
  90. if len(image.shape) == 3:
  91. image = np.pad(image, ((top, bottom), (left, right), (0, 0)), mode)
  92. # Grayscale image
  93. if len(image.shape) == 2:
  94. image = np.pad(image, ((top, bottom), (left, right)), mode)
  95. return Image.fromarray(image)
  96. def resize(image, size, method):
  97. if not (isinstance(size, int) or (isinstance(size, (list, tuple)) and len(size) == 2)):
  98. raise TypeError('Size should be a single number or a list/tuple (h, w) of length 2.' 'Got {}.'.format(size))
  99. if method not in ('nearest', 'bilinear', 'bicubic', 'box', 'lanczos', 'hamming'):
  100. raise ValueError(
  101. "Unknown resize method! resize method must be in "
  102. "(\'nearest\',\'bilinear\',\'bicubic\',\'box\',\'lanczos\',\'hamming\')"
  103. )
  104. if isinstance(size, int):
  105. w, h = image.size
  106. if (w <= h and w == size) or (h <= w and h == size):
  107. return image
  108. if w < h:
  109. ow = size
  110. oh = int(size * h / w)
  111. return image.resize((ow, oh), _pil_interp_from_str[method])
  112. else:
  113. oh = size
  114. ow = int(size * w / h)
  115. return image.resize((ow, oh), _pil_interp_from_str[method])
  116. else:
  117. return image.resize(size[::-1], _pil_interp_from_str[method])
  118. def transpose(image, order):
  119. image = np.asarray(image)
  120. if not (isinstance(order, (list, tuple)) and len(order) == 3):
  121. raise TypeError("Order must be a list/tuple of length 3." "But got {}.".format(order))
  122. image_shape = image.shape
  123. if len(image_shape) == 2:
  124. image = image[..., np.newaxis]
  125. image = image.transpose(order)
  126. image = Image.fromarray(image)
  127. return image
  128. def hwc_to_chw(image):
  129. image_shape = image.shape
  130. if len(image_shape) == 2:
  131. image = image[..., np.newaxis]
  132. image = image.transpose((2, 0, 1))
  133. image = Image.fromarray(image)
  134. return image
  135. def chw_to_hwc(image):
  136. image_shape = image.shape
  137. if len(image_shape) == 2:
  138. image = image[..., np.newaxis]
  139. image = image.transpose((1, 2, 0))
  140. image = Image.fromarray(image)
  141. return image
  142. def rgb_to_hsv(image):
  143. return image.convert('HSV')
  144. def hsv_to_rgb(image):
  145. return image.convert('RGB')
  146. def rgb_to_gray(image, num_output_channels):
  147. if num_output_channels == 1:
  148. img = image.convert('L')
  149. elif num_output_channels == 3:
  150. img = image.convert('L')
  151. np_img = np.array(img, dtype=np.uint8)
  152. np_img = np.dstack([np_img, np_img, np_img])
  153. img = Image.fromarray(np_img, 'RGB')
  154. else:
  155. raise ValueError('num_output_channels should be either 1 or 3')
  156. return img
  157. def adjust_brightness(image, brightness_factor):
  158. """Adjusts brightness of an Image.
  159. Args:
  160. image (PIL.Image): PIL Image to be adjusted.
  161. brightness_factor (float): How much to adjust the brightness. Can be
  162. any non negative number. 0 gives a black image, 1 gives the
  163. original image while 2 increases the brightness by a factor of 2.
  164. Returns:
  165. PIL.Image: Brightness adjusted image.
  166. """
  167. if brightness_factor < 0:
  168. raise ValueError('brightness_factor ({}) is not non-negative.'.format(brightness_factor))
  169. enhancer = ImageEnhance.Brightness(image)
  170. image = enhancer.enhance(brightness_factor)
  171. return image
  172. def adjust_contrast(image, contrast_factor):
  173. """Adjusts contrast of an Image.
  174. Args:
  175. image (PIL.Image): PIL Image to be adjusted.
  176. contrast_factor (float): How much to adjust the contrast. Can be any
  177. non negative number. 0 gives a solid gray image, 1 gives the
  178. original image while 2 increases the contrast by a factor of 2.
  179. Returns:
  180. PIL.Image: Contrast adjusted image.
  181. """
  182. if contrast_factor < 0:
  183. raise ValueError('contrast_factor ({}) is not non-negative.'.format(contrast_factor))
  184. enhancer = ImageEnhance.Contrast(image)
  185. image = enhancer.enhance(contrast_factor)
  186. return image
def adjust_hue(image, hue_factor):
    """Adjusts hue of an image.

    The image hue is adjusted by converting the image to HSV and
    cyclically shifting the intensities in the hue channel (H).
    The image is then converted back to original image mode.

    `hue_factor` is the amount of shift in H channel and must be in the
    interval `[-0.5, 0.5]`.

    Args:
        image (PIL.Image): PIL Image to be adjusted.
        hue_factor (float): How much to shift the hue channel. Should be in
            [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
            HSV space in positive and negative direction respectively.
            0 means no shift. Therefore, both -0.5 and 0.5 will give an image
            with complementary colors while 0 gives the original image.

    Returns:
        PIL.Image: Hue adjusted image.
    """
    if not (-0.5 <= hue_factor <= 0.5):
        raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))
    input_mode = image.mode
    # Grayscale / binary / integer / float modes carry no hue information:
    # return the input unchanged.
    if input_mode in {'L', '1', 'I', 'F'}:
        return image
    h, s, v = image.convert('HSV').split()
    np_h = np.array(h, dtype=np.uint8)
    # uint8 addition take cares of rotation across boundaries
    # (wrap-around modulo 256 implements the cyclic hue shift).
    with np.errstate(over='ignore'):
        np_h += np.uint8(hue_factor * 255)
    h = Image.fromarray(np_h, 'L')
    image = Image.merge('HSV', (h, s, v)).convert(input_mode)
    return image
  217. def adjust_saturation(image, saturation_factor):
  218. """Adjusts color saturation of an image.
  219. Args:
  220. image (PIL.Image): PIL Image to be adjusted.
  221. saturation_factor (float): How much to adjust the saturation. 0 will
  222. give a black and white image, 1 will give the original image while
  223. 2 will enhance the saturation by a factor of 2.
  224. Returns:
  225. PIL.Image: Saturation adjusted image.
  226. """
  227. if saturation_factor < 0:
  228. raise ValueError('saturation_factor ({}) is not non-negative.'.format(saturation_factor))
  229. enhancer = ImageEnhance.Color(image)
  230. image = enhancer.enhance(saturation_factor)
  231. return image
  232. def hflip(image):
  233. """Horizontally flips the given PIL Image.
  234. Args:
  235. img (PIL.Image): Image to be flipped.
  236. Returns:
  237. PIL.Image: Horizontall flipped image.
  238. """
  239. return image.transpose(Image.FLIP_LEFT_RIGHT)
  240. def vflip(image):
  241. """Vertically flips the given PIL Image.
  242. Args:
  243. img (PIL.Image): Image to be flipped.
  244. Returns:
  245. PIL.Image: Vertically flipped image.
  246. """
  247. return image.transpose(Image.FLIP_TOP_BOTTOM)
  248. def padtoboundingbox(image, offset_height, offset_width, target_height, target_width, padding_value):
  249. '''
  250. Parameters
  251. ----------
  252. image:
  253. A PIL image to be padded size of (target_width, target_height)
  254. offset_height:
  255. Number of rows of padding_values to add on top.
  256. offset_width:
  257. Number of columns of padding_values to add on the left.
  258. target_height:
  259. Height of output image.
  260. target_width:
  261. Width of output image.
  262. padding_value:
  263. value to pad
  264. Returns:
  265. PIL.Image: padded image
  266. -------
  267. '''
  268. if offset_height < 0:
  269. raise ValueError('offset_height must be >= 0')
  270. if offset_width < 0:
  271. raise ValueError('offset_width must be >= 0')
  272. width, height = image.size
  273. after_padding_width = target_width - offset_width - width
  274. after_padding_height = target_height - offset_height - height
  275. if after_padding_height < 0:
  276. raise ValueError('image height must be <= target - offset')
  277. if after_padding_width < 0:
  278. raise ValueError('image width must be <= target - offset')
  279. return pad(
  280. image, padding=(offset_width, offset_height, after_padding_width, after_padding_height),
  281. padding_value=padding_value, mode='constant'
  282. )
  283. def rotate(image, angle, interpolation, expand, center, fill):
  284. """Rotates the image by angle.
  285. Args:
  286. img (PIL.Image): Image to be rotated.
  287. angle (float or int): In degrees degrees counter clockwise order.
  288. interpolation (str, optional): Interpolation method. If omitted, or if the
  289. image has only one channel, it is set to PIL.Image.NEAREST . when use pil backend,
  290. support method are as following:
  291. - "nearest": Image.NEAREST,
  292. - "bilinear": Image.BILINEAR,
  293. - "bicubic": Image.BICUBIC
  294. expand (bool, optional): Optional expansion flag.
  295. If true, expands the output image to make it large enough to hold the entire rotated image.
  296. If false or omitted, make the output image the same size as the input image.
  297. Note that the expand flag assumes rotation around the center and no translation.
  298. center (2-tuple, optional): Optional center of rotation.
  299. Origin is the upper left corner.
  300. Default is the center of the image.
  301. fill (3-tuple or int): RGB pixel fill value for area outside the rotated image.
  302. If int, it is used for all channels respectively.
  303. Returns:
  304. PIL.Image: Rotated image.
  305. """
  306. c = 1 if image.mode == 'L' else 3
  307. if isinstance(fill, numbers.Number):
  308. fill = (fill, ) * c
  309. elif not (isinstance(fill, (list, tuple)) and len(fill) == c):
  310. raise ValueError(
  311. 'If fill should be a single number or a list/tuple with length of image channels.'
  312. 'But got {}'.format(fill)
  313. )
  314. return image.rotate(angle, _pil_interp_from_str[interpolation], expand, center, fillcolor=fill)
  315. def get_affine_matrix(center, angle, translate, scale, shear):
  316. rot = math.radians(angle)
  317. sx, sy = [math.radians(s) for s in shear]
  318. cx, cy = center
  319. tx, ty = translate
  320. # RSS without scaling
  321. a = math.cos(rot - sy) / math.cos(sy)
  322. b = -math.cos(rot - sy) * math.tan(sx) / math.cos(sy) - math.sin(rot)
  323. c = math.sin(rot - sy) / math.cos(sy)
  324. d = -math.sin(rot - sy) * math.tan(sx) / math.cos(sy) + math.cos(rot)
  325. # Inverted rotation matrix with scale and shear
  326. # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1
  327. matrix = [d, -b, 0.0, -c, a, 0.0]
  328. matrix = [x / scale for x in matrix]
  329. # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
  330. matrix[2] += matrix[0] * (-cx - tx) + matrix[1] * (-cy - ty)
  331. matrix[5] += matrix[3] * (-cx - tx) + matrix[4] * (-cy - ty)
  332. # Apply center translation: C * RSS^-1 * C^-1 * T^-1
  333. matrix[2] += cx
  334. matrix[5] += cy
  335. return matrix
  336. def random_shear(image, degrees, interpolation, fill):
  337. c = 1 if image.mode == 'L' else 3
  338. if isinstance(fill, numbers.Number):
  339. fill = (fill, ) * c
  340. elif not (isinstance(fill, (list, tuple)) and len(fill) == c):
  341. raise ValueError(
  342. 'If fill should be a single number or a list/tuple with length of image channels.'
  343. 'But got {}'.format(fill)
  344. )
  345. w, h = image.size
  346. center = (w / 2.0, h / 2.0)
  347. shear = [np.random.uniform(degrees[0], degrees[1]), np.random.uniform(degrees[2], degrees[3])]
  348. interpolation = _pil_interp_from_str[interpolation]
  349. matrix = get_affine_matrix(center=center, angle=0, translate=(0, 0), scale=1.0, shear=shear)
  350. output_size = (w, h)
  351. kwargs = {"fillcolor": fill}
  352. return image.transform(output_size, Image.AFFINE, matrix, interpolation, **kwargs)
  353. def random_shift(image, shift, interpolation, fill):
  354. c = 1 if image.mode == 'L' else 3
  355. if isinstance(fill, numbers.Number):
  356. fill = (fill, ) * c
  357. elif not (isinstance(fill, (list, tuple)) and len(fill) == c):
  358. raise ValueError(
  359. 'If fill should be a single number or a list/tuple with length of image channels.'
  360. 'But got {}'.format(fill)
  361. )
  362. w, h = image.size
  363. center = (w / 2.0, h / 2.0)
  364. hrg = shift[0]
  365. wrg = shift[1]
  366. tx = np.random.uniform(-hrg, hrg) * h
  367. ty = np.random.uniform(-wrg, wrg) * w
  368. matrix = get_affine_matrix(center=center, angle=0, translate=(tx, ty), scale=1.0, shear=(0, 0))
  369. print(matrix)
  370. output_size = (w, h)
  371. kwargs = {"fillcolor": fill}
  372. return image.transform(output_size, Image.AFFINE, matrix, interpolation, **kwargs)
  373. def random_zoom(image, zoom, interpolation, fill):
  374. c = 1 if image.mode == 'L' else 3
  375. if isinstance(fill, numbers.Number):
  376. fill = (fill, ) * c
  377. elif not (isinstance(fill, (list, tuple)) and len(fill) == c):
  378. raise ValueError(
  379. 'If fill should be a single number or a list/tuple with length of image channels.'
  380. 'But got {}'.format(fill)
  381. )
  382. w, h = image.size
  383. scale = np.random.uniform(zoom[0], zoom[1])
  384. center = (w / 2.0, h / 2.0)
  385. matrix = get_affine_matrix(center=center, angle=0, translate=(0, 0), scale=scale, shear=(0, 0))
  386. output_size = (w, h)
  387. kwargs = {"fillcolor": fill}
  388. return image.transform(output_size, Image.AFFINE, matrix, interpolation, **kwargs)
  389. def random_affine(image, degrees, shift, zoom, shear, interpolation, fill):
  390. c = 1 if image.mode == 'L' else 3
  391. if isinstance(fill, numbers.Number):
  392. fill = (fill, ) * c
  393. elif not (isinstance(fill, (list, tuple)) and len(fill) == c):
  394. raise ValueError(
  395. 'If fill should be a single number or a list/tuple with length of image channels.'
  396. 'But got {}'.format(fill)
  397. )
  398. w, h = image.size
  399. angle = float(np.random.uniform(float(degrees[0]), float(degrees[1])))
  400. center = (w / 2.0, h / 2.0)
  401. if shift is not None:
  402. max_dx = float(shift[0] * w)
  403. max_dy = float(shift[1] * h)
  404. tx = int(round(np.random.uniform(-max_dx, max_dx)))
  405. ty = int(round(np.random.uniform(-max_dy, max_dy)))
  406. translations = (tx, ty)
  407. else:
  408. translations = (0, 0)
  409. if zoom is not None:
  410. scale = float(np.random.uniform(zoom[0], zoom[1]))
  411. else:
  412. scale = 1.0
  413. shear_x = shear_y = 0
  414. if shear is not None:
  415. shear_x = float(np.random.uniform(shear[0], shear[1]))
  416. if len(shear) == 4:
  417. shear_y = float(np.random.uniform(shear[2], shear[3]))
  418. shear = (shear_x, shear_y)
  419. matrix = get_affine_matrix(center=center, angle=angle, translate=translations, scale=scale, shear=shear)
  420. output_size = (w, h)
  421. kwargs = {"fillcolor": fill}
  422. return image.transform(output_size, Image.AFFINE, matrix, interpolation, **kwargs)

TensorLayer3.0 是一款兼容多种深度学习框架作为计算后端的深度学习库，计划兼容 TensorFlow、PyTorch、MindSpore、Paddle。