You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

imgprocess.py 4.6 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122
  1. # !/usr/bin/env python
  2. # -*- coding:utf-8 -*-
  3. """
  4. Copyright 2020 Tianshu AI Platform. All Rights Reserved.
  5. Licensed under the Apache License, Version 2.0 (the "License");
  6. you may not use this file except in compliance with the License.
  7. You may obtain a copy of the License at
  8. http://www.apache.org/licenses/LICENSE-2.0
  9. Unless required by applicable law or agreed to in writing, software
  10. distributed under the License is distributed on an "AS IS" BASIS,
  11. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. See the License for the specific language governing permissions and
  13. limitations under the License.
  14. =============================================================
  15. """
  16. import logging
  17. import time
  18. import cv2
  19. import numpy as np
  20. import shutil
  21. import os
  22. from abc import ABC
  23. from program.abstract.algorithm import Algorithm
  24. from common.util.algorithm.ACE import ACE_color
  25. from common.util.algorithm.dehaze import deHaze, addHaze
  26. from common.util.algorithm.hist_equalize import adaptive_hist_equalize
  27. class Imgprocess(Algorithm, ABC):
  28. def __init__(self):
  29. pass
  30. def execute(task):
  31. return Imgprocess.start_enhance_task(task)
  32. def start_enhance_task(taskParameters):
  33. """
  34. Enhance task method.
  35. Args:
  36. enhanceTaskId: enhance task id.
  37. redisClient: redis client.
  38. """
  39. dataset_id = taskParameters['id']
  40. img_save_path = taskParameters['enhanceFilePath']
  41. ann_save_path = taskParameters["enhanceAnnotationPath"]
  42. file_list = taskParameters['fileDtos']
  43. nums_, img_path_list, ann_path_list = Imgprocess.img_ann_list_gen(file_list)
  44. process_type = taskParameters['type']
  45. re_task_id = taskParameters['reTaskId']
  46. img_process_config = [dataset_id, img_save_path,
  47. ann_save_path, img_path_list,
  48. ann_path_list, process_type, re_task_id]
  49. return Imgprocess.image_enhance_process(img_process_config)
  50. logging.info(str(nums_) + ' images for augment')
  51. def img_ann_list_gen(file_list):
  52. """Analyze the json request and convert to list"""
  53. nums_ = len(file_list)
  54. img_list = []
  55. ann_list = []
  56. for i in range(nums_):
  57. img_list.append(file_list[i]['filePath'])
  58. ann_list.append(file_list[i]['annotationPath'])
  59. return nums_, img_list, ann_list
  60. def image_enhance_process(img_task):
  61. """The implementation of image augmentation thread"""
  62. global finish_key
  63. global re_task_id
  64. logging.info('img_process server start'.center(66, '-'))
  65. result = True
  66. try:
  67. dataset_id = img_task[0]
  68. img_save_path = img_task[1]
  69. ann_save_path = img_task[2]
  70. img_list = img_task[3]
  71. ann_list = img_task[4]
  72. method = img_task[5]
  73. re_task_id = img_task[6]
  74. suffix = '_enchanced_' + re_task_id
  75. logging.info("dataset_id " + str(dataset_id))
  76. finish_key = {"processKey": re_task_id}
  77. finish_data = {"id": re_task_id,
  78. "suffix": suffix}
  79. for j in range(len(ann_list)):
  80. img_path = img_list[j]
  81. ann_path = ann_list[j]
  82. Imgprocess.img_process(suffix, img_path, ann_path,
  83. img_save_path, ann_save_path, method)
  84. logging.info('suffix:' + suffix)
  85. logging.info("End img_process of dataset:" + str(dataset_id))
  86. return finish_data, result
  87. except Exception as e:
  88. result = False
  89. return finish_data, result
  90. logging.error("Error imgProcess")
  91. logging.error(e)
  92. time.sleep(0.01)
  93. def img_process(suffix, img_path, ann_path, img_save_path, ann_save_path, method_ind):
  94. """Process images and save in specified path"""
  95. inds2method = {1: deHaze, 2: addHaze, 3: ACE_color, 4: adaptive_hist_equalize}
  96. method = inds2method[method_ind]
  97. img_raw = cv2.imdecode(np.fromfile(img_path.encode('utf-8'), dtype=np.uint8), 1)
  98. img_suffix = os.path.splitext(img_path)[-1]
  99. ann_name = os.path.basename(ann_path)
  100. if method_ind <= 3:
  101. processed_img = method(img_raw / 255.0) * 255
  102. else:
  103. processed_img = method(img_raw)
  104. cv2.imwrite(img_save_path + "/" + ann_name + suffix + img_suffix,
  105. processed_img.astype(np.uint8))
  106. shutil.copyfile(ann_path.encode('utf-8'), (ann_save_path + "/" + ann_name + suffix).encode('utf-8'))

一站式算法开发平台、高性能分布式深度学习框架、先进算法模型库、视觉模型炼知平台、数据可视化分析平台等一系列平台及工具,在模型高效分布式训练、数据处理和可视分析、模型炼知和轻量化等技术上形成独特优势,目前已在产学研等各领域近千家单位及个人提供AI应用赋能