You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

img_process_server.py 6.7 kB

5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191
  1. """
  2. /**
  3. * Copyright 2020 Zhejiang Lab. All Rights Reserved.
  4. *
  5. * Licensed under the Apache License, Version 2.0 (the "License");
  6. * you may not use this file except in compliance with the License.
  7. * You may obtain a copy of the License at
  8. *
  9. * http://www.apache.org/licenses/LICENSE-2.0
  10. *
  11. * Unless required by applicable law or agreed to in writing, software
  12. * distributed under the License is distributed on an "AS IS" BASIS,
  13. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  14. * See the License for the specific language governing permissions and
  15. * limitations under the License.
  16. * =============================================================
  17. */
  18. """
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
import web
import os
import string
import cv2
import numpy as np
import _thread
import logging
import urllib
from queue import Queue
import time
import random
import json
import argparse
import sys
import codecs
import shutil
from augment_utils.ACE import ACE_color
from augment_utils.dehaze import deHaze, addHaze
from augment_utils.hist_equalize import adaptive_hist_equalize
from log_config import setup_log
from upload_config import Upload_cfg, MyApplication
# web.py route table: POST /img_process is handled by Image_augmentation.
urls = ('/img_process', 'Image_augmentation')
# Force UTF-8 on stdout so printing non-ASCII text cannot raise.
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())
# task url suffix
img_pro_url = 'api/data/datasets/'
# arguments
parser = argparse.ArgumentParser(description="config for image augmentation server")
parser.add_argument("-p", "--port", type=int, required=True)
parser.add_argument("-m", "--mode", type=str, default="test", required=False)
args = parser.parse_args()
# url concat(ip + port + suffix)
url_json = './config/url.json'
with open(url_json) as f:
    url_dict = json.loads(f.read())
# Callback base URL selected by deployment mode.
# NOTE(review): assumes url.json maps mode -> "http://host:port/" — confirm.
img_pro_url = url_dict[args.mode] + img_pro_url
port = args.port
# create the task queue shared between the web handler and the worker thread
imageProcessQuene = Queue()
base_path = "/nfs/"
# create log path and file
des_folder = os.path.join('./log', args.mode)
if not os.path.exists(des_folder):
    os.makedirs(des_folder)
# WARNING: this rebinds the name `logging` from the stdlib module to the
# logger object returned by setup_log(); every later `logging.*` call in
# this file goes to that logger, not to the logging module.
logging = setup_log(args.mode, 'enhance-' + args.mode + '.log')
  65. class Image_augmentation(Upload_cfg):
  66. """Recieve and analyze the post request"""
  67. def POST(self):
  68. try:
  69. super().POST()
  70. x = web.data()
  71. x = json.loads(x.decode())
  72. dataset_id = x['id']
  73. img_save_path = x['enhanceFilePath']
  74. ann_save_path = x["enhanceAnnotationPath"]
  75. file_list = x['fileDtos']
  76. nums_, img_path_list, ann_path_list = img_ann_list_gen(file_list)
  77. process_type = x['type']
  78. re_task_id = ''.join(random.sample(string.ascii_letters + string.digits, 8))
  79. img_process_config = [dataset_id, img_save_path,
  80. ann_save_path, img_path_list,
  81. ann_path_list, process_type, re_task_id]
  82. web.t_queue2.put(img_process_config)
  83. logging.info(str(nums_) + ' images for augment')
  84. return {"code": 200, "msg": "", "data": re_task_id}
  85. except Exception as e:
  86. print(e)
  87. print("Error Post")
  88. logging.error("Error post")
  89. logging.error(e)
  90. return 'post error'
  91. def image_process_thread():
  92. """The implementation of image augmentation thread"""
  93. global img_pro_url
  94. global imageProcessQuene
  95. logging.info('img_process server start'.center(66, '-'))
  96. logging.info(img_pro_url)
  97. task_cond = []
  98. while True:
  99. try:
  100. img_task = imageProcessQuene.get()
  101. if img_task and img_task[0] not in task_cond:
  102. index = len(task_cond)
  103. task_cond.append(img_task[0])
  104. dataset_id = img_task[0]
  105. img_save_path = img_task[1]
  106. ann_save_path = img_task[2]
  107. img_list = img_task[3]
  108. ann_list = img_task[4]
  109. method = img_task[5]
  110. re_task_id = img_task[6]
  111. suffix = '_enchanced_' + re_task_id
  112. logging.info("dataset_id " + str(dataset_id))
  113. for j in range(len(ann_list)):
  114. img_path = img_list[j]
  115. ann_path = ann_list[j]
  116. img_process(suffix, img_path, ann_path,
  117. img_save_path, ann_save_path, method)
  118. task_url = img_pro_url + 'enhance/finish'
  119. send_data = {"id": re_task_id,
  120. "suffix": suffix}
  121. headers = {'Content-Type': 'application/json'}
  122. req = urllib.request.Request(task_url,
  123. data=json.dumps(send_data).encode(),
  124. headers=headers)
  125. response = urllib.request.urlopen(req, timeout=5)
  126. logging.info('suffix:' + suffix)
  127. logging.info(task_url)
  128. logging.info(response.read())
  129. logging.info("End img_process of dataset:" + str(dataset_id))
  130. task_cond.pop(index)
  131. else:
  132. continue
  133. except Exception as e:
  134. logging.info(img_pro_url)
  135. logging.error("Error imgProcess")
  136. logging.error(e)
  137. time.sleep(0.01)
  138. def img_ann_list_gen(file_list):
  139. """Analyze the json request and convert to list"""
  140. nums_ = len(file_list)
  141. img_list = []
  142. ann_list = []
  143. for i in range(nums_):
  144. img_list.append(file_list[i]['filePath'])
  145. ann_list.append(file_list[i]['annotationPath'])
  146. return nums_, img_list, ann_list
  147. def img_process(suffix, img_path, ann_path, img_save_path, ann_save_path, method_ind):
  148. """Process images and save in specified path"""
  149. inds2method = {1: deHaze, 2: addHaze, 3: ACE_color, 4: adaptive_hist_equalize}
  150. method = inds2method[method_ind]
  151. img_raw = cv2.imdecode(np.fromfile(img_path.encode('utf-8'), dtype=np.uint8), 1)
  152. img_suffix = os.path.splitext(img_path)[-1]
  153. ann_name = ann_path.replace(ann_save_path, '')
  154. if method_ind <= 3:
  155. processed_img = method(img_raw / 255.0) * 255
  156. else:
  157. processed_img = method(img_raw)
  158. cv2.imwrite(img_save_path + ann_name + suffix + img_suffix,
  159. processed_img.astype(np.uint8))
  160. shutil.copyfile(ann_path.encode('utf-8'), (ann_path + suffix).encode('utf-8'))
def img_process_thread(no, interval):
    """Thread entry point for _thread.start_new_thread.

    `no` and `interval` are unused placeholders (start_new_thread requires
    an args tuple); all work happens in the blocking image_process_thread()
    loop.
    """
    image_process_thread()
if __name__ == "__main__":
    # Start the single queue-consumer thread; the (5, 5) args are unused
    # placeholders required by start_new_thread's signature.
    _thread.start_new_thread(img_process_thread, (5, 5))
    app = MyApplication(urls, globals())
    # Expose the shared queue on the web module so the POST handler
    # (web.t_queue2.put) and the consumer use the same queue.
    web.t_queue2 = imageProcessQuene
    # Blocking call: serve HTTP on the port given by -p.
    app.run(port=port)

一站式算法开发平台、高性能分布式深度学习框架、先进算法模型库、视觉模型炼知平台、数据可视化分析平台等一系列平台及工具,在模型高效分布式训练、数据处理和可视分析、模型炼知和轻量化等技术上形成独特优势,目前已在产学研等各领域近千家单位及个人提供AI应用赋能

Contributors (1)