You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

onnx2tensorrt.py 8.5 kB

2 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254
  1. # Copyright (c) OpenMMLab. All rights reserved.
  2. import argparse
  3. import os
  4. import os.path as osp
  5. import warnings
  6. import numpy as np
  7. import onnx
  8. import torch
  9. from mmcv import Config
  10. from mmcv.tensorrt import is_tensorrt_plugin_loaded, onnx2trt, save_trt_engine
  11. from mmdet.core.export import preprocess_example_input
  12. from mmdet.core.export.model_wrappers import (ONNXRuntimeDetector,
  13. TensorRTDetector)
  14. from mmdet.datasets import DATASETS
  15. def get_GiB(x: int):
  16. """return x GiB."""
  17. return x * (1 << 30)
  18. def onnx2tensorrt(onnx_file,
  19. trt_file,
  20. input_config,
  21. verify=False,
  22. show=False,
  23. workspace_size=1,
  24. verbose=False):
  25. import tensorrt as trt
  26. onnx_model = onnx.load(onnx_file)
  27. max_shape = input_config['max_shape']
  28. min_shape = input_config['min_shape']
  29. opt_shape = input_config['opt_shape']
  30. fp16_mode = False
  31. # create trt engine and wrapper
  32. opt_shape_dict = {'input': [min_shape, opt_shape, max_shape]}
  33. max_workspace_size = get_GiB(workspace_size)
  34. trt_engine = onnx2trt(
  35. onnx_model,
  36. opt_shape_dict,
  37. log_level=trt.Logger.VERBOSE if verbose else trt.Logger.ERROR,
  38. fp16_mode=fp16_mode,
  39. max_workspace_size=max_workspace_size)
  40. save_dir, _ = osp.split(trt_file)
  41. if save_dir:
  42. os.makedirs(save_dir, exist_ok=True)
  43. save_trt_engine(trt_engine, trt_file)
  44. print(f'Successfully created TensorRT engine: {trt_file}')
  45. if verify:
  46. # prepare input
  47. one_img, one_meta = preprocess_example_input(input_config)
  48. img_list, img_meta_list = [one_img], [[one_meta]]
  49. img_list = [_.cuda().contiguous() for _ in img_list]
  50. # wrap ONNX and TensorRT model
  51. onnx_model = ONNXRuntimeDetector(onnx_file, CLASSES, device_id=0)
  52. trt_model = TensorRTDetector(trt_file, CLASSES, device_id=0)
  53. # inference with wrapped model
  54. with torch.no_grad():
  55. onnx_results = onnx_model(
  56. img_list, img_metas=img_meta_list, return_loss=False)[0]
  57. trt_results = trt_model(
  58. img_list, img_metas=img_meta_list, return_loss=False)[0]
  59. if show:
  60. out_file_ort, out_file_trt = None, None
  61. else:
  62. out_file_ort, out_file_trt = 'show-ort.png', 'show-trt.png'
  63. show_img = one_meta['show_img']
  64. score_thr = 0.3
  65. onnx_model.show_result(
  66. show_img,
  67. onnx_results,
  68. score_thr=score_thr,
  69. show=True,
  70. win_name='ONNXRuntime',
  71. out_file=out_file_ort)
  72. trt_model.show_result(
  73. show_img,
  74. trt_results,
  75. score_thr=score_thr,
  76. show=True,
  77. win_name='TensorRT',
  78. out_file=out_file_trt)
  79. with_mask = trt_model.with_masks
  80. # compare a part of result
  81. if with_mask:
  82. compare_pairs = list(zip(onnx_results, trt_results))
  83. else:
  84. compare_pairs = [(onnx_results, trt_results)]
  85. err_msg = 'The numerical values are different between Pytorch' + \
  86. ' and ONNX, but it does not necessarily mean the' + \
  87. ' exported ONNX model is problematic.'
  88. # check the numerical value
  89. for onnx_res, pytorch_res in compare_pairs:
  90. for o_res, p_res in zip(onnx_res, pytorch_res):
  91. np.testing.assert_allclose(
  92. o_res, p_res, rtol=1e-03, atol=1e-05, err_msg=err_msg)
  93. print('The numerical values are the same between Pytorch and ONNX')
  94. def parse_normalize_cfg(test_pipeline):
  95. transforms = None
  96. for pipeline in test_pipeline:
  97. if 'transforms' in pipeline:
  98. transforms = pipeline['transforms']
  99. break
  100. assert transforms is not None, 'Failed to find `transforms`'
  101. norm_config_li = [_ for _ in transforms if _['type'] == 'Normalize']
  102. assert len(norm_config_li) == 1, '`norm_config` should only have one'
  103. norm_config = norm_config_li[0]
  104. return norm_config
  105. def parse_args():
  106. parser = argparse.ArgumentParser(
  107. description='Convert MMDetection models from ONNX to TensorRT')
  108. parser.add_argument('config', help='test config file path')
  109. parser.add_argument('model', help='Filename of input ONNX model')
  110. parser.add_argument(
  111. '--trt-file',
  112. type=str,
  113. default='tmp.trt',
  114. help='Filename of output TensorRT engine')
  115. parser.add_argument(
  116. '--input-img', type=str, default='', help='Image for test')
  117. parser.add_argument(
  118. '--show', action='store_true', help='Whether to show output results')
  119. parser.add_argument(
  120. '--dataset',
  121. type=str,
  122. default='coco',
  123. help='Dataset name. This argument is deprecated and will be \
  124. removed in future releases.')
  125. parser.add_argument(
  126. '--verify',
  127. action='store_true',
  128. help='Verify the outputs of ONNXRuntime and TensorRT')
  129. parser.add_argument(
  130. '--verbose',
  131. action='store_true',
  132. help='Whether to verbose logging messages while creating \
  133. TensorRT engine. Defaults to False.')
  134. parser.add_argument(
  135. '--to-rgb',
  136. action='store_false',
  137. help='Feed model with RGB or BGR image. Default is RGB. This \
  138. argument is deprecated and will be removed in future releases.')
  139. parser.add_argument(
  140. '--shape',
  141. type=int,
  142. nargs='+',
  143. default=[400, 600],
  144. help='Input size of the model')
  145. parser.add_argument(
  146. '--mean',
  147. type=float,
  148. nargs='+',
  149. default=[123.675, 116.28, 103.53],
  150. help='Mean value used for preprocess input data. This argument \
  151. is deprecated and will be removed in future releases.')
  152. parser.add_argument(
  153. '--std',
  154. type=float,
  155. nargs='+',
  156. default=[58.395, 57.12, 57.375],
  157. help='Variance value used for preprocess input data. \
  158. This argument is deprecated and will be removed in future releases.')
  159. parser.add_argument(
  160. '--min-shape',
  161. type=int,
  162. nargs='+',
  163. default=None,
  164. help='Minimum input size of the model in TensorRT')
  165. parser.add_argument(
  166. '--max-shape',
  167. type=int,
  168. nargs='+',
  169. default=None,
  170. help='Maximum input size of the model in TensorRT')
  171. parser.add_argument(
  172. '--workspace-size',
  173. type=int,
  174. default=1,
  175. help='Max workspace size in GiB')
  176. args = parser.parse_args()
  177. return args
  178. if __name__ == '__main__':
  179. assert is_tensorrt_plugin_loaded(), 'TensorRT plugin should be compiled.'
  180. args = parse_args()
  181. warnings.warn(
  182. 'Arguments like `--to-rgb`, `--mean`, `--std`, `--dataset` would be \
  183. parsed directly from config file and are deprecated and will be \
  184. removed in future releases.')
  185. if not args.input_img:
  186. args.input_img = osp.join(osp.dirname(__file__), '../demo/demo.jpg')
  187. cfg = Config.fromfile(args.config)
  188. def parse_shape(shape):
  189. if len(shape) == 1:
  190. shape = (1, 3, shape[0], shape[0])
  191. elif len(args.shape) == 2:
  192. shape = (1, 3) + tuple(shape)
  193. else:
  194. raise ValueError('invalid input shape')
  195. return shape
  196. if args.shape:
  197. input_shape = parse_shape(args.shape)
  198. else:
  199. img_scale = cfg.test_pipeline[1]['img_scale']
  200. input_shape = (1, 3, img_scale[1], img_scale[0])
  201. if not args.max_shape:
  202. max_shape = input_shape
  203. else:
  204. max_shape = parse_shape(args.max_shape)
  205. if not args.min_shape:
  206. min_shape = input_shape
  207. else:
  208. min_shape = parse_shape(args.min_shape)
  209. dataset = DATASETS.get(cfg.data.test['type'])
  210. assert (dataset is not None)
  211. CLASSES = dataset.CLASSES
  212. normalize_cfg = parse_normalize_cfg(cfg.test_pipeline)
  213. input_config = {
  214. 'min_shape': min_shape,
  215. 'opt_shape': input_shape,
  216. 'max_shape': max_shape,
  217. 'input_shape': input_shape,
  218. 'input_path': args.input_img,
  219. 'normalize_cfg': normalize_cfg
  220. }
  221. # Create TensorRT engine
  222. onnx2tensorrt(
  223. args.model,
  224. args.trt_file,
  225. input_config,
  226. verify=args.verify,
  227. show=args.show,
  228. workspace_size=args.workspace_size,
  229. verbose=args.verbose)

No Description

Contributors (2)