You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

eval.py 8.4 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """eval deeplabv3."""
  16. import os
  17. import argparse
  18. import numpy as np
  19. import cv2
  20. from mindspore import Tensor
  21. import mindspore.common.dtype as mstype
  22. import mindspore.nn as nn
  23. import mindspore.ops as ops
  24. from mindspore import context
  25. from mindspore.train.serialization import load_checkpoint, load_param_into_net
  26. from src.nets import net_factory
  27. context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False,
  28. device_id=int(os.getenv('DEVICE_ID')))
  29. def parse_args():
  30. parser = argparse.ArgumentParser('mindspore deeplabv3 eval')
  31. # val data
  32. parser.add_argument('--data_root', type=str, default='', help='root path of val data')
  33. parser.add_argument('--data_lst', type=str, default='', help='list of val data')
  34. parser.add_argument('--batch_size', type=int, default=16, help='batch size')
  35. parser.add_argument('--crop_size', type=int, default=513, help='crop size')
  36. parser.add_argument('--image_mean', type=list, default=[103.53, 116.28, 123.675], help='image mean')
  37. parser.add_argument('--image_std', type=list, default=[57.375, 57.120, 58.395], help='image std')
  38. parser.add_argument('--scales', type=float, action='append', help='scales of evaluation')
  39. parser.add_argument('--flip', action='store_true', help='perform left-right flip')
  40. parser.add_argument('--ignore_label', type=int, default=255, help='ignore label')
  41. parser.add_argument('--num_classes', type=int, default=21, help='number of classes')
  42. # model
  43. parser.add_argument('--model', type=str, default='deeplab_v3_s16', help='select model')
  44. parser.add_argument('--freeze_bn', action='store_true', default=False, help='freeze bn')
  45. parser.add_argument('--ckpt_path', type=str, default='', help='model to evaluate')
  46. parser.add_argument("--input_format", type=str, choices=["NCHW", "NHWC"], default="NCHW",
  47. help="NCHW or NHWC")
  48. args, _ = parser.parse_known_args()
  49. return args
  50. def cal_hist(a, b, n):
  51. k = (a >= 0) & (a < n)
  52. return np.bincount(n * a[k].astype(np.int32) + b[k], minlength=n ** 2).reshape(n, n)
  53. def resize_long(img, long_size=513):
  54. h, w, _ = img.shape
  55. if h > w:
  56. new_h = long_size
  57. new_w = int(1.0 * long_size * w / h)
  58. else:
  59. new_w = long_size
  60. new_h = int(1.0 * long_size * h / w)
  61. imo = cv2.resize(img, (new_w, new_h))
  62. return imo
class BuildEvalNetwork(nn.Cell):
    """Wraps the segmentation backbone and applies a channel-axis softmax.

    When `input_format` is "NHWC" the input is transposed to NCHW before the
    backbone is called; the backbone itself always receives NCHW tensors.
    """

    def __init__(self, network, input_format="NCHW"):
        super(BuildEvalNetwork, self).__init__()
        self.network = network                 # backbone producing per-class logits
        self.softmax = nn.Softmax(axis=1)      # softmax over the class (channel) axis
        self.transpose = ops.Transpose()
        self.format = input_format             # "NCHW" or "NHWC"

    def construct(self, input_data):
        # Graph-mode forward pass: normalize layout, score, convert to probabilities.
        if self.format == "NHWC":
            input_data = self.transpose(input_data, (0, 3, 1, 2))
        output = self.network(input_data)
        output = self.softmax(output)
        return output
  76. def pre_process(args, img_, crop_size=513):
  77. # resize
  78. img_ = resize_long(img_, crop_size)
  79. resize_h, resize_w, _ = img_.shape
  80. # mean, std
  81. image_mean = np.array(args.image_mean)
  82. image_std = np.array(args.image_std)
  83. img_ = (img_ - image_mean) / image_std
  84. # pad to crop_size
  85. pad_h = crop_size - img_.shape[0]
  86. pad_w = crop_size - img_.shape[1]
  87. if pad_h > 0 or pad_w > 0:
  88. img_ = cv2.copyMakeBorder(img_, 0, pad_h, 0, pad_w, cv2.BORDER_CONSTANT, value=0)
  89. # hwc to chw
  90. img_ = img_.transpose((2, 0, 1))
  91. return img_, resize_h, resize_w
  92. def eval_batch(args, eval_net, img_lst, crop_size=513, flip=True):
  93. result_lst = []
  94. batch_size = len(img_lst)
  95. batch_img = np.zeros((args.batch_size, 3, crop_size, crop_size), dtype=np.float32)
  96. resize_hw = []
  97. for l in range(batch_size):
  98. img_ = img_lst[l]
  99. img_, resize_h, resize_w = pre_process(args, img_, crop_size)
  100. batch_img[l] = img_
  101. resize_hw.append([resize_h, resize_w])
  102. batch_img = np.ascontiguousarray(batch_img)
  103. net_out = eval_net(Tensor(batch_img, mstype.float32))
  104. net_out = net_out.asnumpy()
  105. if flip:
  106. batch_img = batch_img[:, :, :, ::-1]
  107. net_out_flip = eval_net(Tensor(batch_img, mstype.float32))
  108. net_out += net_out_flip.asnumpy()[:, :, :, ::-1]
  109. for bs in range(batch_size):
  110. probs_ = net_out[bs][:, :resize_hw[bs][0], :resize_hw[bs][1]].transpose((1, 2, 0))
  111. ori_h, ori_w = img_lst[bs].shape[0], img_lst[bs].shape[1]
  112. probs_ = cv2.resize(probs_, (ori_w, ori_h))
  113. result_lst.append(probs_)
  114. return result_lst
  115. def eval_batch_scales(args, eval_net, img_lst, scales,
  116. base_crop_size=513, flip=True):
  117. sizes_ = [int((base_crop_size - 1) * sc) + 1 for sc in scales]
  118. probs_lst = eval_batch(args, eval_net, img_lst, crop_size=sizes_[0], flip=flip)
  119. print(sizes_)
  120. for crop_size_ in sizes_[1:]:
  121. probs_lst_tmp = eval_batch(args, eval_net, img_lst, crop_size=crop_size_, flip=flip)
  122. for pl, _ in enumerate(probs_lst):
  123. probs_lst[pl] += probs_lst_tmp[pl]
  124. result_msk = []
  125. for i in probs_lst:
  126. result_msk.append(i.argmax(axis=2))
  127. return result_msk
def net_eval():
    """Evaluate DeeplabV3 on a validation list and print per-class and mean IoU."""
    args = parse_args()
    # data list: each line is "<image_path> <mask_path>" relative to data_root
    with open(args.data_lst) as f:
        img_lst = f.readlines()
    # network: build the output-stride-16 or output-stride-8 DeeplabV3 variant
    if args.model == 'deeplab_v3_s16':
        network = net_factory.nets_map[args.model]('eval', args.num_classes, 16, args.freeze_bn)
    elif args.model == 'deeplab_v3_s8':
        network = net_factory.nets_map[args.model]('eval', args.num_classes, 8, args.freeze_bn)
    else:
        raise NotImplementedError('model [{:s}] not recognized'.format(args.model))
    eval_net = BuildEvalNetwork(network, args.input_format)
    # load model weights from the checkpoint and switch to inference mode
    param_dict = load_checkpoint(args.ckpt_path)
    load_param_into_net(eval_net, param_dict)
    eval_net.set_train(False)
    # evaluate: accumulate one global confusion matrix over all images
    hist = np.zeros((args.num_classes, args.num_classes))
    batch_img_lst = []
    batch_msk_lst = []
    bi = 0           # number of images queued in the current batch
    image_num = 0    # index of the last image read (for the final progress line)
    for i, line in enumerate(img_lst):
        img_path, msk_path = line.strip().split(' ')
        img_path = os.path.join(args.data_root, img_path)
        msk_path = os.path.join(args.data_root, msk_path)
        img_ = cv2.imread(img_path)
        msk_ = cv2.imread(msk_path, cv2.IMREAD_GRAYSCALE)
        batch_img_lst.append(img_)
        batch_msk_lst.append(msk_)
        bi += 1
        if bi == args.batch_size:
            # full batch: multi-scale (optionally flipped) inference, then
            # fold each image's prediction into the confusion matrix
            batch_res = eval_batch_scales(args, eval_net, batch_img_lst, scales=args.scales,
                                          base_crop_size=args.crop_size, flip=args.flip)
            for mi in range(args.batch_size):
                hist += cal_hist(batch_msk_lst[mi].flatten(), batch_res[mi].flatten(), args.num_classes)
            bi = 0
            batch_img_lst = []
            batch_msk_lst = []
            print('processed {} images'.format(i+1))
        image_num = i
    if bi > 0:
        # trailing partial batch (fewer than batch_size images left over)
        batch_res = eval_batch_scales(args, eval_net, batch_img_lst, scales=args.scales,
                                      base_crop_size=args.crop_size, flip=args.flip)
        for mi in range(bi):
            hist += cal_hist(batch_msk_lst[mi].flatten(), batch_res[mi].flatten(), args.num_classes)
        print('processed {} images'.format(image_num + 1))
    print(hist)
    # per-class IoU = diag / (row sum + col sum - diag); nanmean skips classes
    # that never appear (0/0 divisions produce NaN)
    iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
    print('per-class IoU', iu)
    print('mean IoU', np.nanmean(iu))
# Script entry point.
if __name__ == '__main__':
    net_eval()