You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

_image_classification_runner.py 34 kB

5 years ago
5 years ago
5 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """Image Classification Runner."""
  16. import os
  17. import re
  18. from time import time
  19. import numpy as np
  20. from PIL import Image
  21. import mindspore as ms
  22. import mindspore.dataset as ds
  23. from mindspore import log
  24. from mindspore.dataset.engine.datasets import Dataset
  25. from mindspore.nn import Cell, SequentialCell
  26. from mindspore.ops.operations import ExpandDims
  27. from mindspore.train._utils import check_value_type
  28. from mindspore.train.summary._summary_adapter import _convert_image_format
  29. from mindspore.train.summary.summary_record import SummaryRecord
  30. from mindspore.train.summary_pb2 import Explain
  31. from .benchmark import Localization
  32. from .explanation import RISE
  33. from .benchmark._attribution.metric import AttributionMetric, LabelSensitiveMetric, LabelAgnosticMetric
  34. from .explanation._attribution.attribution import Attribution
# Shared ExpandDims primitive, reused module-wide to unsqueeze tensors at dim 0.
_EXPAND_DIMS = ExpandDims()
  36. def _normalize(img_np):
  37. """Normalize the numpy image to the range of [0, 1]. """
  38. max_ = img_np.max()
  39. min_ = img_np.min()
  40. normed = (img_np - min_) / (max_ - min_).clip(min=1e-10)
  41. return normed
  42. def _np_to_image(img_np, mode):
  43. """Convert numpy array to PIL image."""
  44. return Image.fromarray(np.uint8(img_np * 255), mode=mode)
class ImageClassificationRunner:
    """
    A high-level API for users to generate and store results of the explanation methods and the evaluation methods.

    Update in 2020.11: Adjust the storage structure and format of the data. Summary files generated by previous version
    will be deprecated and will not be supported in MindInsight of current version.

    Args:
        summary_dir (str): The directory path to save the summary files which store the generated results.
        data (tuple[Dataset, list[str]]): Tuple of dataset and the corresponding class label list. The dataset
            should provides [images], [images, labels] or [images, labels, bboxes] as columns. The label list must
            share the exact same length and order of the network outputs.
        network (Cell): The network(with logit outputs) to be explained.
        activation_fn (Cell): The activation layer that transforms logits to prediction probabilities. For
            single label classification tasks, `nn.Softmax` is usually applied. As for multi-label classification tasks,
            `nn.Sigmoid` is usually be applied. Users can also pass their own customized `activation_fn` as long as
            when combining this function with network, the final output is the probability of the input.

    Examples:
        >>> from mindspore.explainer import ImageClassificationRunner
        >>> from mindspore.explainer.explanation import GuidedBackprop, Gradient
        >>> from mindspore.explainer.benchmark import Faithfulness
        >>> from mindspore.nn import Softmax
        >>> from mindspore.train.serialization import load_checkpoint, load_param_into_net
        >>> # Prepare the dataset for explaining and evaluation, e.g., Cifar10
        >>> dataset = get_dataset('/path/to/Cifar10_dataset')
        >>> labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
        >>> # load checkpoint to a network, e.g. checkpoint of resnet50 trained on Cifar10
        >>> param_dict = load_checkpoint("checkpoint.ckpt")
        >>> net = resnet50(len(labels))
        >>> activation_fn = Softmax()
        >>> load_param_into_net(net, param_dict)
        >>> gbp = GuidedBackprop(net)
        >>> gradient = Gradient(net)
        >>> explainers = [gbp, gradient]
        >>> faithfulness = Faithfulness(len(labels), activation_fn, "NaiveFaithfulness")
        >>> benchmarkers = [faithfulness]
        >>> runner = ImageClassificationRunner("./summary_dir", (dataset, labels), net, activation_fn)
        >>> runner.register_saliency(explainers=explainers, benchmarkers=benchmarkers)
        >>> runner.run()
    """

    # datafile directory names
    _DATAFILE_DIRNAME_PREFIX = "_explain_"
    _ORIGINAL_IMAGE_DIRNAME = "origin_images"
    _HEATMAP_DIRNAME = "heatmap"
    # max. no. of sample per directory
    _SAMPLE_PER_DIR = 1000
    # seed for fixing the iterating order of the dataset
    _DATASET_SEED = 58
    # printing spacer
    _SPACER = "{:120}\r"
    # datafile directory's permission
    _DIR_MODE = 0o750
    # datafile's permission
    _FILE_MODE = 0o600

    def __init__(self,
                 summary_dir,
                 data,
                 network,
                 activation_fn):
        # Validate argument types up front so failures surface at construction time.
        check_value_type("data", data, tuple)
        if len(data) != 2:
            raise ValueError("Argument data is not a tuple with 2 elements")
        check_value_type("data[0]", data[0], Dataset)
        check_value_type("data[1]", data[1], list)
        if not all(isinstance(ele, str) for ele in data[1]):
            raise ValueError("Argument data[1] is not list of str.")
        check_value_type("summary_dir", summary_dir, str)
        check_value_type("network", network, Cell)
        check_value_type("activation_fn", activation_fn, Cell)

        self._summary_dir = summary_dir
        self._dataset = data[0]
        self._labels = data[1]
        self._network = network
        self._explainers = None
        self._benchmarkers = None
        self._summary_timestamp = None
        # Running counter of the next sample id; reset at the start of each pass over the dataset.
        self._sample_index = -1

        # Chain logits through the activation to obtain probabilities for inference.
        self._full_network = SequentialCell([self._network, activation_fn])

        self._verify_data_n_settings(check_data_n_network=True)

    def register_saliency(self,
                          explainers,
                          benchmarkers=None):
        """
        Register saliency explanation instances.

        Note:
            This function call not be invoked more then once on each runner.

        Args:
            explainers (list[Attribution]): The explainers to be evaluated,
                see `mindspore.explainer.explanation`. All explainers' class must be distinct and their network
                must be the exact same instance of the runner's network.
            benchmarkers (list[AttributionMetric], optional): The benchmarkers for scoring the explainers,
                see `mindspore.explainer.benchmark`. All benchmarkers' class must be distinct.

        Raises:
            ValueError: Be raised for any data or settings' value problem.
            TypeError: Be raised for any data or settings' type problem.
            RuntimeError: Be raised if this function was invoked before.
        """
        check_value_type("explainers", explainers, list)
        if not all(isinstance(ele, Attribution) for ele in explainers):
            raise TypeError("Argument explainers is not list of mindspore.explainer.explanation .")

        if not explainers:
            raise ValueError("Argument explainers is empty.")

        if benchmarkers:
            check_value_type("benchmarkers", benchmarkers, list)
            if not all(isinstance(ele, AttributionMetric) for ele in benchmarkers):
                raise TypeError("Argument benchmarkers is not list of mindspore.explainer.benchmark .")

        if self._explainers is not None:
            raise RuntimeError("Function register_saliency() was invoked already.")

        self._explainers = explainers
        self._benchmarkers = benchmarkers

        try:
            self._verify_data_n_settings(check_saliency=True)
        except (ValueError, TypeError):
            # Roll back registration so the runner stays in a clean state on failure.
            self._explainers = None
            self._benchmarkers = None
            raise

    def run(self):
        """
        Run the explain job and save the result as a summary in summary_dir.

        Note:
            User should call register_saliency() once before running this function.

        Raises:
            ValueError: Be raised for any data or settings' value problem.
            TypeError: Be raised for any data or settings' type problem.
            RuntimeError: Be raised for any runtime problem.
        """
        self._verify_data_n_settings(check_all=True)

        with SummaryRecord(self._summary_dir) as summary:
            print("Start running and writing......")
            begin = time()

            # The timestamp embedded in the summary filename names the datafile directory.
            self._summary_timestamp = self._extract_timestamp(summary.event_file_name)
            if self._summary_timestamp is None:
                raise RuntimeError("Cannot extract timestamp from summary filename!"
                                   " It should contains a timestamp after 'summary.' .")

            self._save_metadata(summary)

            imageid_labels = self._run_inference(summary)
            if self._is_saliency_registered:
                self._run_saliency(summary, imageid_labels)

            print("Finish running and writing. Total time elapsed: {:.3f} s".format(time() - begin))

    @property
    def _is_saliency_registered(self):
        """Check if saliency module is registered."""
        return bool(self._explainers)

    def _save_metadata(self, summary):
        """Save metadata of the explain job to summary."""
        print("Start writing metadata......")

        explain = Explain()
        explain.metadata.label.extend(self._labels)

        if self._is_saliency_registered:
            exp_names = [exp.__class__.__name__ for exp in self._explainers]
            explain.metadata.explain_method.extend(exp_names)
            if self._benchmarkers is not None:
                bench_names = [bench.__class__.__name__ for bench in self._benchmarkers]
                explain.metadata.benchmark_method.extend(bench_names)

        summary.add_value("explainer", "metadata", explain)
        summary.record(1)

        print("Finish writing metadata.")

    def _run_inference(self, summary, threshold=0.5):
        """
        Run inference for the dataset and write the inference related data into summary.

        Args:
            summary (SummaryRecord): The summary object to store the data
            threshold (float): The threshold for prediction.

        Returns:
            dict, The map of sample d to the union of its ground truth and predicted labels.
        """
        sample_id_labels = {}
        self._sample_index = 0
        # Fix the iteration order so sample ids line up across the inference and saliency passes.
        ds.config.set_seed(self._DATASET_SEED)
        for j, next_element in enumerate(self._dataset):
            now = time()
            inputs, labels, _ = self._unpack_next_element(next_element)
            prob = self._full_network(inputs).asnumpy()
            for idx, inp in enumerate(inputs):
                gt_labels = labels[idx]
                gt_probs = [float(prob[idx][i]) for i in gt_labels]

                data_np = _convert_image_format(np.expand_dims(inp.asnumpy(), 0), 'NCHW')
                original_image = _np_to_image(_normalize(data_np), mode='RGB')
                original_image_path = self._save_original_image(self._sample_index, original_image)

                predicted_labels = [int(i) for i in (prob[idx] > threshold).nonzero()[0]]
                predicted_probs = [float(prob[idx][i]) for i in predicted_labels]

                union_labs = list(set(gt_labels + predicted_labels))
                sample_id_labels[str(self._sample_index)] = union_labs

                explain = Explain()
                explain.sample_id = self._sample_index
                explain.image_path = original_image_path
                summary.add_value("explainer", "sample", explain)

                explain = Explain()
                explain.sample_id = self._sample_index
                explain.ground_truth_label.extend(gt_labels)
                explain.inference.ground_truth_prob.extend(gt_probs)
                explain.inference.predicted_label.extend(predicted_labels)
                explain.inference.predicted_prob.extend(predicted_probs)
                summary.add_value("explainer", "inference", explain)

                summary.record(1)

                self._sample_index += 1
            self._spaced_print("Finish running and writing {}-th batch inference data."
                               " Time elapsed: {:.3f} s".format(j, time() - now),
                               end='')
        return sample_id_labels

    def _run_saliency(self, summary, sample_id_labels):
        """Run the saliency explanations."""
        if self._benchmarkers is None or not self._benchmarkers:
            # Explanation-only pass: no benchmark scores to accumulate or record.
            for exp in self._explainers:
                start = time()
                print("Start running and writing explanation data for {}......".format(exp.__class__.__name__))
                self._sample_index = 0
                ds.config.set_seed(self._DATASET_SEED)
                for idx, next_element in enumerate(self._dataset):
                    now = time()
                    self._spaced_print("Start running {}-th explanation data for {}......".format(
                        idx, exp.__class__.__name__), end='')
                    self._run_exp_step(next_element, exp, sample_id_labels, summary)
                    self._spaced_print("Finish writing {}-th explanation data for {}. Time elapsed: "
                                       "{:.3f} s".format(idx, exp.__class__.__name__, time() - now), end='')
                self._spaced_print(
                    "Finish running and writing explanation data for {}. Time elapsed: {:.3f} s".format(
                        exp.__class__.__name__, time() - start))
        else:
            # Explanation + benchmark pass: evaluate every benchmarker on each batch's saliency maps.
            for exp in self._explainers:
                explain = Explain()
                for bench in self._benchmarkers:
                    bench.reset()
                print(f"Start running and writing explanation and "
                      f"benchmark data for {exp.__class__.__name__}......")
                self._sample_index = 0
                start = time()
                ds.config.set_seed(self._DATASET_SEED)
                for idx, next_element in enumerate(self._dataset):
                    now = time()
                    self._spaced_print("Start running {}-th explanation data for {}......".format(
                        idx, exp.__class__.__name__), end='')
                    saliency_dict_lst = self._run_exp_step(next_element, exp, sample_id_labels, summary)
                    self._spaced_print(
                        "Finish writing {}-th batch explanation data for {}. Time elapsed: {:.3f} s".format(
                            idx, exp.__class__.__name__, time() - now), end='')
                    for bench in self._benchmarkers:
                        now = time()
                        self._spaced_print(
                            "Start running {}-th batch {} data for {}......".format(
                                idx, bench.__class__.__name__, exp.__class__.__name__), end='')
                        self._run_exp_benchmark_step(next_element, exp, bench, saliency_dict_lst)
                        self._spaced_print(
                            "Finish running {}-th batch {} data for {}. Time elapsed: {:.3f} s".format(
                                idx, bench.__class__.__name__, exp.__class__.__name__, time() - now), end='')

                for bench in self._benchmarkers:
                    benchmark = explain.benchmark.add()
                    benchmark.explain_method = exp.__class__.__name__
                    benchmark.benchmark_method = bench.__class__.__name__
                    benchmark.total_score = bench.performance
                    if isinstance(bench, LabelSensitiveMetric):
                        benchmark.label_score.extend(bench.class_performances)

                self._spaced_print("Finish running and writing explanation and benchmark data for {}. "
                                   "Time elapsed: {:.3f} s".format(exp.__class__.__name__, time() - start))
                summary.add_value('explainer', 'benchmark', explain)
                summary.record(1)

    def _run_exp_step(self, next_element, explainer, sample_id_labels, summary):
        """
        Run the explanation for each step and write explanation results into summary.

        Args:
            next_element (Tuple): Data of one step
            explainer (_Attribution): An Attribution object to generate saliency maps.
            sample_id_labels (dict): A dict that maps the sample id and its union labels.
            summary (SummaryRecord): The summary object to store the data

        Returns:
            list, List of dict that maps label to its corresponding saliency map.
        """
        inputs, labels, _ = self._unpack_next_element(next_element)
        sample_index = self._sample_index
        unions = []
        for _ in range(len(labels)):
            unions_labels = sample_id_labels[str(sample_index)]
            unions.append(unions_labels)
            sample_index += 1

        batch_unions = self._make_label_batch(unions)
        saliency_dict_lst = []

        if isinstance(explainer, RISE):
            # RISE handles the whole label batch in one call.
            batch_saliency_full = explainer(inputs, batch_unions)
        else:
            # Other explainers take one target label at a time; concatenate per-label maps on dim 1.
            batch_saliency_full = []
            for i in range(len(batch_unions[0])):
                batch_saliency = explainer(inputs, batch_unions[:, i])
                batch_saliency_full.append(batch_saliency)
            concat = ms.ops.operations.Concat(1)
            batch_saliency_full = concat(tuple(batch_saliency_full))

        for idx, union in enumerate(unions):
            saliency_dict = {}
            explain = Explain()
            explain.sample_id = self._sample_index
            for k, lab in enumerate(union):
                saliency = batch_saliency_full[idx:idx + 1, k:k + 1]
                saliency_dict[lab] = saliency

                saliency_np = _normalize(saliency.asnumpy().squeeze())
                saliency_image = _np_to_image(saliency_np, mode='L')
                heatmap_path = self._save_heatmap(explainer.__class__.__name__, lab, self._sample_index, saliency_image)

                explanation = explain.explanation.add()
                explanation.explain_method = explainer.__class__.__name__
                explanation.heatmap_path = heatmap_path
                explanation.label = lab

            summary.add_value("explainer", "explanation", explain)
            summary.record(1)

            self._sample_index += 1
            saliency_dict_lst.append(saliency_dict)
        return saliency_dict_lst

    def _run_exp_benchmark_step(self, next_element, explainer, benchmarker, saliency_dict_lst):
        """Run the explanation and evaluation for each step and write explanation results into summary."""
        inputs, labels, _ = self._unpack_next_element(next_element)
        for idx, inp in enumerate(inputs):
            inp = _EXPAND_DIMS(inp, 0)
            if isinstance(benchmarker, LabelAgnosticMetric):
                res = benchmarker.evaluate(explainer, inp)
                benchmarker.aggregate(res)
            else:
                saliency_dict = saliency_dict_lst[idx]
                for label, saliency in saliency_dict.items():
                    if isinstance(benchmarker, Localization):
                        # Localization needs the bbox masks; only ground-truth labels are scored.
                        _, _, bboxes = self._unpack_next_element(next_element, True)
                        if label in labels[idx]:
                            res = benchmarker.evaluate(explainer, inp, targets=label, mask=bboxes[idx][label],
                                                       saliency=saliency)
                            benchmarker.aggregate(res, label)
                    elif isinstance(benchmarker, LabelSensitiveMetric):
                        res = benchmarker.evaluate(explainer, inp, targets=label, saliency=saliency)
                        benchmarker.aggregate(res, label)
                    else:
                        raise TypeError('Benchmarker must be one of LabelSensitiveMetric or LabelAgnosticMetric, but'
                                        'receive {}'.format(type(benchmarker)))

    def _verify_data(self):
        """Verify dataset and labels."""
        # Inspect only the first element; the whole dataset is assumed to share its structure.
        next_element = next(self._dataset.create_tuple_iterator())

        if len(next_element) not in [1, 2, 3]:
            raise ValueError("The dataset should provide [images] or [images, labels], [images, labels, bboxes]"
                             " as columns.")

        if len(next_element) == 3:
            inputs, labels, bboxes = next_element
            if bboxes.shape[-1] != 4:
                raise ValueError("The third element of dataset should be bounding boxes with shape of "
                                 "[batch_size, num_ground_truth, 4].")
        else:
            if self._benchmarkers is not None:
                if any([isinstance(bench, Localization) for bench in self._benchmarkers]):
                    raise ValueError("The dataset must provide bboxes if Localization is to be computed.")

            if len(next_element) == 2:
                inputs, labels = next_element
            if len(next_element) == 1:
                inputs = next_element[0]

        if len(inputs.shape) > 4 or len(inputs.shape) < 3 or inputs.shape[-3] not in [1, 3, 4]:
            raise ValueError(
                "Image shape {} is unrecognizable: the dimension of image can only be CHW or NCHW.".format(
                    inputs.shape))
        if len(inputs.shape) == 3:
            log.warning(
                "Image shape {} is 3-dimensional. All the data will be automatically unsqueezed at the 0-th"
                " dimension as batch data.".format(inputs.shape))

        if len(next_element) > 1:
            if len(labels.shape) > 2 and (np.array(labels.shape[1:]) > 1).sum() > 1:
                raise ValueError(
                    "Labels shape {} is unrecognizable: outputs should not have more than two dimensions"
                    " with length greater than 1.".format(labels.shape))

    def _verify_network(self):
        """Verify the network."""
        label_set = set()
        for i, label in enumerate(self._labels):
            if label.strip() == "":
                raise ValueError(f"Label [{i}] is all whitespaces or empty. Please make sure there is "
                                 f"no empty label.")
            if label in label_set:
                raise ValueError(f"Duplicated label:{label}! Please make sure all labels are unique.")
            label_set.add(label)

        # Probe the network with one batch to check the output dimension against the label count.
        next_element = next(self._dataset.create_tuple_iterator())
        inputs, _, _ = self._unpack_next_element(next_element)
        prop_test = self._full_network(inputs)
        check_value_type("output of network in explainer", prop_test, ms.Tensor)
        if prop_test.shape[1] != len(self._labels):
            raise ValueError("The dimension of network output does not match the no. of classes. Please "
                             "check labels or the network in the explainer again.")

    def _verify_saliency(self):
        """Verify the saliency settings."""
        if self._explainers:
            explainer_classes = []
            for explainer in self._explainers:
                if explainer.__class__ in explainer_classes:
                    raise ValueError(f"Repeated {explainer.__class__.__name__} explainer! "
                                     "Please make sure all explainers' class is distinct.")
                if explainer.network is not self._network:
                    raise ValueError(f"The network of {explainer.__class__.__name__} explainer is different "
                                     "instance from network of runner. Please make sure they are the same "
                                     "instance.")
                explainer_classes.append(explainer.__class__)
        if self._benchmarkers:
            benchmarker_classes = []
            for benchmarker in self._benchmarkers:
                if benchmarker.__class__ in benchmarker_classes:
                    raise ValueError(f"Repeated {benchmarker.__class__.__name__} benchmarker! "
                                     "Please make sure all benchmarkers' class is distinct.")
                if isinstance(benchmarker, LabelSensitiveMetric) and benchmarker.num_labels != len(self._labels):
                    raise ValueError(f"The num_labels of {benchmarker.__class__.__name__} benchmarker is different "
                                     "from no. of labels of runner. Please make them are the same.")
                benchmarker_classes.append(benchmarker.__class__)

    def _verify_data_n_settings(self,
                                check_all=False,
                                check_registration=False,
                                check_data_n_network=False,
                                check_saliency=False):
        """
        Verify the validity of dataset and other settings.

        Args:
            check_all (bool): Set it True for checking everything.
            check_registration (bool): Set it True for checking registrations, check if it is enough to invoke run().
            check_data_n_network (bool): Set it True for checking data and network.
            check_saliency (bool): Set it True for checking saliency related settings.

        Raises:
            ValueError: Be raised for any data or settings' value problem.
            TypeError: Be raised for any data or settings' type problem.
        """
        if check_all:
            check_registration = True
            check_data_n_network = True
            check_saliency = True

        if check_registration:
            if not self._is_saliency_registered:
                raise ValueError("No explanation module was registered, user should at least call register_saliency()"
                                 " once with proper explanation instances")

        if check_data_n_network or check_saliency:
            self._verify_data()

        if check_data_n_network:
            self._verify_network()

        if check_saliency:
            self._verify_saliency()

    def _transform_data(self, inputs, labels, bboxes, ifbbox):
        """
        Transform the data from one iteration of dataset to a unifying form for the follow-up operations.

        Args:
            inputs (Tensor): the image data
            labels (Tensor): the labels
            bboxes (Tensor): the boudnding boxes data
            ifbbox (bool): whether to preprocess bboxes. If True, a dictionary that indicates bounding boxes w.r.t
                label id will be returned. If False, the returned bboxes is the the parsed bboxes.

        Returns:
            inputs (Tensor): the image data, unified to a 4D Tensor.
            labels (list[list[int]]): the ground truth labels.
            bboxes (Union[list[dict], None, Tensor]): the bounding boxes
        """
        inputs = ms.Tensor(inputs, ms.float32)
        if len(inputs.shape) == 3:
            # Unbatched CHW image: unsqueeze everything to batch form.
            inputs = _EXPAND_DIMS(inputs, 0)
            if isinstance(labels, ms.Tensor):
                labels = ms.Tensor(labels, ms.int32)
                labels = _EXPAND_DIMS(labels, 0)
            if isinstance(bboxes, ms.Tensor):
                bboxes = ms.Tensor(bboxes, ms.int32)
                bboxes = _EXPAND_DIMS(bboxes, 0)

        input_len = len(inputs)
        if bboxes is not None and ifbbox:
            # Convert raw [x_min, y_min, x_len, y_len] boxes into per-sample {label: binary mask} dicts.
            bboxes = ms.Tensor(bboxes, ms.int32)
            masks_lst = []
            labels = labels.asnumpy().reshape([input_len, -1])
            bboxes = bboxes.asnumpy().reshape([input_len, -1, 4])
            for idx, label in enumerate(labels):
                height, width = inputs[idx].shape[-2], inputs[idx].shape[-1]
                masks = {}
                for j, label_item in enumerate(label):
                    target = int(label_item)
                    if -1 < target < len(self._labels):
                        if target not in masks:
                            mask = np.zeros((1, 1, height, width))
                        else:
                            mask = masks[target]
                        x_min, y_min, x_len, y_len = bboxes[idx][j].astype(int)
                        mask[:, :, x_min:x_min + x_len, y_min:y_min + y_len] = 1
                        masks[target] = mask

                masks_lst.append(masks)
            bboxes = masks_lst

        labels = ms.Tensor(labels, ms.int32)
        if len(labels.shape) == 1:
            labels_lst = [[int(i)] for i in labels.asnumpy()]
        else:
            # Keep only valid label ids, deduplicated per sample.
            labels = labels.asnumpy().reshape([input_len, -1])
            labels_lst = []
            for item in labels:
                labels_lst.append(list(set(int(i) for i in item if -1 < int(i) < len(self._labels))))
        labels = labels_lst
        return inputs, labels, bboxes

    def _unpack_next_element(self, next_element, ifbbox=False):
        """
        Unpack a single iteration of dataset.

        Args:
            next_element (Tuple): a single element iterated from dataset object.
            ifbbox (bool): whether to preprocess bboxes in self._transform_data.

        Returns:
            tuple, a unified Tuple contains image_data, labels, and bounding boxes.
        """
        if len(next_element) == 3:
            inputs, labels, bboxes = next_element
        elif len(next_element) == 2:
            inputs, labels = next_element
            bboxes = None
        else:
            inputs = next_element[0]
            labels = [[] for _ in inputs]
            bboxes = None
        inputs, labels, bboxes = self._transform_data(inputs, labels, bboxes, ifbbox)
        return inputs, labels, bboxes

    @staticmethod
    def _make_label_batch(labels):
        """
        Unify a List of List of labels to be a 2D Tensor with shape (b, m), where b = len(labels) and m is the max
        length of all the rows in labels.

        Args:
            labels (List[List]): the union labels of a data batch.

        Returns:
            2D Tensor.
        """
        max_len = max([len(label) for label in labels])
        # Short rows are zero-padded up to the longest row.
        batch_labels = np.zeros((len(labels), max_len))

        for idx, _ in enumerate(batch_labels):
            length = len(labels[idx])
            batch_labels[idx, :length] = np.array(labels[idx])

        return ms.Tensor(batch_labels, ms.int32)

    def _save_original_image(self, sample_id, image):
        """Save an image to summary directory."""
        id_dirname = self._get_sample_dirname(sample_id)
        path_tokens = [self._summary_dir,
                       self._DATAFILE_DIRNAME_PREFIX + str(self._summary_timestamp),
                       self._ORIGINAL_IMAGE_DIRNAME,
                       id_dirname]

        abs_dir_path = self._create_subdir(*path_tokens)
        filename = f"{sample_id}.jpg"
        save_path = os.path.join(abs_dir_path, filename)
        image.save(save_path)
        os.chmod(save_path, self._FILE_MODE)
        # Return the path relative to the summary directory for storage in the summary record.
        return os.path.join(*path_tokens[1:], filename)

    def _save_heatmap(self, explain_method, class_id, sample_id, image):
        """Save heatmap image to summary directory."""
        id_dirname = self._get_sample_dirname(sample_id)
        path_tokens = [self._summary_dir,
                       self._DATAFILE_DIRNAME_PREFIX + str(self._summary_timestamp),
                       self._HEATMAP_DIRNAME,
                       explain_method,
                       id_dirname]

        abs_dir_path = self._create_subdir(*path_tokens)
        filename = f"{sample_id}_{class_id}.jpg"
        save_path = os.path.join(abs_dir_path, filename)
        image.save(save_path, optimize=True)
        os.chmod(save_path, self._FILE_MODE)
        # Return the path relative to the summary directory for storage in the summary record.
        return os.path.join(*path_tokens[1:], filename)

    def _create_subdir(self, *args):
        """Recursively create subdirectories."""
        abs_path = None
        for token in args:
            if abs_path is None:
                abs_path = os.path.realpath(token)
            else:
                abs_path = os.path.join(abs_path, token)
            # os.makedirs() don't set intermediate dir permission properly, we mkdir() one by one
            try:
                os.mkdir(abs_path, mode=self._DIR_MODE)
                # In some platform, mode may be ignored in os.mkdir(), we have to chmod() again to make sure
                os.chmod(abs_path, mode=self._DIR_MODE)
            except FileExistsError:
                pass
        return abs_path

    @classmethod
    def _get_sample_dirname(cls, sample_id):
        """Get the name of parent directory of the image id."""
        return str(int(sample_id / cls._SAMPLE_PER_DIR) * cls._SAMPLE_PER_DIR)

    @staticmethod
    def _extract_timestamp(filename):
        """Extract timestamp from summary filename; return None if no timestamp is found."""
        matched = re.search(r"summary\.(\d+)", filename)
        if matched:
            return int(matched.group(1))
        return None

    @classmethod
    def _spaced_print(cls, message, *args, **kwargs):
        """Spaced message printing. Extra args/kwargs (e.g. end='') are deliberately ignored."""
        # workaround to print logs starting new line in case line width mismatch.
        print(cls._SPACER.format(message))