
test_metrics.py

import unittest

import numpy as np
import torch

from fastNLP import AccuracyMetric
from fastNLP.core.metrics import _pred_topk, _accuracy_topk


class TestAccuracyMetric(unittest.TestCase):
    def test_AccuracyMetric1(self):
        # (1) only pred and target passed
        pred_dict = {"pred": torch.zeros(4, 3)}
        target_dict = {'target': torch.zeros(4)}
        metric = AccuracyMetric()
        metric(pred_dict=pred_dict, target_dict=target_dict)
        print(metric.get_metric())
    def test_AccuracyMetric2(self):
        # (2) with corrupted size
        try:
            pred_dict = {"pred": torch.zeros(4, 3, 2)}
            target_dict = {'target': torch.zeros(4)}
            metric = AccuracyMetric()
            metric(pred_dict=pred_dict, target_dict=target_dict)
            print(metric.get_metric())
        except Exception as e:
            print(e)
            return
        print("No exception caught.")
    def test_AccuracyMetric3(self):
        # (3) the second batch has a corrupted size
        try:
            metric = AccuracyMetric()
            pred_dict = {"pred": torch.zeros(4, 3, 2)}
            target_dict = {'target': torch.zeros(4, 3)}
            metric(pred_dict=pred_dict, target_dict=target_dict)

            pred_dict = {"pred": torch.zeros(4, 3, 2)}
            target_dict = {'target': torch.zeros(4)}
            metric(pred_dict=pred_dict, target_dict=target_dict)
            print(metric.get_metric())
        except Exception as e:
            print(e)
            return
        self.fail("No exception caught.")
    def test_AccuracyMetric4(self):
        # (4) check the computed accuracy
        metric = AccuracyMetric()
        pred_dict = {"pred": torch.randn(4, 3, 2)}
        target_dict = {'target': torch.ones(4, 3)}
        metric(pred_dict=pred_dict, target_dict=target_dict)
        ans = torch.argmax(pred_dict["pred"], dim=2).to(target_dict["target"]) == target_dict["target"]
        res = metric.get_metric()
        self.assertTrue(isinstance(res, dict))
        self.assertTrue("acc" in res)
        self.assertAlmostEqual(res["acc"], float(ans.float().mean()), places=3)
    def test_AccuracyMetric5(self):
        # (5) check reset
        metric = AccuracyMetric()
        pred_dict = {"pred": torch.randn(4, 3, 2)}
        target_dict = {'target': torch.zeros(4, 3)}
        metric(pred_dict=pred_dict, target_dict=target_dict)
        res = metric.get_metric(reset=False)
        ans = (torch.argmax(pred_dict["pred"], dim=2).float() == target_dict["target"]).float().mean()
        self.assertAlmostEqual(res["acc"], float(ans), places=4)
    def test_AccuracyMetric6(self):
        # (6) check that numpy arrays are not accepted
        try:
            metric = AccuracyMetric()
            pred_dict = {"pred": np.zeros((4, 3, 2))}
            target_dict = {'target': np.zeros((4, 3))}
            metric(pred_dict=pred_dict, target_dict=target_dict)
        except Exception as e:
            print(e)
            return
        self.fail("No exception caught.")
    def test_AccuracyMetric7(self):
        # (7) check key mapping when the names match
        metric = AccuracyMetric(pred='predictions', target='targets')
        pred_dict = {"predictions": torch.randn(4, 3, 2)}
        target_dict = {'targets': torch.zeros(4, 3)}
        metric(pred_dict=pred_dict, target_dict=target_dict)
        res = metric.get_metric()
        ans = (torch.argmax(pred_dict["predictions"], dim=2).float() == target_dict["targets"]).float().mean()
        self.assertAlmostEqual(res["acc"], float(ans), places=4)
    def test_AccuracyMetric8(self):
        # (8) check key mapping when the pred key does not match
        try:
            metric = AccuracyMetric(pred='predictions', target='targets')
            pred_dict = {"prediction": torch.zeros(4, 3, 2)}
            target_dict = {'targets': torch.zeros(4, 3)}
            metric(pred_dict=pred_dict, target_dict=target_dict)
            self.assertDictEqual(metric.get_metric(), {'acc': 1})
        except Exception as e:
            print(e)
            return
        self.fail("No exception caught.")
    def test_AccuracyMetric9(self):
        # (9) check key mapping when an unused key is included
        try:
            metric = AccuracyMetric(pred='prediction', target='targets')
            pred_dict = {"prediction": torch.zeros(4, 3, 2), 'unused': 1}
            target_dict = {'targets': torch.zeros(4, 3)}
            metric(pred_dict=pred_dict, target_dict=target_dict)
            self.assertDictEqual(metric.get_metric(), {'acc': 1})
        except Exception as e:
            print(e)
            return
        # reaching this point means the unused key was tolerated
    def test_AccuracyMetric10(self):
        # (10) check _fast_metric
        try:
            metric = AccuracyMetric()
            pred_dict = {"predictions": torch.zeros(4, 3, 2), "seq_len": torch.ones(3) * 3}
            target_dict = {'targets': torch.zeros(4, 3)}
            metric(pred_dict=pred_dict, target_dict=target_dict)
            self.assertDictEqual(metric.get_metric(), {'acc': 1})
        except Exception as e:
            print(e)
            return
        self.fail("No exception caught.")
    def test_seq_len(self):
        N = 256
        seq_len = torch.zeros(N).long()
        seq_len[0] = 2
        pred = {'pred': torch.ones(N, 2)}
        target = {'target': torch.ones(N, 2), 'seq_len': seq_len}
        metric = AccuracyMetric()
        metric(pred_dict=pred, target_dict=target)
        self.assertDictEqual(metric.get_metric(), {'acc': 1.})
        seq_len[1:] = 1
        metric(pred_dict=pred, target_dict=target)
        self.assertDictEqual(metric.get_metric(), {'acc': 1.})
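

# Illustrative only, not part of fastNLP: a minimal sketch of the masked
# token-level accuracy that test_seq_len above expects, assuming `pred` and
# `target` already hold label predictions of shape (batch, max_len) and that
# `seq_len` gives the number of valid positions per sample. The helper name
# is ours, not a fastNLP API.
def _masked_accuracy_sketch(pred, target, seq_len):
    max_len = target.size(1)
    # positions at or beyond each sample's seq_len are excluded from the average
    mask = torch.arange(max_len).unsqueeze(0) < seq_len.unsqueeze(1)
    correct = ((pred == target) & mask).sum().float()
    total = mask.sum().float().clamp(min=1)
    return float(correct / total)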


class SpanF1PreRecMetric(unittest.TestCase):
    def test_case1(self):
        from fastNLP.core.metrics import _bmes_tag_to_spans
        from fastNLP.core.metrics import _bio_tag_to_spans

        bmes_lst = ['M-8', 'S-2', 'S-0', 'B-9', 'B-6', 'E-5', 'B-7', 'S-2', 'E-7', 'S-8']
        bio_lst = ['O-8', 'O-2', 'B-0', 'O-9', 'I-6', 'I-5', 'I-7', 'I-2', 'I-7', 'O-8']
        expect_bmes_res = set()
        expect_bmes_res.update([('8', (0, 1)), ('2', (1, 2)), ('0', (2, 3)), ('9', (3, 4)), ('6', (4, 5)),
                                ('5', (5, 6)), ('7', (6, 7)), ('2', (7, 8)), ('7', (8, 9)), ('8', (9, 10))])
        expect_bio_res = set()
        expect_bio_res.update([('7', (8, 9)), ('0', (2, 3)), ('2', (7, 8)), ('5', (5, 6)),
                               ('6', (4, 5)), ('7', (6, 7))])
        self.assertSetEqual(expect_bmes_res, set(_bmes_tag_to_spans(bmes_lst)))
        self.assertSetEqual(expect_bio_res, set(_bio_tag_to_spans(bio_lst)))
        # Verified against the corresponding allennlp functions. Since the tests must not
        # depend on allennlp, only the fixed example above is checked here.
        # from allennlp.data.dataset_readers.dataset_utils import bio_tags_to_spans as allen_bio_tags_to_spans
        # from allennlp.data.dataset_readers.dataset_utils import bmes_tags_to_spans as allen_bmes_tags_to_spans
        # for i in range(1000):
        #     strs = list(map(str, np.random.randint(100, size=1000)))
        #     bmes = list('bmes'.upper())
        #     bmes_strs = [str_ + '-' + tag for tag, str_ in zip(strs, np.random.choice(bmes, size=len(strs)))]
        #     bio = list('bio'.upper())
        #     bio_strs = [str_ + '-' + tag for tag, str_ in zip(strs, np.random.choice(bio, size=len(strs)))]
        #     self.assertSetEqual(set(allen_bmes_tags_to_spans(bmes_strs)), set(bmes_tag_to_spans(bmes_strs)))
        #     self.assertSetEqual(set(allen_bio_tags_to_spans(bio_strs)), set(bio_tag_to_spans(bio_strs)))
    def test_case2(self):
        # test tags that carry no label
        from fastNLP.core.metrics import _bmes_tag_to_spans
        from fastNLP.core.metrics import _bio_tag_to_spans

        bmes_lst = ['B', 'E', 'B', 'S', 'B', 'M', 'E', 'M', 'B', 'E']
        bio_lst = ['I', 'B', 'O', 'O', 'I', 'O', 'I', 'B', 'O', 'O']
        expect_bmes_res = set()
        expect_bmes_res.update([('', (0, 2)), ('', (2, 3)), ('', (3, 4)), ('', (4, 7)), ('', (7, 8)), ('', (8, 10))])
        expect_bio_res = set()
        expect_bio_res.update([('', (7, 8)), ('', (6, 7)), ('', (4, 5)), ('', (0, 1)), ('', (1, 2))])
        self.assertSetEqual(expect_bmes_res, set(_bmes_tag_to_spans(bmes_lst)))
        self.assertSetEqual(expect_bio_res, set(_bio_tag_to_spans(bio_lst)))
        # Verified against the corresponding allennlp functions. Since the tests must not
        # depend on allennlp, only the fixed example above is checked here.
        # from allennlp.data.dataset_readers.dataset_utils import bio_tags_to_spans as allen_bio_tags_to_spans
        # from allennlp.data.dataset_readers.dataset_utils import bmes_tags_to_spans as allen_bmes_tags_to_spans
        # for i in range(1000):
        #     bmes = list('bmes'.upper())
        #     bmes_strs = np.random.choice(bmes, size=1000)
        #     bio = list('bio'.upper())
        #     bio_strs = np.random.choice(bio, size=100)
        #     self.assertSetEqual(set(allen_bmes_tags_to_spans(bmes_strs)), set(bmes_tag_to_spans(bmes_strs)))
        #     self.assertSetEqual(set(allen_bio_tags_to_spans(bio_strs)), set(bio_tag_to_spans(bio_strs)))
    def test_case3(self):
        from fastNLP.core.vocabulary import Vocabulary
        from collections import Counter
        from fastNLP.core.metrics import SpanFPreRecMetric
        # check against allennlp that the F metric is computed correctly

        def generate_allen_tags(encoding_type, number_labels=4):
            vocab = {}
            for i in range(number_labels):
                label = str(i)
                for tag in encoding_type:
                    if tag == 'O':
                        if tag not in vocab:
                            vocab['O'] = len(vocab) + 1
                        continue
                    vocab['{}-{}'.format(tag, label)] = len(vocab) + 1  # the value actually represents this tag's count
            return vocab
        number_labels = 4
        # bio tag
        fastnlp_bio_vocab = Vocabulary(unknown=None, padding=None)
        fastnlp_bio_vocab.word_count = Counter(generate_allen_tags('BIO', number_labels))
        fastnlp_bio_metric = SpanFPreRecMetric(tag_vocab=fastnlp_bio_vocab, only_gross=False)
        bio_sequence = torch.FloatTensor(
            [[[-0.9543, -1.4357, -0.2365, 0.2438, 1.0312, -1.4302, 0.3011, 0.0470, 0.0971],
              [-0.6638, -0.7116, -1.9804, 0.2787, -0.2732, -0.9501, -1.4523, 0.7987, -0.3970],
              [0.2939, 0.8132, -0.0903, -2.8296, 0.2080, -0.9823, -0.1898, 0.6880, 1.4348],
              [-0.1886, 0.0067, -0.6862, -0.4635, 2.2776, 0.0710, -1.6793, -1.6876, -0.8917],
              [-0.7663, 0.6377, 0.8669, 0.1237, 1.7628, 0.0313, -1.0824, 1.4217, 0.2622]],

             [[0.1529, 0.7474, -0.9037, 1.5287, 0.2771, 0.2223, 0.8136, 1.3592, -0.8973],
              [0.4515, -0.5235, 0.3265, -1.1947, 0.8308, 1.8754, -0.4887, -0.4025, -0.3417],
              [-0.7855, 0.1615, -0.1272, -1.9289, -0.5181, 1.9742, -0.9698, 0.2861, -0.3966],
              [-0.8291, -0.8823, -1.1496, 0.2164, 1.3390, -0.3964, -0.5275, 0.0213, 1.4777],
              [-1.1299, 0.0627, -0.1358, -1.5951, 0.4484, -0.6081, -1.9566, 1.3024, 0.2001]]]
        )
        bio_target = torch.LongTensor([[5., 0., 3., 3., 3.],
                                       [5., 6., 8., 6., 0.]])
        fastnlp_bio_metric({'pred': bio_sequence, 'seq_lens': torch.LongTensor([5, 5])}, {'target': bio_target})
        expect_bio_res = {'pre-1': 0.24999999999999373, 'rec-1': 0.499999999999975, 'f-1': 0.33333333333327775,
                          'pre-2': 0.0, 'rec-2': 0.0, 'f-2': 0.0, 'pre-3': 0.0, 'rec-3': 0.0, 'f-3': 0.0,
                          'pre-0': 0.0, 'rec-0': 0.0, 'f-0': 0.0,
                          'pre': 0.12499999999999845, 'rec': 0.12499999999999845, 'f': 0.12499999999994846}
        self.assertDictEqual(expect_bio_res, fastnlp_bio_metric.get_metric())
        # bmes tag
        bmes_sequence = torch.FloatTensor(
            [[[0.6536, -0.7179, 0.6579, 1.2503, 0.4176, 0.6696, 0.2352, -0.4085,
               0.4084, -0.4185, 1.4172, -0.9162, -0.2679, 0.3332, -0.3505, -0.6002],
              [0.3238, -1.2378, -1.3304, -0.4903, 1.4518, -0.1868, -0.7641, 1.6199,
               -0.8877, 0.1449, 0.8995, -0.5810, 0.1041, 0.1002, 0.4439, 0.2514],
              [-0.8362, 2.9526, 0.8008, 0.1193, 1.0488, 0.6670, 1.1696, -1.1006,
               -0.8540, -0.1600, -0.9519, -0.2749, -0.4948, -1.4753, 0.5802, -0.0516],
              [-0.8383, -1.7292, -1.4079, -1.5023, 0.5383, 0.6653, 0.3121, 4.1249,
               -0.4173, -0.2043, 1.7755, 1.1110, -1.7069, -0.0390, -0.9242, -0.0333],
              [0.9088, -0.4955, -0.5076, 0.3732, 0.0283, -0.0263, -1.0393, 0.7734,
               1.0968, 0.4132, -1.3647, -0.5762, 0.6678, 0.8809, -0.3779, -0.3195]],

             [[-0.4638, -0.5939, -0.1052, -0.5573, 0.4600, -1.3484, 0.1753, 0.0685,
               0.3663, -0.6789, 0.0097, 1.0327, -0.0212, -0.9957, -0.1103, 0.4417],
              [-0.2903, 0.9205, -1.5758, -1.0421, 0.2921, -0.2142, -0.3049, -0.0879,
               -0.4412, -1.3195, -0.0657, -0.2986, 0.7214, 0.0631, -0.6386, 0.2797],
              [0.6440, -0.3748, 1.2912, -0.0170, 0.7447, 1.4075, -0.4947, 0.4123,
               -0.8447, -0.5502, 0.3520, -0.2832, 0.5019, -0.1522, 1.1237, -1.5385],
              [0.2839, -0.7649, 0.9067, -0.1163, -1.3789, 0.2571, -1.3977, -0.3680,
               -0.8902, -0.6983, -1.1583, 1.2779, 0.2197, 0.1376, -0.0591, -0.2461],
              [-0.2977, -1.8564, -0.5347, 1.0011, -1.1260, 0.4252, -2.0097, 2.6973,
               -0.8308, -1.4939, 0.9865, -0.3935, 0.2743, 0.1142, -0.7344, -1.2046]]]
        )
        bmes_target = torch.LongTensor([[9., 6., 1., 9., 15.],
                                        [6., 15., 6., 15., 5.]])
        fastnlp_bmes_vocab = Vocabulary(unknown=None, padding=None)
        fastnlp_bmes_vocab.word_count = Counter(generate_allen_tags('BMES', number_labels))
        fastnlp_bmes_metric = SpanFPreRecMetric(tag_vocab=fastnlp_bmes_vocab, only_gross=False, encoding_type='bmes')
        fastnlp_bmes_metric({'pred': bmes_sequence, 'seq_lens': torch.LongTensor([20, 20])}, {'target': bmes_target})
        expect_bmes_res = {'f-3': 0.6666666666665778, 'pre-3': 0.499999999999975, 'rec-3': 0.9999999999999001,
                           'f-0': 0.0, 'pre-0': 0.0, 'rec-0': 0.0, 'f-1': 0.33333333333327775,
                           'pre-1': 0.24999999999999373, 'rec-1': 0.499999999999975, 'f-2': 0.7499999999999314,
                           'pre-2': 0.7499999999999812, 'rec-2': 0.7499999999999812, 'f': 0.49999999999994504,
                           'pre': 0.499999999999995, 'rec': 0.499999999999995}
        self.assertDictEqual(fastnlp_bmes_metric.get_metric(), expect_bmes_res)
        # Already verified against allennlp; since allennlp cannot be a dependency,
        # the following code is commented out.
        # from allennlp.data.vocabulary import Vocabulary as allen_Vocabulary
        # from allennlp.training.metrics import SpanBasedF1Measure
        # allen_bio_vocab = allen_Vocabulary({"tags": generate_allen_tags('BIO', number_labels)},
        #                                    non_padded_namespaces=['tags'])
        # allen_bio_metric = SpanBasedF1Measure(allen_bio_vocab, 'tags')
        # bio_sequence = torch.randn(size=(2, 20, 2 * number_labels + 1))
        # bio_target = torch.randint(2 * number_labels + 1, size=(2, 20))
        # allen_bio_metric(bio_sequence, bio_target, torch.ones(2, 20))
        # fastnlp_bio_vocab = Vocabulary(unknown=None, padding=None)
        # fastnlp_bio_vocab.word_count = Counter(generate_allen_tags('BIO', number_labels))
        # fastnlp_bio_metric = SpanFPreRecMetric(tag_vocab=fastnlp_bio_vocab, only_gross=False)
        #
        # def convert_allen_res_to_fastnlp_res(metric_result):
        #     allen_result = {}
        #     key_map = {'f1-measure-overall': "f", "recall-overall": "rec", "precision-overall": "pre"}
        #     for key, value in metric_result.items():
        #         if key in key_map:
        #             key = key_map[key]
        #         else:
        #             label = key.split('-')[-1]
        #             if key.startswith('f1'):
        #                 key = 'f-{}'.format(label)
        #             else:
        #                 key = '{}-{}'.format(key[:3], label)
        #         allen_result[key] = value
        #     return allen_result
        #
        # # print(convert_allen_res_to_fastnlp_res(allen_bio_metric.get_metric()))
        # # print(fastnlp_bio_metric.get_metric())
        # self.assertDictEqual(convert_allen_res_to_fastnlp_res(allen_bio_metric.get_metric()),
        #                      fastnlp_bio_metric.get_metric())
        #
        # allen_bmes_vocab = allen_Vocabulary({"tags": generate_allen_tags('BMES', number_labels)})
        # allen_bmes_metric = SpanBasedF1Measure(allen_bmes_vocab, 'tags', label_encoding='BMES')
        # fastnlp_bmes_vocab = Vocabulary(unknown=None, padding=None)
        # fastnlp_bmes_vocab.word_count = Counter(generate_allen_tags('BMES', number_labels))
        # fastnlp_bmes_metric = SpanFPreRecMetric(tag_vocab=fastnlp_bmes_vocab, only_gross=False, encoding_type='bmes')
        # bmes_sequence = torch.randn(size=(2, 20, 4 * number_labels))
        # bmes_target = torch.randint(4 * number_labels, size=(2, 20))
        # allen_bmes_metric(bmes_sequence, bmes_target, torch.ones(2, 20))
        # fastnlp_bmes_metric({'pred': bmes_sequence, 'seq_lens': torch.LongTensor([20, 20])}, {'target': bmes_target})
        #
        # # print(convert_allen_res_to_fastnlp_res(allen_bmes_metric.get_metric()))
        # # print(fastnlp_bmes_metric.get_metric())
        # self.assertDictEqual(convert_allen_res_to_fastnlp_res(allen_bmes_metric.get_metric()),
        #                      fastnlp_bmes_metric.get_metric())
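

# Illustrative only, not part of fastNLP: a simplified sketch of the BIO span
# extraction that _bio_tag_to_spans is exercised on in SpanF1PreRecMetric.test_case1
# and test_case2 above. It returns (label, (start, end)) pairs with an exclusive end,
# treats a dangling 'I' as the start of a new span, and ignores 'O' tags. The helper
# name is ours and the behavior is inferred from the expected sets in the tests,
# not taken from the fastNLP implementation.
def _bio_to_spans_sketch(tags):
    spans = []  # list of [label, [start, end]] while building
    for i, tag in enumerate(tags):
        prefix, _, label = tag.partition('-')  # 'B-0' -> ('B', '0'); bare 'B' -> ('B', '')
        if prefix == 'B':
            spans.append([label, [i, i + 1]])
        elif prefix == 'I' and spans and spans[-1][0] == label and spans[-1][1][1] == i:
            spans[-1][1][1] = i + 1  # a contiguous 'I' with the same label extends the span
        elif prefix == 'I':
            spans.append([label, [i, i + 1]])  # dangling 'I' starts its own span
        # 'O' (with or without a label suffix) closes any open span
    return {(label, (start, end)) for label, (start, end) in spans}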


class TestUsefulFunctions(unittest.TestCase):
    # test some useful-looking helper functions in metrics.py
    def test_case_1(self):
        # multi-class
        _ = _accuracy_topk(np.random.randint(0, 3, size=(10, 1)), np.random.randint(0, 3, size=(10, 1)), k=3)
        _ = _pred_topk(np.random.randint(0, 3, size=(10, 1)))
        # running without error is enough
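

# Illustrative only, not fastNLP's implementation: a generic top-k accuracy for
# intuition about what helpers like _accuracy_topk compute. The function name,
# argument names, and argument order here are ours (hypothetical) and may not
# match the fastNLP signatures exercised above.
def _topk_accuracy_sketch(y_true, y_prob, k=1):
    # y_true: (n_samples,) integer labels; y_prob: (n_samples, n_classes) scores.
    topk = np.argsort(y_prob, axis=1)[:, -k:]              # indices of the k highest scores
    hits = np.any(topk == y_true.reshape(-1, 1), axis=1)   # is the true label among them?
    return float(hits.mean())


# Convenience entry point so the module can also be run directly
# (python test_metrics.py); unittest discovery is unaffected.
if __name__ == '__main__':
    unittest.main()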