You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

test_trainer_gpu.py 8.6 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264
  1. # Copyright (c) Alibaba, Inc. and its affiliates.
  2. import glob
  3. import os
  4. import shutil
  5. import tempfile
  6. import unittest
  7. import json
  8. import numpy as np
  9. import torch
  10. from torch import nn
  11. from torch.optim import SGD
  12. from torch.optim.lr_scheduler import StepLR
  13. from modelscope.metrics.builder import MetricKeys
  14. from modelscope.trainers import build_trainer
  15. from modelscope.utils.constant import LogKeys, ModeKeys, ModelFile
  16. from modelscope.utils.test_utils import (DistributedTestCase,
  17. create_dummy_test_dataset, test_level)
  18. class DummyMetric:
  19. def __call__(self, ground_truth, predict_results):
  20. return {'accuracy': 0.5}
  21. dummy_dataset_small = create_dummy_test_dataset(
  22. np.random.random(size=(5, )), np.random.randint(0, 4, (1, )), 20)
  23. dummy_dataset_big = create_dummy_test_dataset(
  24. np.random.random(size=(5, )), np.random.randint(0, 4, (1, )), 40)
  25. class DummyModel(nn.Module):
  26. def __init__(self):
  27. super().__init__()
  28. self.linear = nn.Linear(5, 4)
  29. self.bn = nn.BatchNorm1d(4)
  30. def forward(self, feat, labels):
  31. x = self.linear(feat)
  32. x = self.bn(x)
  33. loss = torch.sum(x)
  34. return dict(logits=x, loss=loss)
  35. def train_func(work_dir, dist=False):
  36. json_cfg = {
  37. 'train': {
  38. 'work_dir': work_dir,
  39. 'dataloader': {
  40. 'batch_size_per_gpu': 2,
  41. 'workers_per_gpu': 1
  42. },
  43. 'hooks': [{
  44. 'type': 'EvaluationHook',
  45. 'interval': 1
  46. }]
  47. },
  48. 'evaluation': {
  49. 'dataloader': {
  50. 'batch_size_per_gpu': 1,
  51. 'workers_per_gpu': 1,
  52. 'shuffle': False
  53. },
  54. 'metrics': ['seq_cls_metric']
  55. }
  56. }
  57. config_path = os.path.join(work_dir, ModelFile.CONFIGURATION)
  58. with open(config_path, 'w') as f:
  59. json.dump(json_cfg, f)
  60. model = DummyModel()
  61. optimmizer = SGD(model.parameters(), lr=0.01)
  62. lr_scheduler = StepLR(optimmizer, 2)
  63. trainer_name = 'EpochBasedTrainer'
  64. kwargs = dict(
  65. cfg_file=config_path,
  66. model=model,
  67. data_collator=None,
  68. train_dataset=dummy_dataset_big,
  69. eval_dataset=dummy_dataset_small,
  70. optimizers=(optimmizer, lr_scheduler),
  71. max_epochs=3,
  72. device='gpu',
  73. launcher='pytorch' if dist else None)
  74. trainer = build_trainer(trainer_name, kwargs)
  75. trainer.train()
  76. @unittest.skipIf(not torch.cuda.is_available(), 'cuda unittest')
  77. class TrainerTestSingleGpu(unittest.TestCase):
  78. def setUp(self):
  79. print(('Testing %s.%s' % (type(self).__name__, self._testMethodName)))
  80. self.tmp_dir = tempfile.TemporaryDirectory().name
  81. if not os.path.exists(self.tmp_dir):
  82. os.makedirs(self.tmp_dir)
  83. def tearDown(self):
  84. super().tearDown()
  85. shutil.rmtree(self.tmp_dir)
  86. @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
  87. def test_single_gpu(self):
  88. train_func(self.tmp_dir)
  89. results_files = os.listdir(self.tmp_dir)
  90. json_files = glob.glob(os.path.join(self.tmp_dir, '*.log.json'))
  91. self.assertEqual(len(json_files), 1)
  92. with open(json_files[0], 'r') as f:
  93. lines = [i.strip() for i in f.readlines()]
  94. self.assertDictContainsSubset(
  95. {
  96. LogKeys.MODE: ModeKeys.TRAIN,
  97. LogKeys.EPOCH: 1,
  98. LogKeys.ITER: 10,
  99. LogKeys.LR: 0.01
  100. }, json.loads(lines[0]))
  101. self.assertDictContainsSubset(
  102. {
  103. LogKeys.MODE: ModeKeys.TRAIN,
  104. LogKeys.EPOCH: 1,
  105. LogKeys.ITER: 20,
  106. LogKeys.LR: 0.01
  107. }, json.loads(lines[1]))
  108. self.assertDictContainsSubset(
  109. {
  110. LogKeys.MODE: ModeKeys.EVAL,
  111. LogKeys.EPOCH: 1,
  112. LogKeys.ITER: 20
  113. }, json.loads(lines[2]))
  114. self.assertDictContainsSubset(
  115. {
  116. LogKeys.MODE: ModeKeys.TRAIN,
  117. LogKeys.EPOCH: 2,
  118. LogKeys.ITER: 10,
  119. LogKeys.LR: 0.001
  120. }, json.loads(lines[3]))
  121. self.assertDictContainsSubset(
  122. {
  123. LogKeys.MODE: ModeKeys.TRAIN,
  124. LogKeys.EPOCH: 2,
  125. LogKeys.ITER: 20,
  126. LogKeys.LR: 0.001
  127. }, json.loads(lines[4]))
  128. self.assertDictContainsSubset(
  129. {
  130. LogKeys.MODE: ModeKeys.EVAL,
  131. LogKeys.EPOCH: 2,
  132. LogKeys.ITER: 20
  133. }, json.loads(lines[5]))
  134. self.assertDictContainsSubset(
  135. {
  136. LogKeys.MODE: ModeKeys.TRAIN,
  137. LogKeys.EPOCH: 3,
  138. LogKeys.ITER: 10,
  139. LogKeys.LR: 0.001
  140. }, json.loads(lines[6]))
  141. self.assertDictContainsSubset(
  142. {
  143. LogKeys.MODE: ModeKeys.TRAIN,
  144. LogKeys.EPOCH: 3,
  145. LogKeys.ITER: 20,
  146. LogKeys.LR: 0.001
  147. }, json.loads(lines[7]))
  148. self.assertDictContainsSubset(
  149. {
  150. LogKeys.MODE: ModeKeys.EVAL,
  151. LogKeys.EPOCH: 3,
  152. LogKeys.ITER: 20
  153. }, json.loads(lines[8]))
  154. self.assertIn(f'{LogKeys.EPOCH}_1.pth', results_files)
  155. self.assertIn(f'{LogKeys.EPOCH}_2.pth', results_files)
  156. self.assertIn(f'{LogKeys.EPOCH}_3.pth', results_files)
  157. for i in [0, 1, 3, 4, 6, 7]:
  158. self.assertIn(LogKeys.DATA_LOAD_TIME, lines[i])
  159. self.assertIn(LogKeys.ITER_TIME, lines[i])
  160. for i in [2, 5, 8]:
  161. self.assertIn(MetricKeys.ACCURACY, lines[i])
  162. @unittest.skipIf(not torch.cuda.is_available()
  163. or torch.cuda.device_count() <= 1, 'distributed unittest')
  164. class TrainerTestMultiGpus(DistributedTestCase):
  165. def setUp(self):
  166. print(('Testing %s.%s' % (type(self).__name__, self._testMethodName)))
  167. self.tmp_dir = tempfile.TemporaryDirectory().name
  168. if not os.path.exists(self.tmp_dir):
  169. os.makedirs(self.tmp_dir)
  170. def tearDown(self):
  171. super().tearDown()
  172. shutil.rmtree(self.tmp_dir)
  173. @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
  174. def test_multi_gpus(self):
  175. self.start(train_func, num_gpus=2, work_dir=self.tmp_dir, dist=True)
  176. results_files = os.listdir(self.tmp_dir)
  177. json_files = glob.glob(os.path.join(self.tmp_dir, '*.log.json'))
  178. self.assertEqual(len(json_files), 1)
  179. with open(json_files[0], 'r') as f:
  180. lines = [i.strip() for i in f.readlines()]
  181. self.assertDictContainsSubset(
  182. {
  183. LogKeys.MODE: ModeKeys.TRAIN,
  184. LogKeys.EPOCH: 1,
  185. LogKeys.ITER: 10,
  186. LogKeys.LR: 0.01
  187. }, json.loads(lines[0]))
  188. self.assertDictContainsSubset(
  189. {
  190. LogKeys.MODE: ModeKeys.EVAL,
  191. LogKeys.EPOCH: 1,
  192. LogKeys.ITER: 10
  193. }, json.loads(lines[1]))
  194. self.assertDictContainsSubset(
  195. {
  196. LogKeys.MODE: ModeKeys.TRAIN,
  197. LogKeys.EPOCH: 2,
  198. LogKeys.ITER: 10,
  199. LogKeys.LR: 0.001
  200. }, json.loads(lines[2]))
  201. self.assertDictContainsSubset(
  202. {
  203. LogKeys.MODE: ModeKeys.EVAL,
  204. LogKeys.EPOCH: 2,
  205. LogKeys.ITER: 10
  206. }, json.loads(lines[3]))
  207. self.assertDictContainsSubset(
  208. {
  209. LogKeys.MODE: ModeKeys.TRAIN,
  210. LogKeys.EPOCH: 3,
  211. LogKeys.ITER: 10,
  212. LogKeys.LR: 0.001
  213. }, json.loads(lines[4]))
  214. self.assertDictContainsSubset(
  215. {
  216. LogKeys.MODE: ModeKeys.EVAL,
  217. LogKeys.EPOCH: 3,
  218. LogKeys.ITER: 10
  219. }, json.loads(lines[5]))
  220. self.assertIn(f'{LogKeys.EPOCH}_1.pth', results_files)
  221. self.assertIn(f'{LogKeys.EPOCH}_2.pth', results_files)
  222. self.assertIn(f'{LogKeys.EPOCH}_3.pth', results_files)
  223. for i in [0, 2, 4]:
  224. self.assertIn(LogKeys.DATA_LOAD_TIME, lines[i])
  225. self.assertIn(LogKeys.ITER_TIME, lines[i])
  226. for i in [1, 3, 5]:
  227. self.assertIn(MetricKeys.ACCURACY, lines[i])
  228. if __name__ == '__main__':
  229. unittest.main()