
test_pynative_hccl.py
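This MindSpore test spawns one subprocess per Ascend device, runs a single-op AllReduce network in PyNative mode on each rank, and checks that a tensor of ones is summed across all eight devices.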

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
  15. """test bert thor performance with 8p on mlperf dataset"""
import os
from multiprocessing import Process, Queue

import pytest
import numpy as np

import mindspore.nn as nn
from mindspore import Tensor
from mindspore import dtype as mstype
from mindspore.ops import operations as P
import mindspore.communication.management as D
from mindspore import context
from mindspore.context import ParallelMode

MINDSPORE_HCCL_CONFIG_PATH = "/home/workspace/mindspore_config/hccl/rank_table_8p.json"

np.random.seed(1)
os.environ['GLOG_v'] = str(2)

class AllReduceNet(nn.Cell):
    """Single-op network that all-reduces its input across ranks."""

    def __init__(self):
        super(AllReduceNet, self).__init__()
        self.all_reduce = P.AllReduce()

    def construct(self, x):
        return self.all_reduce(x)

def train_allreduce_8p(q, device_id, device_num):
    # Give each rank its own working directory so device logs do not collide.
    os.system("mkdir " + str(device_id))
    os.chdir(str(device_id))
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend", device_id=device_id)
    os.environ['MINDSPORE_HCCL_CONFIG_PATH'] = MINDSPORE_HCCL_CONFIG_PATH
    os.environ['RANK_ID'] = str(device_id)
    os.environ['RANK_SIZE'] = str(device_num)
    D.init()
    context.reset_auto_parallel_context()
    context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,
                                      device_num=device_num)

    net = AllReduceNet()
    input_x = np.ones([32, 255, 255, 3]).astype(np.float32)
    # AllReduce sums a tensor of ones over all ranks, so every element equals device_num.
    expect_output = input_x * device_num
    output = net(Tensor(input_x, mstype.float32))
    q.put(np.allclose(output.asnumpy(), expect_output))

@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_single
def test_pynative_hccl_8p():
    device_num = 8
    process = []
    q = Queue()
    for i in range(device_num):
        device_id = i
        process.append(Process(target=train_allreduce_8p, args=(q, device_id, device_num)))
    for i in range(device_num):
        process[i].start()

    print("Waiting for all subprocesses done...")
    for i in range(device_num):
        process[i].join()

    # check result
    for i in range(device_num):
        assert not q.empty()
        assert q.get()

    for i in range(device_num):
        os.system("rm -rf " + str(i))
    print("End training...")

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_single
def test_pynative_hccl_8pv2():
    # GRAPH_OP_RUN=1 switches Ascend to kernel-by-kernel (non-task-sink) execution.
    os.environ['GRAPH_OP_RUN'] = str(1)
    device_num = 8
    process = []
    q = Queue()
    for i in range(device_num):
        device_id = i
        process.append(Process(target=train_allreduce_8p, args=(q, device_id, device_num)))
    for i in range(device_num):
        process[i].start()

    print("Waiting for all subprocesses done...")
    for i in range(device_num):
        process[i].join()

    # check result
    for i in range(device_num):
        assert not q.empty()
        assert q.get()

    for i in range(device_num):
        os.system("rm -rf " + str(i))
    print("End training...")
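To run these tests locally (a sketch, assuming a host with eight Ascend devices and a valid rank table at the MINDSPORE_HCCL_CONFIG_PATH above):

pytest -sv test_pynative_hccl.py::test_pynative_hccl_8p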