You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

launch.py 6.2 kB

5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """launch train script"""
  16. import os
  17. import sys
  18. import json
  19. import subprocess
  20. import shutil
  21. from argparse import ArgumentParser
  22. def parse_args():
  23. """
  24. parse args .
  25. Args:
  26. Returns:
  27. args.
  28. Examples:
  29. >>> parse_args()
  30. """
  31. parser = ArgumentParser(description="mindspore distributed training launch "
  32. "helper utilty that will spawn up "
  33. "multiple distributed processes")
  34. parser.add_argument("--nproc_per_node", type=int, default=1,
  35. help="The number of processes to launch on each node, "
  36. "for D training, this is recommended to be set "
  37. "to the number of D in your system so that "
  38. "each process can be bound to a single D.")
  39. parser.add_argument("--visible_devices", type=str, default="0,1,2,3,4,5,6,7",
  40. help="will use the visible devices sequentially")
  41. parser.add_argument("--server_id", type=str, default="",
  42. help="server ip")
  43. parser.add_argument("--training_script", type=str,
  44. help="The full path to the single D training "
  45. "program/script to be launched in parallel, "
  46. "followed by all the arguments for the "
  47. "training script")
  48. # rest from the training program
  49. args, unknown = parser.parse_known_args()
  50. args.training_script_args = unknown
  51. return args
  52. def main():
  53. print("start", __file__)
  54. args = parse_args()
  55. print(args)
  56. visible_devices = args.visible_devices.split(',')
  57. assert os.path.isfile(args.training_script)
  58. assert len(visible_devices) >= args.nproc_per_node
  59. print('visible_devices:{}'.format(visible_devices))
  60. if not args.server_id:
  61. print('pleaser input server ip!!!')
  62. exit(0)
  63. print('server_id:{}'.format(args.server_id))
  64. # construct hccn_table
  65. hccn_configs = open('/etc/hccn.conf', 'r').readlines()
  66. device_ips = {}
  67. for hccn_item in hccn_configs:
  68. hccn_item = hccn_item.strip()
  69. if hccn_item.startswith('address_'):
  70. device_id, device_ip = hccn_item.split('=')
  71. device_id = device_id.split('_')[1]
  72. device_ips[device_id] = device_ip
  73. print('device_id:{}, device_ip:{}'.format(device_id, device_ip))
  74. hccn_table = {}
  75. hccn_table['board_id'] = '0x0000'
  76. hccn_table['chip_info'] = '910'
  77. hccn_table['deploy_mode'] = 'lab'
  78. hccn_table['group_count'] = '1'
  79. hccn_table['group_list'] = []
  80. instance_list = []
  81. usable_dev = ''
  82. for instance_id in range(args.nproc_per_node):
  83. instance = {}
  84. instance['devices'] = []
  85. device_id = visible_devices[instance_id]
  86. device_ip = device_ips[device_id]
  87. usable_dev += str(device_id)
  88. instance['devices'].append({
  89. 'device_id': device_id,
  90. 'device_ip': device_ip,
  91. })
  92. instance['rank_id'] = str(instance_id)
  93. instance['server_id'] = args.server_id
  94. instance_list.append(instance)
  95. hccn_table['group_list'].append({
  96. 'device_num': str(args.nproc_per_node),
  97. 'server_num': '1',
  98. 'group_name': '',
  99. 'instance_count': str(args.nproc_per_node),
  100. 'instance_list': instance_list,
  101. })
  102. hccn_table['para_plane_nic_location'] = 'device'
  103. hccn_table['para_plane_nic_name'] = []
  104. for instance_id in range(args.nproc_per_node):
  105. eth_id = visible_devices[instance_id]
  106. hccn_table['para_plane_nic_name'].append('eth{}'.format(eth_id))
  107. hccn_table['para_plane_nic_num'] = str(args.nproc_per_node)
  108. hccn_table['status'] = 'completed'
  109. # save hccn_table to file
  110. table_path = os.getcwd()
  111. if not os.path.exists(table_path):
  112. os.mkdir(table_path)
  113. table_fn = os.path.join(table_path,
  114. 'rank_table_{}p_{}_{}.json'.format(args.nproc_per_node, usable_dev, args.server_id))
  115. with open(table_fn, 'w') as table_fp:
  116. json.dump(hccn_table, table_fp, indent=4)
  117. sys.stdout.flush()
  118. # spawn the processes
  119. processes = []
  120. cmds = []
  121. log_files = []
  122. env = os.environ.copy()
  123. env['RANK_SIZE'] = str(args.nproc_per_node)
  124. cur_path = os.getcwd()
  125. for rank_id in range(0, args.nproc_per_node):
  126. os.chdir(cur_path)
  127. device_id = visible_devices[rank_id]
  128. device_dir = os.path.join(cur_path, 'device{}'.format(rank_id))
  129. env['RANK_ID'] = str(rank_id)
  130. env['DEVICE_ID'] = str(device_id)
  131. if args.nproc_per_node > 1:
  132. env['MINDSPORE_HCCL_CONFIG_PATH'] = table_fn
  133. env['RANK_TABLE_FILE'] = table_fn
  134. if os.path.exists(device_dir):
  135. shutil.rmtree(device_dir)
  136. os.mkdir(device_dir)
  137. os.chdir(device_dir)
  138. cmd = [sys.executable, '-u']
  139. cmd.append(args.training_script)
  140. cmd.extend(args.training_script_args)
  141. log_file = open('{dir}/log{id}.log'.format(dir=device_dir, id=rank_id), 'w')
  142. process = subprocess.Popen(cmd, stdout=log_file, stderr=log_file, env=env)
  143. processes.append(process)
  144. cmds.append(cmd)
  145. log_files.append(log_file)
  146. for process, cmd, log_file in zip(processes, cmds, log_files):
  147. process.wait()
  148. if process.returncode != 0:
  149. raise subprocess.CalledProcessError(returncode=process, cmd=cmd)
  150. log_file.close()
  151. if __name__ == "__main__":
  152. main()