You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

launch.py 6.0 kB

5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """launch train script"""
  16. import os
  17. import sys
  18. import json
  19. from argparse import ArgumentParser
  20. def parse_args():
  21. """
  22. parse args .
  23. Args:
  24. Returns:
  25. args.
  26. Examples:
  27. >>> parse_args()
  28. """
  29. parser = ArgumentParser(description="mindspore distributed training launch "
  30. "helper utilty that will spawn up "
  31. "multiple distributed processes")
  32. parser.add_argument("--nproc_per_node", type=int, default=1,
  33. help="The number of processes to launch on each node, "
  34. "for D training, this is recommended to be set "
  35. "to the number of D in your system so that "
  36. "each process can be bound to a single D.")
  37. parser.add_argument("--visible_devices", type=str, default="0,1,2,3,4,5,6,7",
  38. help="will use the visible devices sequentially")
  39. parser.add_argument("--server_id", type=str, default="",
  40. help="server ip")
  41. parser.add_argument("--training_script", type=str,
  42. help="The full path to the single D training "
  43. "program/script to be launched in parallel, "
  44. "followed by all the arguments for the "
  45. "training script")
  46. # rest from the training program
  47. args, unknown = parser.parse_known_args()
  48. args.training_script_args = unknown
  49. return args
  50. def main():
  51. print("start", __file__)
  52. args = parse_args()
  53. print(args)
  54. visible_devices = args.visible_devices.split(',')
  55. assert os.path.isfile(args.training_script)
  56. assert len(visible_devices) >= args.nproc_per_node
  57. print('visible_devices:{}'.format(visible_devices))
  58. if not args.server_id:
  59. print('pleaser input server ip!!!')
  60. exit(0)
  61. print('server_id:{}'.format(args.server_id))
  62. # construct hccn_table
  63. hccn_configs = open('/etc/hccn.conf', 'r').readlines()
  64. device_ips = {}
  65. for hccn_item in hccn_configs:
  66. hccn_item = hccn_item.strip()
  67. if hccn_item.startswith('address_'):
  68. device_id, device_ip = hccn_item.split('=')
  69. device_id = device_id.split('_')[1]
  70. device_ips[device_id] = device_ip
  71. print('device_id:{}, device_ip:{}'.format(device_id, device_ip))
  72. hccn_table = {}
  73. hccn_table['board_id'] = '0x0000'
  74. hccn_table['chip_info'] = '910'
  75. hccn_table['deploy_mode'] = 'lab'
  76. hccn_table['group_count'] = '1'
  77. hccn_table['group_list'] = []
  78. instance_list = []
  79. usable_dev = ''
  80. for instance_id in range(args.nproc_per_node):
  81. instance = {}
  82. instance['devices'] = []
  83. device_id = visible_devices[instance_id]
  84. device_ip = device_ips[device_id]
  85. usable_dev += str(device_id)
  86. instance['devices'].append({
  87. 'device_id': device_id,
  88. 'device_ip': device_ip,
  89. })
  90. instance['rank_id'] = str(instance_id)
  91. instance['server_id'] = args.server_id
  92. instance_list.append(instance)
  93. hccn_table['group_list'].append({
  94. 'device_num': str(args.nproc_per_node),
  95. 'server_num': '1',
  96. 'group_name': '',
  97. 'instance_count': str(args.nproc_per_node),
  98. 'instance_list': instance_list,
  99. })
  100. hccn_table['para_plane_nic_location'] = 'device'
  101. hccn_table['para_plane_nic_name'] = []
  102. for instance_id in range(args.nproc_per_node):
  103. eth_id = visible_devices[instance_id]
  104. hccn_table['para_plane_nic_name'].append('eth{}'.format(eth_id))
  105. hccn_table['para_plane_nic_num'] = str(args.nproc_per_node)
  106. hccn_table['status'] = 'completed'
  107. # save hccn_table to file
  108. table_path = os.getcwd()
  109. if not os.path.exists(table_path):
  110. os.mkdir(table_path)
  111. table_fn = os.path.join(table_path,
  112. 'rank_table_{}p_{}_{}.json'.format(args.nproc_per_node, usable_dev, args.server_id))
  113. with open(table_fn, 'w') as table_fp:
  114. json.dump(hccn_table, table_fp, indent=4)
  115. sys.stdout.flush()
  116. # spawn the processes
  117. for rank_id in range(0, args.nproc_per_node):
  118. device_id = visible_devices[rank_id]
  119. device_dir = os.path.join(os.getcwd(), 'device{}'.format(rank_id))
  120. rank_process = 'export RANK_SIZE={} && export RANK_ID={} && export DEVICE_ID={} && '.format(args.nproc_per_node,
  121. rank_id, device_id)
  122. if args.nproc_per_node > 1:
  123. rank_process += 'export MINDSPORE_HCCL_CONFIG_PATH={} && '.format(table_fn)
  124. rank_process += 'export RANK_TABLE_FILE={} && '.format(table_fn)
  125. rank_process += 'rm -rf {dir} && mkdir {dir} && cd {dir} && python {script} '.format(dir=device_dir,
  126. script=args.training_script
  127. )
  128. rank_process += ' '.join(args.training_script_args) + ' > log{}.log 2>&1 &'.format(rank_id)
  129. os.system(rank_process)
# Script entry point: only launch when executed directly, not on import.
if __name__ == "__main__":
    main()