
run_distribute_train.sh

#!/bin/bash
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
  16. echo "=============================================================================================================="
  17. echo "Please run the scipt as: "
  18. echo "sh run_distribute_train.sh DEVICE_NUM EPOCH_SIZE LR DATASET MINDSPORE_HCCL_CONFIG_PATH PRE_TRAINED PRE_TRAINED_EPOCH_SIZE"
  19. echo "for example: sh run_distribute_train.sh 8 500 0.2 coco /data/hccl.json /opt/ssd-300.ckpt(optional) 200(optional)"
  20. echo "It is better to use absolute path."
  21. echo "================================================================================================================="
if [ $# != 5 ] && [ $# != 7 ]
then
    echo "Usage: sh run_distribute_train.sh [DEVICE_NUM] [EPOCH_SIZE] [LR] [DATASET] \
[MINDSPORE_HCCL_CONFIG_PATH] [PRE_TRAINED](optional) [PRE_TRAINED_EPOCH_SIZE](optional)"
    exit 1
fi
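# PRE_TRAINED and PRE_TRAINED_EPOCH_SIZE are optional, but must be supplied
# together: the check above accepts exactly 5 or exactly 7 positional arguments.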
# Before starting the distributed training, create the MindRecord files first.
BASE_PATH=$(cd "$(dirname "$0")" || exit; pwd)
cd "$BASE_PATH/.." || exit
python train.py --only_create_dataset=1
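# Note: --only_create_dataset=1 presumably makes train.py convert the dataset
# to MindRecord and exit, so the conversion runs once up front rather than
# racing across the parallel training processes launched below.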
  32. echo "After running the scipt, the network runs in the background. The log will be generated in LOGx/log.txt"
  33. export RANK_SIZE=$1
  34. EPOCH_SIZE=$2
  35. LR=$3
  36. DATASET=$4
  37. PRE_TRAINED=$6
  38. PRE_TRAINED_EPOCH_SIZE=$7
  39. export MINDSPORE_HCCL_CONFIG_PATH=$5
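# RANK_SIZE is the total device count; MINDSPORE_HCCL_CONFIG_PATH points to the
# HCCL rank-table JSON (e.g. /data/hccl.json in the example above), which on
# Ascend typically describes the devices taking part in the distributed job.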
for ((i = 0; i < RANK_SIZE; i++))
do
    export DEVICE_ID=$i
    rm -rf LOG$i
    mkdir ./LOG$i
    cp ./*.py ./LOG$i
    cp -r ./src ./LOG$i
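    # Each rank runs from its own LOG$i copy of the scripts and src/, so the
    # per-device logs and outputs do not collide.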
    cd ./LOG$i || exit
    export RANK_ID=$i
    echo "start training for rank $i, device $DEVICE_ID"
    env > env.log
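    # Launch training in the background (&), redirecting stdout and stderr to
    # log.txt, so all RANK_SIZE processes run in parallel.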
    if [ $# == 5 ]
    then
        python train.py \
            --distribute=1 \
            --lr=$LR \
            --dataset=$DATASET \
            --device_num=$RANK_SIZE \
            --device_id=$DEVICE_ID \
            --epoch_size=$EPOCH_SIZE > log.txt 2>&1 &
    fi
    if [ $# == 7 ]
    then
        python train.py \
            --distribute=1 \
            --lr=$LR \
            --dataset=$DATASET \
            --device_num=$RANK_SIZE \
            --device_id=$DEVICE_ID \
            --pre_trained=$PRE_TRAINED \
            --pre_trained_epoch_size=$PRE_TRAINED_EPOCH_SIZE \
            --epoch_size=$EPOCH_SIZE > log.txt 2>&1 &
    fi
    cd ../
done
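
A typical launch, taking the script's own example (assuming an 8-device host and an HCCL rank table already generated at /data/hccl.json):

sh run_distribute_train.sh 8 500 0.2 coco /data/hccl.json

Since each rank runs in the background, progress can then be followed per device with, for example, tail -f LOG0/log.txt.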