You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

dockerci.sh 3.3 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778
  1. #!/bin/bash
  2. MODELSCOPE_CACHE_DIR_IN_CONTAINER=/modelscope_cache
  3. CODE_DIR=$PWD
  4. CODE_DIR_IN_CONTAINER=/Maas-lib
  5. echo "$USER"
  6. gpus='7 6 5 4 3 2 1 0'
  7. cpu_sets='0-7 8-15 16-23 24-30 31-37 38-44 45-51 52-58'
  8. cpu_sets_arr=($cpu_sets)
  9. is_get_file_lock=false
  10. # export RUN_CASE_COMMAND='python tests/run.py --run_config tests/run_config.yaml'
  11. CI_COMMAND=${CI_COMMAND:-bash .dev_scripts/ci_container_test.sh $RUN_CASE_BASE_COMMAND}
  12. echo "ci command: $CI_COMMAND"
  13. for gpu in $gpus
  14. do
  15. exec {lock_fd}>"/tmp/gpu$gpu" || exit 1
  16. flock -n "$lock_fd" || { echo "WARN: gpu $gpu is in use!" >&2; continue; }
  17. echo "get gpu lock $gpu"
  18. CONTAINER_NAME="modelscope-ci-$gpu"
  19. let is_get_file_lock=true
  20. # pull image if there are update
  21. docker pull ${IMAGE_NAME}:${IMAGE_VERSION}
  22. if [ "$MODELSCOPE_SDK_DEBUG" == "True" ]; then
  23. docker run --rm --name $CONTAINER_NAME --shm-size=16gb \
  24. --cpuset-cpus=${cpu_sets_arr[$gpu]} \
  25. --gpus="device=$gpu" \
  26. -v $CODE_DIR:$CODE_DIR_IN_CONTAINER \
  27. -v $MODELSCOPE_CACHE:$MODELSCOPE_CACHE_DIR_IN_CONTAINER \
  28. -v $MODELSCOPE_HOME_CACHE/$gpu:/root \
  29. -v /home/admin/pre-commit:/home/admin/pre-commit \
  30. -e CI_TEST=True \
  31. -e TEST_LEVEL=$TEST_LEVEL \
  32. -e MODELSCOPE_CACHE=$MODELSCOPE_CACHE_DIR_IN_CONTAINER \
  33. -e MODELSCOPE_DOMAIN=$MODELSCOPE_DOMAIN \
  34. -e MODELSCOPE_SDK_DEBUG=True \
  35. -e HUB_DATASET_ENDPOINT=$HUB_DATASET_ENDPOINT \
  36. -e TEST_ACCESS_TOKEN_CITEST=$TEST_ACCESS_TOKEN_CITEST \
  37. -e TEST_ACCESS_TOKEN_SDKDEV=$TEST_ACCESS_TOKEN_SDKDEV \
  38. -e TEST_LEVEL=$TEST_LEVEL \
  39. -e TEST_UPLOAD_MS_TOKEN=$TEST_UPLOAD_MS_TOKEN \
  40. -e MODEL_TAG_URL=$MODEL_TAG_URL \
  41. --workdir=$CODE_DIR_IN_CONTAINER \
  42. --net host \
  43. ${IMAGE_NAME}:${IMAGE_VERSION} \
  44. $CI_COMMAND
  45. else
  46. docker run --rm --name $CONTAINER_NAME --shm-size=16gb \
  47. --cpuset-cpus=${cpu_sets_arr[$gpu]} \
  48. --gpus="device=$gpu" \
  49. -v $CODE_DIR:$CODE_DIR_IN_CONTAINER \
  50. -v $MODELSCOPE_CACHE:$MODELSCOPE_CACHE_DIR_IN_CONTAINER \
  51. -v $MODELSCOPE_HOME_CACHE/$gpu:/root \
  52. -v /home/admin/pre-commit:/home/admin/pre-commit \
  53. -e CI_TEST=True \
  54. -e TEST_LEVEL=$TEST_LEVEL \
  55. -e MODELSCOPE_CACHE=$MODELSCOPE_CACHE_DIR_IN_CONTAINER \
  56. -e MODELSCOPE_DOMAIN=$MODELSCOPE_DOMAIN \
  57. -e HUB_DATASET_ENDPOINT=$HUB_DATASET_ENDPOINT \
  58. -e TEST_ACCESS_TOKEN_CITEST=$TEST_ACCESS_TOKEN_CITEST \
  59. -e TEST_ACCESS_TOKEN_SDKDEV=$TEST_ACCESS_TOKEN_SDKDEV \
  60. -e TEST_LEVEL=$TEST_LEVEL \
  61. -e TEST_UPLOAD_MS_TOKEN=$TEST_UPLOAD_MS_TOKEN \
  62. -e MODEL_TAG_URL=$MODEL_TAG_URL \
  63. --workdir=$CODE_DIR_IN_CONTAINER \
  64. --net host \
  65. ${IMAGE_NAME}:${IMAGE_VERSION} \
  66. $CI_COMMAND
  67. fi
  68. if [ $? -ne 0 ]; then
  69. echo "Running test case failed, please check the log!"
  70. exit -1
  71. fi
  72. break
  73. done
  74. if [ "$is_get_file_lock" = false ] ; then
  75. echo 'No free GPU!'
  76. exit 1
  77. fi