
nccl_wrapper.cc

/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "device/gpu/distribution/nccl_wrapper.h"

namespace mindspore {
namespace device {
namespace gpu {
NCCLWrapper &NCCLWrapper::instance() {
  static NCCLWrapper instance;
  return instance;
}

ncclUniqueId NCCLWrapper::nccl_unique_id() const {
  ncclUniqueId unique_id;
  CHECK_RET(ncclGetUniqueId(&unique_id), ncclSuccess, "Failed to create nccl unique id.");
  return unique_id;
}

void NCCLWrapper::set_nccl_unique_id(ncclUniqueId unique_id) { unique_id_ = unique_id; }

void NCCLWrapper::set_rank(int rank_id, int rank_size) {
  rank_id_ = rank_id;
  rank_size_ = rank_size;
}

void NCCLWrapper::InitNCCLComm() {
  CHECK_RET(ncclCommInitRank(&comm_, rank_size_, unique_id_, rank_id_), ncclSuccess,
            "Failed to init nccl communicator.");
}

ncclResult_t NCCLWrapper::AllReduce(const void *input_addr, void *output_addr, size_t count, ncclDataType_t data_type,
                                    ncclRedOp_t reduce_type, cudaStream_t stream) {
  return ncclAllReduce(input_addr, output_addr, count, data_type, reduce_type, comm_, stream);
}

ncclResult_t NCCLWrapper::AllGather(const void *input_addr, void *output_addr, size_t count, ncclDataType_t data_type,
                                    cudaStream_t stream) {
  return ncclAllGather(input_addr, output_addr, count, data_type, comm_, stream);
}

ncclResult_t NCCLWrapper::ReduceScatter(const void *input_addr, void *output_addr, size_t count,
                                        ncclDataType_t data_type, ncclRedOp_t reduce_type, cudaStream_t stream) {
  return ncclReduceScatter(input_addr, output_addr, count, data_type, reduce_type, comm_, stream);
}
}  // namespace gpu
}  // namespace device
}  // namespace mindspore
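
For context, below is a rough sketch of how this singleton wrapper might be driven from a per-process setup routine. It is not part of the file above: the function name AllReduceExample, the buffer handling, and the assumption that rank 0's ncclUniqueId (obtained via nccl_unique_id()) has already been broadcast to every process are illustrative; in MindSpore the id exchange is handled by separate distribution code.

// Hypothetical usage sketch, assuming rank/size and the shared unique id
// arrive from some out-of-band channel (e.g. an MPI-based helper, not shown).
#include <cuda_runtime.h>
#include "device/gpu/distribution/nccl_wrapper.h"

void AllReduceExample(int rank_id, int rank_size, ncclUniqueId shared_id, size_t count) {
  using mindspore::device::gpu::NCCLWrapper;
  NCCLWrapper &nccl = NCCLWrapper::instance();

  // Every process stores the same unique id plus its own rank before init.
  nccl.set_nccl_unique_id(shared_id);
  nccl.set_rank(rank_id, rank_size);
  nccl.InitNCCLComm();  // collective: all rank_size processes must call this

  // Device buffers and a stream for the collective.
  float *in = nullptr;
  float *out = nullptr;
  cudaMalloc(&in, count * sizeof(float));
  cudaMalloc(&out, count * sizeof(float));
  cudaStream_t stream;
  cudaStreamCreate(&stream);

  // Sum-reduce `count` floats across all ranks; the result lands in `out` on every rank.
  nccl.AllReduce(in, out, count, ncclFloat, ncclSum, stream);
  cudaStreamSynchronize(stream);

  cudaFree(in);
  cudaFree(out);
  cudaStreamDestroy(stream);
}

AllGather and ReduceScatter follow the same pattern; only the output buffer size changes (count * rank_size elements gathered, or count / rank_size elements per rank scattered, respectively).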