You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

nccl_wrapper.h 2.1 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758
  1. /**
  2. * Copyright 2019 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #ifndef MINDSPORE_CCSRC_DEVICE_GPU_DISTRIBUTION_NCCL_WRAPPER_H_
  17. #define MINDSPORE_CCSRC_DEVICE_GPU_DISTRIBUTION_NCCL_WRAPPER_H_
  18. #include <stdio.h>
  19. #include <stdlib.h>
  20. #include <nccl.h>
  21. #include "device/gpu/distribution/collective_common.h"
  22. namespace mindspore {
  23. namespace device {
  24. namespace gpu {
  25. class NCCLWrapper {
  26. public:
  27. NCCLWrapper(NCCLWrapper const &) = delete;
  28. NCCLWrapper &operator=(const NCCLWrapper &) = delete;
  29. static NCCLWrapper &instance();
  30. ncclUniqueId nccl_unique_id() const;
  31. void set_nccl_unique_id(ncclUniqueId unique_id);
  32. void set_rank(int rank_id, int rank_size);
  33. void InitNCCLComm();
  34. ncclResult_t AllReduce(const void *input_addr, void *output_addr, size_t count, ncclDataType_t datatype,
  35. ncclRedOp_t op, cudaStream_t stream);
  36. ncclResult_t AllGather(const void *input_addr, void *output_addr, size_t count, ncclDataType_t datatype,
  37. cudaStream_t stream);
  38. ncclResult_t ReduceScatter(const void *input_addr, void *output_addr, size_t count, ncclDataType_t datatype,
  39. ncclRedOp_t op, cudaStream_t stream);
  40. private:
  41. NCCLWrapper() : rank_id_(-1), rank_size_(0) {}
  42. ~NCCLWrapper() = default;
  43. private:
  44. int rank_id_;
  45. int rank_size_;
  46. ncclUniqueId unique_id_;
  47. ncclComm_t comm_;
  48. };
  49. } // namespace gpu
  50. } // namespace device
  51. } // namespace mindspore
  52. #endif // MINDSPORE_CCSRC_DEVICE_GPU_DISTRIBUTION_NCCL_WRAPPER_H_