You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

collective_ops_impl.h 2.5 kB

1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374
  1. /**
  2. * Copyright 2021 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #ifndef MINDSPORE_CCSRC_FL_SERVER_COLLECTIVE_OPS_IMPL_H_
  17. #define MINDSPORE_CCSRC_FL_SERVER_COLLECTIVE_OPS_IMPL_H_
#include <functional>
#include <memory>
#include <mutex>
#include <string>
#include <vector>
#include "proto/ps.pb.h"
#include "ps/ps_context.h"
#include "ps/core/server_node.h"
#include "fl/server/common.h"
  26. namespace mindspore {
  27. namespace fl {
  28. namespace server {
  29. // CollectiveOpsImpl is the collective communication API of the server.
  30. // For now, it implements two AllReduce algorithms: RingAllReduce and BroadcastAllReduce. Elastic AllReduce is also
  31. // supported for the elastic scaling feature of the server.
  32. class CollectiveOpsImpl {
  33. public:
  34. static CollectiveOpsImpl &GetInstance() {
  35. static CollectiveOpsImpl instance;
  36. return instance;
  37. }
  38. void Initialize(const std::shared_ptr<ps::core::ServerNode> &server_node);
  39. template <typename T>
  40. bool AllReduce(const void *sendbuff, void *recvbuff, size_t count);
  41. // Reinitialize the ring for collective communication after scaling operations are done.
  42. bool ReInitForScaling();
  43. private:
  44. CollectiveOpsImpl() : server_node_(nullptr), local_rank_(0), server_num_(0) {}
  45. ~CollectiveOpsImpl() = default;
  46. CollectiveOpsImpl(const CollectiveOpsImpl &) = delete;
  47. CollectiveOpsImpl &operator=(const CollectiveOpsImpl &) = delete;
  48. // Implementation of RingAllReduce.
  49. template <typename T>
  50. bool RingAllReduce(const void *sendbuff, void *recvbuff, size_t count);
  51. // Implementation of BroadcastAllReduce.
  52. template <typename T>
  53. bool ReduceBroadcastAllReduce(const void *sendbuff, void *recvbuff, size_t count);
  54. std::shared_ptr<ps::core::ServerNode> server_node_;
  55. uint32_t local_rank_;
  56. uint32_t server_num_;
  57. // The mutex to ensure that collective communication is threadsafe.
  58. std::mutex mtx_;
  59. };
  60. } // namespace server
  61. } // namespace fl
  62. } // namespace mindspore
  63. #endif // MINDSPORE_CCSRC_FL_SERVER_COLLECTIVE_OPS_IMPL_H_