You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

test_alltoall_v.py 2.9 kB

1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980
  1. # Copyright 2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. import numpy as np
  16. import mindspore as ms
  17. import mindspore.context as context
  18. from mindspore import Tensor, Parameter
  19. import mindspore.nn as nn
  20. from mindspore.common.api import _executor
  21. from mindspore.nn import TrainOneStepCell, Momentum
  22. from mindspore.ops import operations as P
  23. from mindspore.ops.operations._inner_ops import AllToAllv
  24. class MatMulNet(nn.Cell):
  25. def __init__(self, weight1):
  26. super(MatMulNet, self).__init__()
  27. self.matmul = P.MatMul()
  28. self.mul = P.Mul()
  29. self.alltoallv = AllToAllv(send_rank_ids=[0], recv_rank_ids=[1, 2], recv_shapes=([32, 32], [32, 64]),
  30. recv_shapes_backward=([32, 32], [32, 16]), recv_type=ms.float32)
  31. self.weight1 = Parameter(weight1, "w1")
  32. def construct(self, x1, x2):
  33. out = self.matmul(x1, x2)
  34. out = self.mul(out, self.weight1)
  35. out = self.alltoallv((out, x1))
  36. return out[0]
  37. class MatMulNet2(nn.Cell):
  38. def __init__(self, weight1):
  39. super(MatMulNet2, self).__init__()
  40. self.matmul = P.MatMul()
  41. self.mul = P.Mul()
  42. self.alltoallv = AllToAllv(send_rank_ids=[0], recv_rank_ids=[1, 2], recv_shapes=([32, 32], [32, 64]),
  43. recv_shapes_backward=([32, 32],), recv_type=ms.float32)
  44. self.weight1 = Parameter(weight1, "w1")
  45. def construct(self, x1, x2):
  46. out = self.matmul(x1, x2)
  47. out = self.mul(out, self.weight1)
  48. out = self.alltoallv((out,))
  49. return out[0]
  50. _w1 = Tensor(np.ones([32, 32]), dtype=ms.float32)
  51. _x1 = Tensor(np.ones([32, 16]), dtype=ms.float32)
  52. _x2 = Tensor(np.ones([16, 32]), dtype=ms.float32)
  53. def compile_net(net):
  54. context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
  55. optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
  56. train_net = TrainOneStepCell(net, optimizer)
  57. train_net.set_train()
  58. _executor.compile(train_net, _x1, _x2)
  59. def test_AllToAllv_two_inputs():
  60. context.set_auto_parallel_context(device_num=8, global_rank=0)
  61. net = MatMulNet(_w1)
  62. compile_net(net)
  63. def test_AllToAllv_single_input():
  64. context.set_auto_parallel_context(device_num=8, global_rank=0)
  65. net = MatMulNet2(_w1)
  66. compile_net(net)