You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

test_gather_v2.py 9.6 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254
  1. # Copyright 2019 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. import numpy as np
  16. import mindspore as ms
  17. import mindspore.nn as nn
  18. from mindspore import Tensor
  19. from mindspore import context
  20. from mindspore.common.api import _cell_graph_executor
  21. from mindspore.ops import composite as C
  22. from mindspore.ops import operations as P
  23. from tests.ut.python.ops.test_math_ops import VirtualLoss
  24. grad_all = C.GradOperation(get_all=True)
  25. class NetWithLoss(nn.Cell):
  26. def __init__(self, network):
  27. super(NetWithLoss, self).__init__()
  28. self.loss = VirtualLoss()
  29. self.network = network
  30. def construct(self, x, y):
  31. predict = self.network(x, y)
  32. return self.loss(predict)
  33. class GradWrap(nn.Cell):
  34. def __init__(self, network):
  35. super(GradWrap, self).__init__()
  36. self.network = network
  37. def construct(self, x, y):
  38. return grad_all(self.network)(x, y)
  39. class Net(nn.Cell):
  40. def __init__(self, axis=0, strategy1=None, strategy2=None, shape=None, target=""):
  41. super().__init__()
  42. if shape is None:
  43. shape = [64, 64]
  44. self.gatherv2 = P.Gather().shard(strategy1).add_prim_attr("primitive_target", target)
  45. self.mul = P.Mul().shard(strategy2)
  46. self.index = Tensor(np.ones(shape), dtype=ms.int32)
  47. self.axis = axis
  48. def construct(self, x, y):
  49. out = self.gatherv2(x, self.index, self.axis)
  50. out = self.mul(out, y)
  51. return out
  52. def compile_graph(net, device_num, parallel_mode, x, y):
  53. context.set_auto_parallel_context(device_num=device_num, global_rank=0, parallel_mode=parallel_mode)
  54. net.set_auto_parallel()
  55. net.set_train()
  56. _cell_graph_executor.compile(net, x, y)
  57. def test_gatherv2_semi_auto0():
  58. """
  59. Feature: distribute operator gather in auto parallel.
  60. Description: gather net with strategy in semi auto parallel, gather axis is 0.
  61. Expectation: compile done without error.
  62. """
  63. strategy1 = ((1, 8), (1, 1))
  64. strategy2 = ((4, 2, 1), (4, 2, 1))
  65. net = GradWrap(NetWithLoss(Net(0, strategy1, strategy2)))
  66. x = Tensor(np.ones([64, 64]), dtype=ms.float32)
  67. y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
  68. compile_graph(net, 8, "semi_auto_parallel", x, y)
  69. def test_gatherv2_semi_auto1():
  70. """
  71. Feature: distribute operator gather in auto parallel.
  72. Description: gather net with strategy in semi auto parallel, gather axis is 0.
  73. Expectation: compile done without error.
  74. """
  75. strategy1 = ((8, 1), (1, 1))
  76. strategy2 = ((4, 2, 1), (4, 2, 1))
  77. net = GradWrap(NetWithLoss(Net(0, strategy1, strategy2)))
  78. x = Tensor(np.ones([64, 64]), dtype=ms.float32)
  79. y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
  80. compile_graph(net, 8, "semi_auto_parallel", x, y)
  81. def test_gatherv2_semi_auto2():
  82. """
  83. Feature: distribute operator gather in auto parallel.
  84. Description: gather net with strategy in semi auto parallel, gather axis is 0.
  85. Expectation: compile done without error.
  86. """
  87. strategy1 = ((2, 4), (1, 1))
  88. strategy2 = ((4, 2, 1), (4, 2, 1))
  89. net = GradWrap(NetWithLoss(Net(0, strategy1, strategy2)))
  90. x = Tensor(np.ones([64, 64]), dtype=ms.float32)
  91. y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
  92. compile_graph(net, 8, "semi_auto_parallel", x, y)
  93. def test_gatherv2_semi_auto3():
  94. """
  95. Feature: distribute operator gather in auto parallel.
  96. Description: gather net with strategy in semi auto parallel, gather axis is 1.
  97. Expectation: compile done without error.
  98. """
  99. strategy1 = ((1, 8), (1, 1))
  100. strategy2 = ((4, 2, 1), (4, 2, 1))
  101. net = GradWrap(NetWithLoss(Net(1, strategy1, strategy2)))
  102. x = Tensor(np.ones([64, 64]), dtype=ms.float32)
  103. y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
  104. compile_graph(net, 8, "semi_auto_parallel", x, y)
  105. def test_gatherv2_semi_auto4():
  106. """
  107. Feature: distribute operator gather in auto parallel.
  108. Description: gather net with strategy in semi auto parallel, gather axis is 1.
  109. Expectation: compile done without error.
  110. """
  111. strategy1 = ((8, 1), (1, 1))
  112. strategy2 = ((4, 2, 1), (4, 2, 1))
  113. net = GradWrap(NetWithLoss(Net(1, strategy1, strategy2)))
  114. x = Tensor(np.ones([64, 32]), dtype=ms.float32)
  115. y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
  116. compile_graph(net, 8, "semi_auto_parallel", x, y)
  117. def test_gatherv2_semi_auto5():
  118. """
  119. Feature: distribute operator gather in auto parallel.
  120. Description: gather net with strategy in semi auto parallel, gather axis is 1.
  121. Expectation: compile done without error.
  122. """
  123. strategy1 = ((2, 4), (1, 1))
  124. strategy2 = ((4, 2, 1), (4, 2, 1))
  125. net = GradWrap(NetWithLoss(Net(1, strategy1, strategy2)))
  126. x = Tensor(np.ones([64, 32]), dtype=ms.float32)
  127. y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
  128. compile_graph(net, 8, "semi_auto_parallel", x, y)
  129. def test_gatherv2_semi_auto6():
  130. """
  131. Feature: distribute operator gather in auto parallel.
  132. Description: gather net with strategy in semi auto parallel, gather axis is 0.
  133. Expectation: compile done without error.
  134. """
  135. strategy2 = ((4, 2, 1), (4, 2, 1))
  136. net = GradWrap(NetWithLoss(Net(0, None, strategy2)))
  137. x = Tensor(np.ones([64, 32]), dtype=ms.float32)
  138. y = Tensor(np.ones([64, 64, 32]), dtype=ms.float32)
  139. compile_graph(net, 8, "semi_auto_parallel", x, y)
  140. def test_gatherv2_semi_auto7():
  141. """
  142. Feature: distribute operator gather in auto parallel.
  143. Description: gather net with strategy in semi auto parallel, gather axis is 1.
  144. Expectation: compile done without error.
  145. """
  146. strategy2 = ((4, 2, 1), (4, 2, 1))
  147. net = GradWrap(NetWithLoss(Net(1, None, strategy2)))
  148. x = Tensor(np.ones([64, 32]), dtype=ms.float32)
  149. y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
  150. compile_graph(net, 8, "semi_auto_parallel", x, y)
  151. def test_gatherv2_semi_auto8():
  152. """
  153. Feature: distribute operator gather in auto parallel.
  154. Description: gather net with strategy in semi auto parallel, gather axis is 0.
  155. Expectation: compile done without error.
  156. """
  157. strategy1 = ((8,), (1, 1))
  158. strategy2 = ((4, 2), (4, 2))
  159. net = GradWrap(NetWithLoss(Net(0, strategy1, strategy2)))
  160. x = Tensor(np.ones([64]), dtype=ms.float32)
  161. y = Tensor(np.ones([64, 64]), dtype=ms.float32)
  162. compile_graph(net, 8, "semi_auto_parallel", x, y)
  163. def test_gatherv2_forward_all_reduce():
  164. """
  165. Feature: distribute operator gather in auto parallel.
  166. Description: gather net using forward all_reduce in semi auto parallel, gather axis is 0.
  167. Expectation: compile done without error.
  168. """
  169. strategy1 = ((8, 1), (1, 1))
  170. strategy2 = ((2, 4, 1), (2, 4, 1))
  171. net = GradWrap(NetWithLoss(Net(0, strategy1, strategy2, shape=[2, 64])))
  172. x = Tensor(np.ones([64, 64]), dtype=ms.float32)
  173. y = Tensor(np.ones([2, 64, 64]), dtype=ms.float32)
  174. compile_graph(net, 8, "semi_auto_parallel", x, y)
  175. def test_gatherv2_shard_batch_and_axis():
  176. """
  177. Feature: distribute operator gather in auto parallel.
  178. Description: gather net with batch and axis sharding strategy in semi auto parallel, gather axis is 0.
  179. Expectation: compile done without error.
  180. """
  181. strategy1 = ((4, 1), (2, 1))
  182. strategy2 = ((2, 4, 1), (2, 4, 1))
  183. net = GradWrap(NetWithLoss(Net(0, strategy1, strategy2, shape=[2, 64])))
  184. x = Tensor(np.ones([64, 64]), dtype=ms.float32)
  185. y = Tensor(np.ones([2, 64, 64]), dtype=ms.float32)
  186. compile_graph(net, 8, "semi_auto_parallel", x, y)
  187. def test_gatherv2_split_axis_0_repeat_calc():
  188. """
  189. Feature: distribute operator gather in auto parallel.
  190. Description: gather net with repeat calculate strategy in semi auto parallel, gather axis is 0.
  191. Expectation: compile done without error.
  192. """
  193. strategy1 = ((4, 1), (1, 1))
  194. strategy2 = ((2, 4, 1), (2, 4, 1))
  195. net = GradWrap(NetWithLoss(Net(0, strategy1, strategy2, shape=[2, 64])))
  196. x = Tensor(np.ones([64, 64]), dtype=ms.float32)
  197. y = Tensor(np.ones([2, 64, 64]), dtype=ms.float32)
  198. compile_graph(net, 8, "semi_auto_parallel", x, y)
  199. def test_gatherv2_auto0():
  200. """
  201. Feature: distribute operator gather in auto parallel.
  202. Description: gather net without strategy in auto parallel, gather axis is 0.
  203. Expectation: compile done without error.
  204. """
  205. net = GradWrap(NetWithLoss(Net(0)))
  206. x = Tensor(np.ones([64, 32]), dtype=ms.float32)
  207. y = Tensor(np.ones([64, 64, 32]), dtype=ms.float32)
  208. compile_graph(net, 8, "auto_parallel", x, y)
  209. def test_gatherv2_auto1():
  210. """
  211. Feature: distribute operator gather in auto parallel.
  212. Description: gather net without strategy in auto parallel, gather axis is 1.
  213. Expectation: compile done without error.
  214. """
  215. net = GradWrap(NetWithLoss(Net(1)))
  216. x = Tensor(np.ones([64, 32]), dtype=ms.float32)
  217. y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
  218. compile_graph(net, 8, "auto_parallel", x, y)