test_two_matmul.py

# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np

import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import context
from mindspore.common.api import _cell_graph_executor
from mindspore.ops import composite as C
from mindspore.ops import operations as P

from tests.ut.python.ops.test_math_ops import VirtualLoss


grad_all = C.GradOperation(get_all=True)


class NetWithLoss(nn.Cell):
    def __init__(self, network):
        super(NetWithLoss, self).__init__()
        self.loss = VirtualLoss()
        self.network = network

    def construct(self, x, y, b):
        predict = self.network(x, y, b)
        return self.loss(predict)


class GradWrap(nn.Cell):
    def __init__(self, network):
        super(GradWrap, self).__init__()
        self.network = network

    def construct(self, x, y, b):
        return grad_all(self.network)(x, y, b)


def compile_net(net, x, y, b):
    net.set_auto_parallel()
    net.set_train()
    _cell_graph_executor.compile(net, x, y, b)
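

# The shard strategies in the tests below map each tensor dimension onto the
# device mesh: every entry is the number of slices along that dimension, so a
# [128, 32] tensor sharded (4, 2) leaves a [32, 16] slice per device. The
# helper below is a minimal sketch of that arithmetic for reading the tests;
# it is an illustrative addition, not part of the original suite or of the
# MindSpore API.
def _shard_shape(full_shape, dim_strategy):
    """Per-device slice shape under one strategy tuple (illustrative only)."""
    assert len(full_shape) == len(dim_strategy)
    # Each dimension must divide evenly into its number of slices.
    assert all(dim % cuts == 0 for dim, cuts in zip(full_shape, dim_strategy))
    return [dim // cuts for dim, cuts in zip(full_shape, dim_strategy)]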


# model_parallel test
def test_two_matmul():
    """
    Feature: shard strategy for two chained matmul operators
    Description: both matmuls are given input strategies on an 8-device mesh
    Expectation: compile success
    """
    class Net(nn.Cell):
        def __init__(self, strategy1, strategy2):
            super().__init__()
            self.matmul1 = P.MatMul().shard(strategy1)
            self.matmul2 = P.MatMul().shard(strategy2)

        def construct(self, x, y, b):
            out = self.matmul1(x, y)
            out = self.matmul2(out, b)
            return out

    context.set_auto_parallel_context(device_num=8, global_rank=0, gradients_mean=True)
    strategy1 = ((4, 2), (2, 1))
    strategy2 = ((2, 4), (4, 1))
    net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
    y = Tensor(np.ones([32, 64]), dtype=ms.float32)
    b = Tensor(np.ones([64, 64]), dtype=ms.float32)
    compile_net(net, x, y, b)
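

# For reference: strategy1 = ((4, 2), (2, 1)) cuts x into 4 x 2 blocks and y
# into 2 x 1 blocks, so each of the 8 devices multiplies a [32, 16] slice by a
# [16, 64] slice (_shard_shape([128, 32], (4, 2)) == [32, 16]). Because the
# contracted dimension is split two ways, partial products have to be summed
# across devices; in semi_auto_parallel mode MindSpore inserts that
# communication itself (background reading, not asserted by the test).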


def test_two_matmul_repeated_calculation1():
    """
    Feature: shard strategy with repeated calculation
    Description: the second matmul is fully replicated on a 64-device mesh
    Expectation: compile success
    """
    class Net(nn.Cell):
        def __init__(self, strategy1, strategy2):
            super().__init__()
            self.matmul1 = P.MatMul().shard(strategy1)
            self.matmul2 = P.MatMul().shard(strategy2)

        def construct(self, x, y, b):
            out = self.matmul1(x, y)
            out = self.matmul2(out, b)
            return out

    context.set_auto_parallel_context(device_num=64, global_rank=5, gradients_mean=True)
    strategy1 = ((2, 4), (4, 8))
    strategy2 = ((1, 1), (1, 1))
    net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
    y = Tensor(np.ones([32, 64]), dtype=ms.float32)
    b = Tensor(np.ones([64, 64]), dtype=ms.float32)
    compile_net(net, x, y, b)


def test_two_matmul_repeated_calculation2():
    """
    Feature: shard strategy with repeated calculation
    Description: the second matmul only occupies part of the 64-device mesh
    Expectation: compile success
    """
    class Net(nn.Cell):
        def __init__(self, strategy1, strategy2):
            super().__init__()
            self.matmul1 = P.MatMul().shard(strategy1)
            self.matmul2 = P.MatMul().shard(strategy2)

        def construct(self, x, y, b):
            out = self.matmul1(x, y)
            out = self.matmul2(out, b)
            return out

    context.set_auto_parallel_context(device_num=64, global_rank=15)
    strategy1 = ((2, 4), (4, 8))
    strategy2 = ((2, 2), (2, 1))
    net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
    y = Tensor(np.ones([32, 64]), dtype=ms.float32)
    b = Tensor(np.ones([64, 64]), dtype=ms.float32)
    compile_net(net, x, y, b)
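

# In both repeated-calculation tests the second matmul's strategy occupies
# fewer devices than device_num = 64: ((1, 1), (1, 1)) shards nothing, so
# every device repeats the same full matmul, and ((2, 2), (2, 1)) spans only
# a 2 x 2 x 1 = 4-device group, so that step is replicated 16 times. These
# repeat factors are my reading of the strategies; the tests themselves only
# check that compilation succeeds.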


def test_matmul_output_strategy_reduce_scatter():
    """
    Feature: test output strategy for matmul operator
    Description: transpose_b is false; set the output strategy and use ReduceScatter
    Expectation: compile success
    """
    class Net(nn.Cell):
        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
            super().__init__()
            self.matmul = P.MatMul().shard(matmul_in_strategy, matmul_out_strategy)
            self.mul = P.Mul().shard(mul_strategy)

        def construct(self, x, y, b):
            out = self.matmul(x, y)
            out = self.mul(out, b)
            return out

    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    matmul_in_strategy = ((2, 2), (2, 2))
    matmul_out_strategy = ((4, 2),)
    mul_strategy = ((4, 2), (4, 2))
    net = GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))
    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
    y = Tensor(np.ones([32, 64]), dtype=ms.float32)
    b = Tensor(np.ones([128, 64]), dtype=ms.float32)
    compile_net(net, x, y, b)
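

# With matmul_in_strategy = ((2, 2), (2, 2)) the contracted dimension (32) is
# split two ways, so each device first holds a partial [64, 32] product. The
# output strategy ((4, 2),) folds that reduction group into the row dimension,
# which is what allows the partial sums to be combined with a ReduceScatter
# (per the docstring) instead of a full AllReduce: each device ends up with a
# distinct [32, 32] slice, i.e. _shard_shape([128, 64], (4, 2)) == [32, 32].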


def test_matmul_output_strategy_reduce_scatter_transpose():
    """
    Feature: test output strategy for matmul operator
    Description: transpose_b is true; set the output strategy and use ReduceScatter
    Expectation: compile success
    """
    class Net(nn.Cell):
        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
            super().__init__()
            self.matmul = P.MatMul(transpose_b=True).shard(matmul_in_strategy, matmul_out_strategy)
            self.mul = P.Mul().shard(mul_strategy)

        def construct(self, x, y, b):
            out = self.matmul(x, y)
            out = self.mul(out, b)
            return out

    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
    matmul_in_strategy = ((2, 4), (2, 4))
    matmul_out_strategy = ((8, 2),)
    mul_strategy = ((8, 2), (8, 2))
    net = GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))
    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
    y = Tensor(np.ones([64, 32]), dtype=ms.float32)
    b = Tensor(np.ones([128, 64]), dtype=ms.float32)
    compile_net(net, x, y, b)
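

# Note on the transposed variant: with transpose_b=True the second input is
# stored as [64, 32], i.e. (output dim, contracted dim), so its (2, 4)
# strategy splits the output dimension two ways and the contracted dimension
# four ways, matching the (2, 4) split of x on a 2 x 4 x 2 = 16-device mesh.
# The output strategy ((8, 2),) again folds the 4-way reduction into the row
# dimension for the ReduceScatter. This layout reading is inferred from the
# shapes, not asserted by the test.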


def test_matmul_output_strategy_all_reduce():
    """
    Feature: test output strategy for matmul operator
    Description: transpose_b is false; set the output strategy and use AllReduce
    Expectation: compile success
    """
    class Net(nn.Cell):
        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
            super().__init__()
            self.matmul = P.MatMul().shard(matmul_in_strategy, matmul_out_strategy)
            self.mul = P.Mul().shard(mul_strategy)

        def construct(self, x, y, b):
            out = self.matmul(x, y)
            out = self.mul(out, b)
            return out

    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    matmul_in_strategy = ((2, 2), (2, 2))
    matmul_out_strategy = ((2, 2),)
    mul_strategy = ((4, 2), (4, 2))
    net = GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))
    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
    y = Tensor(np.ones([32, 64]), dtype=ms.float32)
    b = Tensor(np.ones([128, 64]), dtype=ms.float32)
    compile_net(net, x, y, b)
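

# Same input sharding as the ReduceScatter case, but the output strategy
# ((2, 2),) keeps the layout implied by the inputs, so the two-way partial
# sums over the contracted dimension are combined with an AllReduce and each
# device keeps a [64, 32] slice (_shard_shape([128, 64], (2, 2)) == [64, 32]).
# The following Mul then asks for the finer ((4, 2), (4, 2)) split, so a
# redistribution between the two operators is expected as well.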


def test_matmul_output_strategy_all_reduce_transpose():
    """
    Feature: test output strategy for matmul operator
    Description: transpose_b is true; set the output strategy and use AllReduce
    Expectation: compile success
    """
    class Net(nn.Cell):
        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
            super().__init__()
            self.matmul = P.MatMul(transpose_b=True).shard(matmul_in_strategy, matmul_out_strategy)
            self.mul = P.Mul().shard(mul_strategy)

        def construct(self, x, y, b):
            out = self.matmul(x, y)
            out = self.mul(out, b)
            return out

    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    matmul_in_strategy = ((2, 2), (2, 2))
    matmul_out_strategy = ((2, 2),)
    mul_strategy = ((4, 2), (4, 2))
    net = GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))
    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
    y = Tensor(np.ones([64, 32]), dtype=ms.float32)
    b = Tensor(np.ones([128, 64]), dtype=ms.float32)
    compile_net(net, x, y, b)


def test_matmul_output_strategy_reduce_scatter_repeat_calc():
    """
    Feature: test output strategy for matmul operator
    Description: transpose_b is false; set the output strategy and use ReduceScatter with repeated calculation
    Expectation: compile success
    """
    class Net(nn.Cell):
        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
            super().__init__()
            self.matmul = P.MatMul().shard(matmul_in_strategy, matmul_out_strategy)
            self.mul = P.Mul().shard(mul_strategy)

        def construct(self, x, y, b):
            out = self.matmul(x, y)
            out = self.mul(out, b)
            return out

    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
    matmul_in_strategy = ((2, 2), (2, 2))
    matmul_out_strategy = ((4, 2),)
    mul_strategy = ((4, 2), (4, 2))
    net = GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))
    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
    y = Tensor(np.ones([32, 64]), dtype=ms.float32)
    b = Tensor(np.ones([128, 64]), dtype=ms.float32)
    compile_net(net, x, y, b)
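

# The *_repeat_calc tests (this one and the three below) re-run the
# ReduceScatter / AllReduce cases with device_num set to twice the number of
# devices the strategies actually occupy, so each shard group is replicated
# across the spare devices; compilation is still expected to succeed.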


def test_matmul_output_strategy_reduce_scatter_transpose_repeat_calc():
    """
    Feature: test output strategy for matmul operator
    Description: transpose_b is true; set the output strategy and use ReduceScatter with repeated calculation
    Expectation: compile success
    """
    class Net(nn.Cell):
        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
            super().__init__()
            self.matmul = P.MatMul(transpose_b=True).shard(matmul_in_strategy, matmul_out_strategy)
            self.mul = P.Mul().shard(mul_strategy)

        def construct(self, x, y, b):
            out = self.matmul(x, y)
            out = self.mul(out, b)
            return out

    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=32, global_rank=0)
    matmul_in_strategy = ((2, 4), (2, 4))
    matmul_out_strategy = ((8, 2),)
    mul_strategy = ((8, 2), (8, 2))
    net = GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))
    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
    y = Tensor(np.ones([64, 32]), dtype=ms.float32)
    b = Tensor(np.ones([128, 64]), dtype=ms.float32)
    compile_net(net, x, y, b)


def test_matmul_output_strategy_all_reduce_repeat_calc():
    """
    Feature: test output strategy for matmul operator
    Description: transpose_b is false; set the output strategy and use AllReduce with repeated calculation
    Expectation: compile success
    """
    class Net(nn.Cell):
        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
            super().__init__()
            self.matmul = P.MatMul().shard(matmul_in_strategy, matmul_out_strategy)
            self.mul = P.Mul().shard(mul_strategy)

        def construct(self, x, y, b):
            out = self.matmul(x, y)
            out = self.mul(out, b)
            return out

    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
    matmul_in_strategy = ((2, 2), (2, 2))
    matmul_out_strategy = ((2, 2),)
    mul_strategy = ((4, 2), (4, 2))
    net = GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))
    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
    y = Tensor(np.ones([32, 64]), dtype=ms.float32)
    b = Tensor(np.ones([128, 64]), dtype=ms.float32)
    compile_net(net, x, y, b)


def test_matmul_output_strategy_all_reduce_transpose_repeat_calc():
    """
    Feature: test output strategy for matmul operator
    Description: transpose_b is true; set the output strategy and use AllReduce with repeated calculation
    Expectation: compile success
    """
    class Net(nn.Cell):
        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
            super().__init__()
            self.matmul = P.MatMul(transpose_b=True).shard(matmul_in_strategy, matmul_out_strategy)
            self.mul = P.Mul().shard(mul_strategy)

        def construct(self, x, y, b):
            out = self.matmul(x, y)
            out = self.mul(out, b)
            return out

    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
    matmul_in_strategy = ((2, 2), (2, 2))
    matmul_out_strategy = ((2, 2),)
    mul_strategy = ((4, 2), (4, 2))
    net = GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))
    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
    y = Tensor(np.ones([64, 32]), dtype=ms.float32)
    b = Tensor(np.ones([128, 64]), dtype=ms.float32)
    compile_net(net, x, y, b)