test_two_matmul.py
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import pytest

import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import context
from mindspore.common.api import _cell_graph_executor
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from tests.ut.python.ops.test_math_ops import VirtualLoss


grad_all = C.GradOperation(get_all=True)


class NetWithLoss(nn.Cell):
    def __init__(self, network):
        super(NetWithLoss, self).__init__()
        self.loss = VirtualLoss()
        self.network = network

    def construct(self, x, y, b):
        predict = self.network(x, y, b)
        return self.loss(predict)


class GradWrap(nn.Cell):
    def __init__(self, network):
        super(GradWrap, self).__init__()
        self.network = network

    def construct(self, x, y, b):
        return grad_all(self.network)(x, y, b)


def compile_net(net, x, y, b):
    net.set_auto_parallel()
    net.set_train()
    _cell_graph_executor.compile(net, x, y, b)
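
# Test harness (reader's note, not part of the original file): NetWithLoss
# attaches a virtual loss so the graph ends in a scalar, GradWrap differentiates
# with respect to all inputs via GradOperation(get_all=True), and compile_net
# only compiles the graph without running it, which is all these
# parallel-strategy tests need to verify.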


# model_parallel test
def test_two_matmul():
    class Net(nn.Cell):
        def __init__(self, strategy1, strategy2):
            super().__init__()
            self.matmul1 = P.MatMul().shard(strategy1)
            self.matmul2 = P.MatMul().shard(strategy2)

        def construct(self, x, y, b):
            out = self.matmul1(x, y)
            out = self.matmul2(out, b)
            return out

    context.set_auto_parallel_context(device_num=8, global_rank=0, gradients_mean=True)
    strategy1 = ((4, 2), (2, 1))
    strategy2 = ((2, 4), (4, 1))
    net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
    y = Tensor(np.ones([32, 64]), dtype=ms.float32)
    b = Tensor(np.ones([64, 64]), dtype=ms.float32)
    compile_net(net, x, y, b)
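
# A note on the strategies above (a sketch of MindSpore shard semantics, not
# part of the original test): shard() takes one tuple per operator input, with
# one integer per dimension giving the number of slices. strategy1 =
# ((4, 2), (2, 1)) slices x [128, 32] into a 4x2 device grid (per-device slice
# [32, 16]) and y [32, 64] into 2x1 (per-device [16, 64]); 4 * 2 * 1 = 8
# matches device_num. Because the contracting dimension is split 2 ways, each
# device computes a partial product that is combined across groups of 2 devices.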


def test_two_matmul_repeated_calculation1():
    class Net(nn.Cell):
        def __init__(self, strategy1, strategy2):
            super().__init__()
            self.matmul1 = P.MatMul().shard(strategy1)
            self.matmul2 = P.MatMul().shard(strategy2)

        def construct(self, x, y, b):
            out = self.matmul1(x, y)
            out = self.matmul2(out, b)
            return out

    context.set_auto_parallel_context(device_num=64, global_rank=5, gradients_mean=True)
    strategy1 = ((2, 4), (4, 8))
    strategy2 = ((1, 1), (1, 1))
    net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
    y = Tensor(np.ones([32, 64]), dtype=ms.float32)
    b = Tensor(np.ones([64, 64]), dtype=ms.float32)
    compile_net(net, x, y, b)
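
# Repeated calculation (a hedged reading of the test above): strategy1
# ((2, 4), (4, 8)) occupies 2 * 4 * 8 = 64 devices, i.e. all of device_num,
# while strategy2 ((1, 1), (1, 1)) slices nothing, so the second matmul is
# computed redundantly on every device. Mixing a fully sliced operator with a
# repeated one is exactly what these two repeated-calculation tests exercise.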


def test_two_matmul_repeated_calculation2():
    class Net(nn.Cell):
        def __init__(self, strategy1, strategy2):
            super().__init__()
            self.matmul1 = P.MatMul().shard(strategy1)
            self.matmul2 = P.MatMul().shard(strategy2)

        def construct(self, x, y, b):
            out = self.matmul1(x, y)
            out = self.matmul2(out, b)
            return out

    context.set_auto_parallel_context(device_num=64, global_rank=15)
    strategy1 = ((2, 4), (4, 8))
    strategy2 = ((2, 2), (2, 1))
    net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
    y = Tensor(np.ones([32, 64]), dtype=ms.float32)
    b = Tensor(np.ones([64, 64]), dtype=ms.float32)
    compile_net(net, x, y, b)


def test_matmul_output_strategy_reduce_scatter():
    """
    Feature: test output strategy for matmul operator
    Description: transpose_b is false, set output strategy and use reduce scatter
    Expectation: compile success
    """
    class Net(nn.Cell):
        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
            super().__init__()
            self.matmul = P.MatMul().shard(matmul_in_strategy, matmul_out_strategy)
            self.mul = P.Mul().shard(mul_strategy)

        def construct(self, x, y, b):
            out = self.matmul(x, y)
            out = self.mul(out, b)
            return out

    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    matmul_in_strategy = ((2, 2), (2, 2))
    matmul_out_strategy = ((4, 2),)
    mul_strategy = ((4, 2), (4, 2))
    net = GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))
    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
    y = Tensor(np.ones([32, 64]), dtype=ms.float32)
    b = Tensor(np.ones([128, 64]), dtype=ms.float32)
    compile_net(net, x, y, b)
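
# How out_strategy selects the collective (a sketch, inferred from the
# docstrings): with in_strategy ((2, 2), (2, 2)) the contracting dimension is
# split 2 ways, so each device holds a partial sum of a [64, 32] output slice.
# Requesting out_strategy ((4, 2),) folds the 2 partial-sum groups into the row
# dimension (4 = 2 x 2), realized with a ReduceScatter; requesting the natural
# layout ((2, 2),) instead (see test_matmul_output_strategy_all_reduce below)
# keeps an AllReduce.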


def test_matmul_output_strategy_reduce_scatter_transpose():
    """
    Feature: test output strategy for matmul operator
    Description: transpose_b is true, set output strategy and use reduce scatter
    Expectation: compile success
    """
    class Net(nn.Cell):
        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
            super().__init__()
            self.matmul = P.MatMul(transpose_b=True).shard(matmul_in_strategy, matmul_out_strategy)
            self.mul = P.Mul().shard(mul_strategy)

        def construct(self, x, y, b):
            out = self.matmul(x, y)
            out = self.mul(out, b)
            return out

    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
    matmul_in_strategy = ((2, 4), (2, 4))
    matmul_out_strategy = ((8, 2),)
    mul_strategy = ((8, 2), (8, 2))
    net = GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))
    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
    y = Tensor(np.ones([64, 32]), dtype=ms.float32)
    b = Tensor(np.ones([128, 64]), dtype=ms.float32)
    compile_net(net, x, y, b)
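
# Note (an assumption about shard semantics with transpose_b=True): each
# input's strategy is written against its stored shape, so y [64, 32] with
# strategy (2, 4) splits n=64 two ways and k=32 four ways; the k split (4)
# must match the k split of x. The output row split 8 = 2 (x rows) x 4
# (k groups) again selects ReduceScatter, now over groups of 4 devices.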


def test_matmul_output_strategy_all_reduce():
    """
    Feature: test output strategy for matmul operator
    Description: transpose_b is false, set output strategy and use all reduce
    Expectation: compile success
    """
    class Net(nn.Cell):
        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
            super().__init__()
            self.matmul = P.MatMul().shard(matmul_in_strategy, matmul_out_strategy)
            self.mul = P.Mul().shard(mul_strategy)

        def construct(self, x, y, b):
            out = self.matmul(x, y)
            out = self.mul(out, b)
            return out

    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    matmul_in_strategy = ((2, 2), (2, 2))
    matmul_out_strategy = ((2, 2),)
    mul_strategy = ((4, 2), (4, 2))
    net = GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))
    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
    y = Tensor(np.ones([32, 64]), dtype=ms.float32)
    b = Tensor(np.ones([128, 64]), dtype=ms.float32)
    compile_net(net, x, y, b)


def test_matmul_output_strategy_all_reduce_transpose():
    """
    Feature: test output strategy for matmul operator
    Description: transpose_b is true, set output strategy and use all reduce
    Expectation: compile success
    """
    class Net(nn.Cell):
        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
            super().__init__()
            self.matmul = P.MatMul(transpose_b=True).shard(matmul_in_strategy, matmul_out_strategy)
            self.mul = P.Mul().shard(mul_strategy)

        def construct(self, x, y, b):
            out = self.matmul(x, y)
            out = self.mul(out, b)
            return out

    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    matmul_in_strategy = ((2, 2), (2, 2))
    matmul_out_strategy = ((2, 2),)
    mul_strategy = ((4, 2), (4, 2))
    net = GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))
    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
    y = Tensor(np.ones([64, 32]), dtype=ms.float32)
    b = Tensor(np.ones([128, 64]), dtype=ms.float32)
    compile_net(net, x, y, b)


def test_matmul_output_strategy_reduce_scatter_repeat_calc():
    """
    Feature: test output strategy for matmul operator
    Description: transpose_b is false, set output strategy, use reduce scatter with repeated calculation
    Expectation: compile success
    """
    class Net(nn.Cell):
        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
            super().__init__()
            self.matmul = P.MatMul().shard(matmul_in_strategy, matmul_out_strategy)
            self.mul = P.Mul().shard(mul_strategy)

        def construct(self, x, y, b):
            out = self.matmul(x, y)
            out = self.mul(out, b)
            return out

    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
    matmul_in_strategy = ((2, 2), (2, 2))
    matmul_out_strategy = ((4, 2),)
    mul_strategy = ((4, 2), (4, 2))
    net = GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))
    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
    y = Tensor(np.ones([32, 64]), dtype=ms.float32)
    b = Tensor(np.ones([128, 64]), dtype=ms.float32)
    compile_net(net, x, y, b)
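
# Repetition combined with out_strategy (a sketch): in_strategy
# ((2, 2), (2, 2)) occupies 2 * 2 * 2 = 8 devices, so with device_num=16 the
# whole computation is repeated twice; the ReduceScatter implied by
# out_strategy ((4, 2),) happens within each 8-device group.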


def test_matmul_output_strategy_reduce_scatter_transpose_repeat_calc():
    """
    Feature: test output strategy for matmul operator
    Description: transpose_b is true, set output strategy, use reduce scatter with repeated calculation
    Expectation: compile success
    """
    class Net(nn.Cell):
        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
            super().__init__()
            self.matmul = P.MatMul(transpose_b=True).shard(matmul_in_strategy, matmul_out_strategy)
            self.mul = P.Mul().shard(mul_strategy)

        def construct(self, x, y, b):
            out = self.matmul(x, y)
            out = self.mul(out, b)
            return out

    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=32, global_rank=0)
    matmul_in_strategy = ((2, 4), (2, 4))
    matmul_out_strategy = ((8, 2),)
    mul_strategy = ((8, 2), (8, 2))
    net = GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))
    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
    y = Tensor(np.ones([64, 32]), dtype=ms.float32)
    b = Tensor(np.ones([128, 64]), dtype=ms.float32)
    compile_net(net, x, y, b)


def test_matmul_output_strategy_all_reduce_repeat_calc():
    """
    Feature: test output strategy for matmul operator
    Description: transpose_b is false, set output strategy, use all reduce with repeated calculation
    Expectation: compile success
    """
    class Net(nn.Cell):
        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
            super().__init__()
            self.matmul = P.MatMul().shard(matmul_in_strategy, matmul_out_strategy)
            self.mul = P.Mul().shard(mul_strategy)

        def construct(self, x, y, b):
            out = self.matmul(x, y)
            out = self.mul(out, b)
            return out

    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
    matmul_in_strategy = ((2, 2), (2, 2))
    matmul_out_strategy = ((2, 2),)
    mul_strategy = ((4, 2), (4, 2))
    net = GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))
    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
    y = Tensor(np.ones([32, 64]), dtype=ms.float32)
    b = Tensor(np.ones([128, 64]), dtype=ms.float32)
    compile_net(net, x, y, b)


def test_matmul_output_strategy_all_reduce_transpose_repeat_calc():
    """
    Feature: test output strategy for matmul operator
    Description: transpose_b is true, set output strategy, use all reduce with repeated calculation
    Expectation: compile success
    """
    class Net(nn.Cell):
        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
            super().__init__()
            self.matmul = P.MatMul(transpose_b=True).shard(matmul_in_strategy, matmul_out_strategy)
            self.mul = P.Mul().shard(mul_strategy)

        def construct(self, x, y, b):
            out = self.matmul(x, y)
            out = self.mul(out, b)
            return out

    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
    matmul_in_strategy = ((2, 2), (2, 2))
    matmul_out_strategy = ((2, 2),)
    mul_strategy = ((4, 2), (4, 2))
    net = GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))
    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
    y = Tensor(np.ones([64, 32]), dtype=ms.float32)
    b = Tensor(np.ones([128, 64]), dtype=ms.float32)
    compile_net(net, x, y, b)


def test_matmul_in_strategy_not_int():
    """
    Feature: shard strategy validation for matmul operator
    Description: a value in in_strategy is not an int
    Expectation: raise TypeError
    """
    class Net(nn.Cell):
        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
            super().__init__()
            self.matmul = P.MatMul(transpose_b=True).shard(matmul_in_strategy, matmul_out_strategy)
            self.mul = P.Mul().shard(mul_strategy)

        def construct(self, x, y, b):
            out = self.matmul(x, y)
            out = self.mul(out, b)
            return out

    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
    matmul_in_strategy = ((2.0, 2), (2, 2))
    matmul_out_strategy = ((2, 2),)
    mul_strategy = ((4, 2), (4, 2))
    with pytest.raises(TypeError):
        GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))


def test_matmul_out_strategy_not_int():
    """
    Feature: shard strategy validation for matmul operator
    Description: a value in out_strategy is not an int
    Expectation: raise TypeError
    """
    class Net(nn.Cell):
        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
            super().__init__()
            self.matmul = P.MatMul(transpose_b=True).shard(matmul_in_strategy, matmul_out_strategy)
            self.mul = P.Mul().shard(mul_strategy)

        def construct(self, x, y, b):
            out = self.matmul(x, y)
            out = self.mul(out, b)
            return out

    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
    matmul_in_strategy = ((2, 2), (2, 2))
    matmul_out_strategy = ((2.0, 2),)
    mul_strategy = ((4, 2), (4, 2))
    with pytest.raises(TypeError):
        GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))


def test_matmul_in_strategy_is_none_and_out_strategy_is_not_none():
    """
    Feature: shard strategy validation for matmul operator
    Description: in_strategy is None while out_strategy is not None
    Expectation: raise ValueError
    """
    class Net(nn.Cell):
        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
            super().__init__()
            self.matmul = P.MatMul(transpose_b=True).shard(matmul_in_strategy, matmul_out_strategy)
            self.mul = P.Mul().shard(mul_strategy)

        def construct(self, x, y, b):
            out = self.matmul(x, y)
            out = self.mul(out, b)
            return out

    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
    matmul_in_strategy = None
    matmul_out_strategy = ((2, 2),)
    mul_strategy = ((4, 2), (4, 2))
    with pytest.raises(ValueError):
        GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))
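
# The three negative tests above wrap net construction, not compilation, in
# pytest.raises: shard() validates its strategy arguments eagerly, so a non-int
# strategy value (TypeError) or an out_strategy supplied without an in_strategy
# (ValueError) is rejected as soon as Net.__init__ runs, before any graph is
# compiled.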