test_reshape_shard_propagation.py

# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor, context
from mindspore.common import dtype as mstype
from mindspore.common import Parameter
from mindspore.common.api import _cell_graph_executor
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from tests.ut.python.ops.test_math_ops import VirtualLoss

grad_all = C.GradOperation(get_all=True)
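

# NetWithLoss wraps a network with VirtualLoss so the compiled graph ends in a
# loss node; GradWrap then differentiates the wrapped network with respect to
# all of its inputs via GradOperation(get_all=True).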
class NetWithLoss(nn.Cell):
    def __init__(self, network):
        super(NetWithLoss, self).__init__()
        self.loss = VirtualLoss()
        self.network = network

    def construct(self, x):
        predict = self.network(x)
        return self.loss(predict)


class GradWrap(nn.Cell):
    def __init__(self, network):
        super(GradWrap, self).__init__()
        self.network = network

    def construct(self, x):
        return grad_all(self.network)(x)


class NetWithLossTwoInput(nn.Cell):
    def __init__(self, network):
        super(NetWithLossTwoInput, self).__init__()
        self.loss = VirtualLoss()
        self.network = network

    def construct(self, x, y):
        predict = self.network(x, y)
        return self.loss(predict)


class GradWrapTwoInput(nn.Cell):
    def __init__(self, network):
        super(GradWrapTwoInput, self).__init__()
        self.network = network

    def construct(self, x, y):
        return grad_all(self.network)(x, y)


def compile_graph(net, device_num, x):
    context.set_auto_parallel_context(device_num=device_num, global_rank=0, parallel_mode="auto_parallel",
                                      search_mode="sharding_propagation")
    net.set_auto_parallel()
    net.set_train()
    _cell_graph_executor.compile(net, x)


def compile_graph_two_input(net, device_num, x, y):
    context.set_auto_parallel_context(device_num=device_num, global_rank=0, parallel_mode="auto_parallel",
                                      search_mode="sharding_propagation")
    net.set_auto_parallel()
    net.set_train()
    _cell_graph_executor.compile(net, x, y)
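

# In "sharding_propagation" search mode, strategies configured explicitly via
# shard() are propagated to the operators left un-configured. Reshape accepts
# no user strategy, so its layouts must always be inferred this way.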
def test_reshape_reshape():
    """
    Feature: Sharding propagation for Reshape.
    Description: ReLU->Reshape->Reshape
    Expectation: compile done without error.
    """
    device_num = 8

    class Net(nn.Cell):
        def __init__(self):
            super().__init__()
            self.reshape = P.Reshape()
            self.relu = P.ReLU().shard(((1, 1, 1, 1),))

        def construct(self, x):
            x = self.relu(x)
            out = self.reshape(x, (64, 28))
            out = self.reshape(out, (64, 28, 1))
            return out

    x = Tensor(np.ones([device_num * 8, 28, 1, 1]), dtype=ms.float32)
    net = GradWrap(NetWithLoss(Net()))
    compile_graph(net, device_num, x)
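

# A shard() strategy holds one tuple per operator input, with one split count
# per tensor dimension. In the test below, MatMul's ((2, 1), (1, 4)) slices the
# left input into 2 row blocks and the weight into 4 column blocks, spreading
# the 2 * 4 = 8 blocks of work over the 8 devices.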
def test_reshape_auto_1():
    """
    Feature: Sharding propagation for Reshape.
    Description: ReLU->Reshape->MatMul
    Expectation: compile done without error.
    """
    device_num = 8

    class Net(nn.Cell):
        def __init__(self):
            super().__init__()
            self.relu = P.ReLU().shard(((1, 1, 1, 1),))
            self.reshape = P.Reshape()
            self.matmul = P.MatMul().shard(((2, 1), (1, 4)))
            self.matmul_weight = Parameter(Tensor(np.ones([28, 64]), dtype=ms.float32), name="weight")

        def construct(self, x):
            x = self.relu(x)
            out = self.reshape(x, (64, 28))
            out = self.matmul(out, self.matmul_weight)
            return out

    x = Tensor(np.ones([device_num * 8, 28, 1, 1]), dtype=ms.float32)
    net = GradWrap(NetWithLoss(Net()))
    compile_graph(net, device_num, x)
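

# In the next test, two un-configured Reshapes surround the MatMul;
# propagation must derive layouts for both from the MatMul and Add strategies,
# inserting tensor redistribution where needed.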
def test_reshape_auto_2():
    """
    Feature: Sharding propagation for Reshape.
    Description: ReLU->ReLU->Reshape->MatMul->Reshape->Add
    Expectation: compile done without error.
    """
    device_num = 8

    class Net(nn.Cell):
        def __init__(self):
            super().__init__()
            self.relu = P.ReLU()
            self.relu2 = P.ReLU()
            self.reshape = P.Reshape()
            self.matmul = P.MatMul().shard(((2, 1), (1, 4)))
            self.matmul_weight = Parameter(Tensor(np.ones([28, 64]), dtype=ms.float32), name="weight")
            self.add = P.Add().shard(((2, 4), (2, 4)))
            self.add_weight = Parameter(Tensor(np.ones([128, 32]), dtype=ms.float32), name="weight1")

        def construct(self, x):
            out = self.relu(x)
            out = self.relu2(out)
            out = self.reshape(out, (64, 28))
            out = self.matmul(out, self.matmul_weight)
            out = self.reshape(out, (128, 32))
            out = self.add(out, self.add_weight)
            return out

    x = Tensor(np.ones([device_num * 8, 28, 1, 1]), dtype=ms.float32)
    net = GradWrap(NetWithLoss(Net()))
    compile_graph(net, device_num, x)


def test_reshape_auto_3():
    """
    Feature: Sharding propagation for Reshape.
    Description: Mul->Add->Cast->Reshape->Cast->ReduceMean
    Expectation: compile done without error.
    """
    device_num = 8

    class Net(nn.Cell):
        def __init__(self):
            super().__init__()
            self.gamma = Parameter(Tensor(np.ones([1024]), dtype=ms.float32), name="gamma")
            self.beta = Parameter(Tensor(np.ones([1024]), dtype=ms.float32), name="beta")
            self.add = P.TensorAdd().shard(((8, 1, 1), (1,)))
            self.mul = P.Mul().shard(((8, 1, 1), (1,)))
            self.mean = P.ReduceMean(keep_dims=True).shard(((8, 1),))
            self.reshape = P.Reshape()
            self.dtype1 = mstype.float16
            self.dtype2 = mstype.float32

        def construct(self, x):
            out = self.add(self.mul(x, self.gamma), self.beta)
            out = F.cast(out, self.dtype1)
            out = self.reshape(out, (-1, 1024))
            out = F.cast(out, self.dtype2)
            out = self.mean(out, -1)
            return out

    x = Tensor(np.ones([2048, 30, 1024]), dtype=ms.float32)
    net = GradWrap(NetWithLoss(Net()))
    compile_graph(net, device_num, x)
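

# The next two variants change which neighbours of the Reshape carry explicit
# strategies: test_reshape_auto_4 leaves ReduceMean un-configured, while
# test_reshape_auto_5 leaves Mul un-configured and drops the casts, so
# propagation must fill the gaps from the remaining configured operators.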
def test_reshape_auto_4():
    """
    Feature: Sharding propagation for Reshape.
    Description: Mul->Add->Cast->Reshape->Cast->ReduceMean
    Expectation: compile done without error.
    """
    device_num = 8

    class Net(nn.Cell):
        def __init__(self):
            super().__init__()
            self.gamma = Parameter(Tensor(np.ones([1024]), dtype=ms.float32), name="gamma")
            self.beta = Parameter(Tensor(np.ones([1024]), dtype=ms.float32), name="beta")
            self.add = P.TensorAdd().shard(((8, 1, 1), (1,)))
            self.mul = P.Mul().shard(((8, 1, 1), (1,)))
            self.mean = P.ReduceMean(keep_dims=True)
            self.reshape = P.Reshape()
            self.dtype1 = mstype.float16
            self.dtype2 = mstype.float32

        def construct(self, x):
            out = self.add(self.mul(x, self.gamma), self.beta)
            out = F.cast(out, self.dtype1)
            out = self.reshape(out, (-1, 1024))
            out = F.cast(out, self.dtype2)
            out = self.mean(out, -1)
            return out

    x = Tensor(np.ones([2048, 30, 1024]), dtype=ms.float32)
    net = GradWrap(NetWithLoss(Net()))
    compile_graph(net, device_num, x)


def test_reshape_auto_5():
    """
    Feature: Sharding propagation for Reshape.
    Description: Mul->Add->Reshape->ReduceMean
    Expectation: compile done without error.
    """
    device_num = 8

    class Net(nn.Cell):
        def __init__(self):
            super().__init__()
            self.gamma = Parameter(Tensor(np.ones([1024]), dtype=ms.float32), name="gamma")
            self.beta = Parameter(Tensor(np.ones([1024]), dtype=ms.float32), name="beta")
            self.add = P.TensorAdd().shard(((8, 1, 1), (1,)))
            self.mul = P.Mul()
            self.mean = P.ReduceMean(keep_dims=True).shard(((2, 4),))
            self.reshape = P.Reshape()
            self.dtype1 = mstype.float16
            self.dtype2 = mstype.float32

        def construct(self, x):
            out = self.add(self.mul(x, self.gamma), self.beta)
            out = self.reshape(out, (-1, 1024))
            out = self.mean(out, -1)
            return out

    x = Tensor(np.ones([2048, 30, 1024]), dtype=ms.float32)
    net = GradWrap(NetWithLoss(Net()))
    compile_graph(net, device_num, x)


def test_reshape_auto_6():
    """
    Feature: Sharding propagation for Reshape.
    Description: Reshape->ReLU->Mul->ReduceSum->Reshape->Add->Mul->Reshape->Add
    Expectation: compile done without error.
    """
    device_num = 8

    class Net(nn.Cell):
        def __init__(self):
            super().__init__()
            self.relu = P.ReLU()
            self.mul = P.Mul().shard(((8, 1, 1), (8, 1, 1)))
            self.reshape = P.Reshape()
            self.reduce_sum = P.ReduceSum()
            self.wide_w = Parameter(Tensor(np.ones([8, 1024 * 8, 64]), dtype=ms.float32), name="weight")

        def construct(self, x, y):
            mask = self.reshape(y, (8, 1024 * 8, 1))
            w_id = self.relu(x)
            wx = self.mul(w_id, mask)
            wide_out = self.reshape(self.reduce_sum(wx, 1), (-1, 1))
            deep_id = x + self.wide_w
            vx = self.mul(deep_id, mask)
            deep_in = self.reshape(vx, (-1, 1024 * 8 * 64))
            out = wide_out + deep_in
            return out

    x = Tensor(np.ones([8, 1024 * device_num, 1]), dtype=ms.float32)
    y = Tensor(np.ones([8, 1024 * device_num]), dtype=ms.float32)
    net = GradWrapTwoInput(NetWithLossTwoInput(Net()))
    compile_graph_two_input(net, device_num, x, y)
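

# Depend only imposes an execution-order dependency; the value of its first
# argument flows through unchanged. The test below expects propagation through
# the Reshape chain to fail under the configured Mul strategy, hence the
# pytest.raises(RuntimeError).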
def test_reshape_depend_reshape():
    """
    Feature: Sharding propagation for Reshape.
    Description: Mul->ReLU->Reshape->Depend->Reshape->Add
    Expectation: raise RuntimeError.
    """
    device_num = 8

    class Net(nn.Cell):
        def __init__(self):
            super().__init__()
            self.reshape1 = P.Reshape()
            self.reshape2 = P.Reshape()
            self.relu = P.ReLU()
            self.depend = P.Depend()
            self.mul = P.Mul().shard(((2, 4), (2, 4)))
            self.mul_weight = Parameter(Tensor(np.ones([128, 96]), dtype=ms.float32), name="weight")
            self.add = P.Add().shard(((4, 2), (4, 2)))

        def construct(self, x, y):
            out1 = self.mul(x, self.mul_weight)
            y = self.relu(y)
            out2 = self.reshape1(y, (96, 32, 4))
            out3 = self.depend(out2, out1)
            out3 = self.reshape2(out3, (128, 96))
            out = out1 + out3
            return out

    class NetWithLoss1(nn.Cell):
        def __init__(self, network):
            super(NetWithLoss1, self).__init__()
            self.mean = P.ReduceMean(keep_dims=False)
            self.network = network

        def construct(self, x, y):
            predict = self.network(x, y)
            return self.mean(predict, ())

    x = Tensor(np.ones([128, 96]), dtype=ms.float32)
    y = Tensor(np.ones([256, 48]), dtype=ms.float32)
    net = GradWrapTwoInput(NetWithLoss1(Net()))
    with pytest.raises(RuntimeError):
        compile_graph_two_input(net, device_num, x, y)
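

# The last two tests share one parameter (gamma) between two configured
# operators. Compilation presumably hinges on whether the strategies requested
# for gamma agree: (1, 8) on the MatMul weight conflicts with the ReLU's
# (1, 1), while (1, 1) on both sides is consistent.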
def test_reshape_auto_8():
    """
    Feature: Sharding propagation for a common parameter used by multiple operators.
    Description: ReLU->Add->MatMul->ReduceMean
    Expectation: raise RuntimeError.
    """
    device_num = 8

    class Net(nn.Cell):
        def __init__(self):
            super().__init__()
            self.gamma = Parameter(Tensor(np.ones([2048, 2048]), dtype=ms.float32), name="gamma")
            self.add = P.TensorAdd()
            self.relu = P.ReLU().shard(((1, 1),))
            self.mul2 = P.MatMul().shard(((1, 1), (1, 8)))
            self.mean = P.ReduceMean(keep_dims=True)

        def construct(self, x):
            out = self.add(x, self.relu(self.gamma))
            out = self.mul2(out, self.gamma)
            out = self.mean(out, -1)
            return out

    x = Tensor(np.ones([2048, 2048]), dtype=ms.float32)
    net = GradWrap(NetWithLoss(Net()))
    with pytest.raises(RuntimeError):
        compile_graph(net, device_num, x)


def test_reshape_auto_9():
    """
    Feature: Sharding propagation for a common parameter used by multiple operators.
    Description: ReLU->Add->MatMul->ReduceMean
    Expectation: compile done without error.
    """
    device_num = 8

    class Net(nn.Cell):
        def __init__(self):
            super().__init__()
            self.gamma = Parameter(Tensor(np.ones([2048, 2048]), dtype=ms.float32), name="gamma")
            self.add = P.TensorAdd()
            self.relu = P.ReLU().shard(((1, 1),))
            self.mul2 = P.MatMul().shard(((8, 1), (1, 1)))
            self.mean = P.ReduceMean(keep_dims=True)

        def construct(self, x):
            out = self.add(x, self.relu(self.gamma))
            out = self.mul2(out, self.gamma)
            out = self.mean(out, -1)
            return out

    x = Tensor(np.ones([2048, 2048]), dtype=ms.float32)
    net = GradWrap(NetWithLoss(Net()))
    compile_graph(net, device_num, x)