You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

test_virtual_output.py 12 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307
  1. # Copyright 2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. import re
  16. import numpy as np
  17. import mindspore as ms
  18. import mindspore.nn as nn
  19. from mindspore import Tensor
  20. from mindspore import context
  21. from mindspore.common.api import _executor
  22. from mindspore.ops import operations as P
  23. from mindspore.common.parameter import Parameter
  24. context.set_context(mode=context.GRAPH_MODE)
  25. class DenseMutMulNet(nn.Cell):
  26. def __init__(self):
  27. super(DenseMutMulNet, self).__init__()
  28. self.fc1 = nn.Dense(128, 768)
  29. self.fc2 = nn.Dense(128, 768)
  30. self.fc3 = nn.Dense(128, 768)
  31. self.fc4 = nn.Dense(768, 768, has_bias=False)
  32. self.relu4 = nn.ReLU()
  33. self.relu5 = nn.ReLU()
  34. self.transpose = P.Transpose()
  35. self.matmul1 = P.MatMul()
  36. self.matmul2 = P.MatMul()
  37. self.fc4.matmul.shard(((1, 1), (8, 1)))
  38. def construct(self, x):
  39. q = self.fc1(x)
  40. k = self.fc2(x)
  41. v = self.fc3(x)
  42. k = self.transpose(k, (1, 0))
  43. c = self.relu4(self.matmul1(q, k))
  44. s = self.relu5(self.matmul2(c, v))
  45. s = self.fc4(s)
  46. return s
  47. class MulNegTwoOutputNet(nn.Cell):
  48. def __init__(self):
  49. super().__init__()
  50. self.mul = P.Mul().shard(((2, 4), (2, 4)))
  51. self.neg = P.Neg().shard(((2, 4),))
  52. self.mul_weight = Parameter(Tensor(np.ones([32, 128]), dtype=ms.float32), name="weight")
  53. def construct(self, x):
  54. out1 = self.mul(x, self.mul_weight)
  55. out2 = self.neg(out1)
  56. return out1, out2
  57. class ReshapeMatMulNet(nn.Cell):
  58. def __init__(self, strategy1, strategy2):
  59. super().__init__()
  60. self.reshape = P.Reshape()
  61. self.matmul = P.MatMul().shard(strategy2)
  62. self.matmul_weight = Parameter(Tensor(np.ones([28, 64]), dtype=ms.float32), name="weight")
  63. # x (64, 4, 7)
  64. def construct(self, x):
  65. out = self.reshape(x, (64, 28))
  66. out = self.matmul(out, self.matmul_weight)
  67. return out
  68. class MatMulReshapeNet(nn.Cell):
  69. def __init__(self, strategy1, strategy2):
  70. super().__init__()
  71. self.reshape = P.Reshape()
  72. self.matmul = P.MatMul().shard(strategy1)
  73. self.matmul_weight = Parameter(Tensor(np.ones([28, 64]), dtype=ms.float32), name="weight")
  74. # x (128, 28)
  75. def construct(self, x):
  76. out = self.matmul(x, self.matmul_weight)
  77. out = self.reshape(out, (64, -1))
  78. return out
  79. class ReshapeMulNet(nn.Cell):
  80. def __init__(self):
  81. super().__init__()
  82. self.reshape = P.Reshape()
  83. self.mul = P.Mul().shard(((1, 2, 4), (2, 4)))
  84. self.mul_weight = Parameter(Tensor(np.ones([128, 96]), dtype=ms.float32), name="weight")
  85. def construct(self, x):
  86. weight = self.reshape(self.mul_weight, (1, 128, 96))
  87. out = self.mul(weight, self.mul_weight)
  88. return out
  89. class ParallelMulNet(nn.Cell):
  90. def __init__(self, dense_in_channel=2048, dense_out_channel=250):
  91. super().__init__()
  92. weight_np = np.full((dense_out_channel, dense_in_channel), 0.01, dtype=np.float32)
  93. bias_np = np.full((dense_out_channel,), 0.01, dtype=np.float32)
  94. self.flat = nn.Flatten()
  95. self.dense = nn.Dense(in_channels=dense_in_channel,
  96. out_channels=dense_out_channel,
  97. weight_init=Tensor(weight_np),
  98. bias_init=Tensor(bias_np),
  99. has_bias=True)
  100. self.mul = P.Mul()
  101. def construct(self, inputs):
  102. x = self.flat(inputs)
  103. x = self.dense(x)
  104. x = self.mul(x, x)
  105. return x
  106. def compile_graph(x, net):
  107. net.set_auto_parallel()
  108. net.set_train(False)
  109. _executor.compile(net, x, auto_parallel_mode=True)
  110. strategies = _executor._get_shard_strategy(net)
  111. return strategies
  112. def compile_graph_two_input(x, y, net):
  113. net.set_auto_parallel()
  114. net.set_train(False)
  115. _executor.compile(net, x, y, auto_parallel_mode=True)
  116. strategies = _executor._get_shard_strategy(net)
  117. return strategies
  118. def test_dense_relu_semi_auto():
  119. context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel", full_batch=False)
  120. net = DenseMutMulNet()
  121. x = Tensor(np.ones([32, 128]).astype(np.float32) * 0.01)
  122. strategies = compile_graph(x, net)
  123. for (k, v) in strategies.items():
  124. if re.search('VirtualOutput-op', k) is not None:
  125. assert v[0][0] == 8
  126. def test_dense_relu_semi_auto_full_batch():
  127. context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel", full_batch=True)
  128. net = DenseMutMulNet()
  129. x = Tensor(np.ones([32, 128]).astype(np.float32) * 0.01)
  130. strategies = compile_graph(x, net)
  131. for (k, v) in strategies.items():
  132. if re.search('VirtualOutput-op', k) is not None:
  133. assert v[0][0] == 1
  134. def test_dense_relu_auto():
  135. context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="auto_parallel", full_batch=False)
  136. net = DenseMutMulNet()
  137. x = Tensor(np.ones([32, 128]).astype(np.float32) * 0.01)
  138. strategies = compile_graph(x, net)
  139. for (k, v) in strategies.items():
  140. if re.search('VirtualOutput-op', k) is not None:
  141. assert v[0][0] == 8
  142. def test_dense_relu_auto_full_batch():
  143. context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="auto_parallel", full_batch=True)
  144. net = DenseMutMulNet()
  145. x = Tensor(np.ones([32, 128]).astype(np.float32) * 0.01)
  146. strategies = compile_graph(x, net)
  147. for (k, v) in strategies.items():
  148. if re.search('VirtualOutput-op', k) is not None:
  149. assert v[0][0] == 1
  150. def test_mul_neg_two_output_semi_auto():
  151. context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel", full_batch=False)
  152. net = MulNegTwoOutputNet()
  153. x = Tensor(np.ones([32, 128]).astype(np.float32) * 0.01)
  154. strategies = compile_graph(x, net)
  155. count = 0
  156. for (k, v) in strategies.items():
  157. if re.search('VirtualOutput-op', k) is not None:
  158. count += 1
  159. assert v[0][0] == 8
  160. assert count == 2
  161. def test_mul_neg_two_output_semi_auto_full_batch():
  162. context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel", full_batch=True)
  163. net = MulNegTwoOutputNet()
  164. x = Tensor(np.ones([32, 128]).astype(np.float32) * 0.01)
  165. strategies = compile_graph(x, net)
  166. count = 0
  167. for (k, v) in strategies.items():
  168. if re.search('VirtualOutput-op', k) is not None:
  169. count += 1
  170. assert v[0][0] == 1
  171. assert count == 2
  172. def test_mul_neg_two_output_auto():
  173. context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="auto_parallel", full_batch=False)
  174. net = MulNegTwoOutputNet()
  175. x = Tensor(np.ones([32, 128]).astype(np.float32) * 0.01)
  176. strategies = compile_graph(x, net)
  177. count = 0
  178. for (k, v) in strategies.items():
  179. if re.search('VirtualOutput-op', k) is not None:
  180. count += 1
  181. assert v[0][0] == 8
  182. assert count == 2
  183. def test_mul_neg_two_output_full_batch():
  184. context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="auto_parallel", full_batch=True)
  185. net = MulNegTwoOutputNet()
  186. x = Tensor(np.ones([32, 128]).astype(np.float32) * 0.01)
  187. strategies = compile_graph(x, net)
  188. count = 0
  189. for (k, v) in strategies.items():
  190. if re.search('VirtualOutput-op', k) is not None:
  191. count += 1
  192. assert v[0][0] == 1
  193. assert count == 2
  194. def test_reshape_matmul_semi_auto():
  195. context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel", full_batch=False)
  196. strategy1 = None
  197. strategy2 = ((1, 1), (1, 8))
  198. net = ReshapeMatMulNet(strategy1, strategy2)
  199. x = Tensor(np.ones([64, 4, 7]), ms.float32)
  200. strategies = compile_graph(x, net)
  201. for (k, v) in strategies.items():
  202. if re.search('VirtualOutput-op', k) is not None:
  203. assert v[0][0] == 8
  204. def test_reshape_matmul_auto():
  205. context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="auto_parallel", full_batch=False)
  206. strategy1 = None
  207. strategy2 = ((1, 1), (1, 8))
  208. net = ReshapeMatMulNet(strategy1, strategy2)
  209. x = Tensor(np.ones([64, 4, 7]), ms.float32)
  210. strategies = compile_graph(x, net)
  211. for (k, v) in strategies.items():
  212. if re.search('VirtualOutput-op', k) is not None:
  213. assert v[0][0] == 8
  214. def test_matmul_reshape_semi_auto():
  215. context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel", full_batch=False)
  216. strategy2 = None
  217. strategy1 = ((1, 1), (1, 8))
  218. net = MatMulReshapeNet(strategy1, strategy2)
  219. x = Tensor(np.ones([128, 28]), ms.float32)
  220. strategies = compile_graph(x, net)
  221. for (k, v) in strategies.items():
  222. if re.search('VirtualOutput-op', k) is not None:
  223. assert v[0][0] == 8
  224. def test_matmul_reshape_auto():
  225. context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="auto_parallel", full_batch=False)
  226. strategy2 = None
  227. strategy1 = ((1, 1), (1, 8))
  228. net = MatMulReshapeNet(strategy1, strategy2)
  229. x = Tensor(np.ones([128, 28]), ms.float32)
  230. strategies = compile_graph(x, net)
  231. for (k, v) in strategies.items():
  232. if re.search('VirtualOutput-op', k) is not None:
  233. assert v[0][0] == 8
  234. def test_reshape_mul_semi_auto():
  235. context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel", full_batch=True)
  236. net = ReshapeMulNet()
  237. x = Tensor(np.ones([64, 4]), ms.float32)
  238. strategies = compile_graph(x, net)
  239. for (k, v) in strategies.items():
  240. if re.search('VirtualOutput-op', k) is not None:
  241. assert v[0][0] == 1
  242. def test_reshape_mul_auto():
  243. context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="auto_parallel", full_batch=True)
  244. net = ReshapeMulNet()
  245. x = Tensor(np.ones([64, 4]), ms.float32)
  246. strategies = compile_graph(x, net)
  247. for (k, v) in strategies.items():
  248. if re.search('VirtualOutput-op', k) is not None:
  249. assert v[0][0] == 1
  250. def test_scalar_output_semi_auto():
  251. context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel", full_batch=False)
  252. net = ParallelMulNet()
  253. loss_fn = nn.SoftmaxCrossEntropyWithLogits(reduction='mean')
  254. eval_net = nn.WithEvalCell(net, loss_fn)
  255. x = Tensor(np.ones([4096, 1, 2, 1024]).astype(np.float32)*0.01)
  256. label = Tensor(np.ones([4096, 250]).astype(np.float32)*0.01)
  257. strategies = compile_graph_two_input(x, label, eval_net)
  258. count = 0
  259. for (k, v) in strategies.items():
  260. if re.search('VirtualOutput-op', k) is not None:
  261. assert v[0][0] == 8
  262. count += 1
  263. assert count == 1
  264. def test_scalar_output_auto():
  265. context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="auto_parallel", full_batch=False)
  266. net = ParallelMulNet()
  267. loss_fn = nn.SoftmaxCrossEntropyWithLogits(reduction='mean')
  268. eval_net = nn.WithEvalCell(net, loss_fn)
  269. x = Tensor(np.ones([4096, 1, 2, 1024]).astype(np.float32)*0.01)
  270. label = Tensor(np.ones([4096, 250]).astype(np.float32)*0.01)
  271. strategies = compile_graph_two_input(x, label, eval_net)
  272. count = 0
  273. for (k, v) in strategies.items():
  274. if re.search('VirtualOutput-op', k) is not None:
  275. assert v[0][0] == 8
  276. count += 1
  277. assert count == 1