You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

test_conv2d_transpose.py 9.0 kB

4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227
  1. # Copyright 2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. import numpy as np
  15. import pytest
  16. import mindspore as ms
  17. from mindspore import context, Tensor, Parameter
  18. from mindspore.common.api import _cell_graph_executor
  19. from mindspore.nn import Cell, TrainOneStepCell, Momentum
  20. from mindspore.ops import operations as P
  21. class Net(Cell):
  22. def __init__(self, conv2d_weight, out_channel, kernel_size, pad_mode, stride,
  23. strategy1=None, strategy2=None):
  24. super().__init__()
  25. self.conv2d_transpose = P.Conv2DTranspose(out_channel=out_channel, kernel_size=kernel_size,
  26. pad_mode=pad_mode, stride=stride).shard(strategy1)
  27. self.neg = P.Neg().shard(strategy2)
  28. self.weight = Parameter(conv2d_weight, "w1")
  29. self.add = P.Add()
  30. self.add_w = Parameter(Tensor(np.ones([32, 8, 8, 8]), dtype=ms.float32), "add_w")
  31. def construct(self, x, b):
  32. out = self.add(x, self.add_w)
  33. out = self.conv2d_transpose(out, self.weight, (32, 16, 8, 8))
  34. out = self.neg(out)
  35. return out
  36. class Net2(Cell):
  37. def __init__(self, conv2d_weight, out_channel, kernel_size, pad_mode, stride, group=1, dilation=1,
  38. strategy1=None, strategy2=None):
  39. super().__init__()
  40. self.conv2d_transpose = P.Conv2DTranspose(out_channel=out_channel, kernel_size=kernel_size, pad_mode=pad_mode,
  41. stride=stride, group=group, dilation=dilation).shard(strategy1)
  42. self.neg = P.Neg().shard(strategy2)
  43. self.weight = Parameter(conv2d_weight, "w1")
  44. def construct(self, x, b):
  45. out = self.conv2d_transpose(x, self.weight, (32, 16, 16, 16))
  46. out = self.neg(out)
  47. return out
  48. _x = Tensor(np.ones([32, 8, 8, 8]), dtype=ms.float32)
  49. _w1 = Tensor(np.ones([8, 16, 2, 2]), dtype=ms.float32)
  50. _w2 = Tensor(np.ones([8, 16, 4, 4]), dtype=ms.float32)
  51. _w3 = Tensor(np.ones([8, 16, 10, 10]), dtype=ms.float32)
  52. _w4 = Tensor(np.ones([8, 16, 3, 3]), dtype=ms.float32)
  53. _w5 = Tensor(np.ones([8, 8, 4, 4]), dtype=ms.float32)
  54. _b = Tensor(np.ones([32, 16, 8, 8]), dtype=ms.float32)
  55. def compile_net(net):
  56. optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
  57. train_net = TrainOneStepCell(net, optimizer)
  58. train_net.set_auto_parallel()
  59. train_net.set_train()
  60. _cell_graph_executor.compile(train_net, _x, _b)
  61. context.reset_auto_parallel_context()
  62. def test_conv2d_transpose_data_parallel():
  63. """
  64. Feature: test data parallel strategy
  65. Description: only shard batch dimension
  66. Expectation: compile success
  67. """
  68. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
  69. strategy1 = ((8, 1, 1, 1), (1, 1, 1, 1))
  70. strategy2 = ((8, 1, 1, 1),)
  71. net = Net(_w1, out_channel=8, kernel_size=2, pad_mode="same", stride=1, strategy1=strategy1, strategy2=strategy2)
  72. compile_net(net)
  73. def test_conv2d_transpose_group():
  74. """
  75. Feature: test group is not 1
  76. Description: shard n/h/w, and group is 2
  77. Expectation: compile success
  78. """
  79. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
  80. strategy1 = ((2, 1, 2, 2), (1, 1, 1, 1))
  81. strategy2 = ((8, 1, 1, 1),)
  82. net = Net2(_w5, out_channel=8, kernel_size=4, pad_mode="same", stride=2, group=2, strategy1=strategy1,
  83. strategy2=strategy2)
  84. compile_net(net)
  85. def test_conv2d_transpose_model_parallel1():
  86. """
  87. Feature: test model parallel strategy
  88. Description: only shard batch dimension and channel dimension
  89. Expectation: compile success
  90. """
  91. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
  92. strategy1 = ((2, 2, 1, 1), (2, 2, 1, 1))
  93. strategy2 = ((8, 1, 1, 1),)
  94. net = Net(_w1, out_channel=8, kernel_size=2, pad_mode="same", stride=1, strategy1=strategy1, strategy2=strategy2)
  95. compile_net(net)
  96. def test_conv2d_transpose_model_parallel2():
  97. """
  98. Feature: test model parallel strategy
  99. Description: shard batch dimension and w dimension
  100. Expectation: compile success
  101. """
  102. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
  103. strategy1 = ((2, 1, 1, 4), (1, 1, 1, 1))
  104. strategy2 = ((2, 1, 1, 4),)
  105. net = Net2(_w2, out_channel=8, kernel_size=(4, 4), pad_mode="same", stride=2,
  106. strategy1=strategy1, strategy2=strategy2)
  107. compile_net(net)
  108. def test_conv2d_transpose_model_parallel_dilation():
  109. """
  110. Feature: test model parallel strategy and dilation is 2
  111. Description: shard n/h/w
  112. Expectation: compile success
  113. """
  114. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
  115. strategy1 = ((2, 1, 2, 2), (1, 1, 1, 1))
  116. strategy2 = ((2, 1, 2, 2),)
  117. net = Net2(_w4, out_channel=8, kernel_size=(3, 3), pad_mode="same", stride=2, dilation=2,
  118. strategy1=strategy1, strategy2=strategy2)
  119. compile_net(net)
  120. def test_conv2d_transpose_model_parallel3():
  121. """
  122. Feature: test model parallel strategy
  123. Description: shard batch dimension, channel dimension and w dimension
  124. Expectation: compile success
  125. """
  126. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
  127. strategy1 = ((2, 2, 1, 4), (2, 1, 1, 1))
  128. strategy2 = ((2, 2, 1, 4),)
  129. net = Net2(_w2, out_channel=8, kernel_size=(4, 4), pad_mode="same", stride=2,
  130. strategy1=strategy1, strategy2=strategy2)
  131. compile_net(net)
  132. def test_conv2d_transpose_model_parallel4():
  133. """
  134. Feature: test model parallel strategy
  135. Description: shard batch dimension, channel dimension and w dimension
  136. Expectation: compile success
  137. """
  138. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
  139. strategy1 = ((1, 1, 2, 4), (1, 1, 1, 1))
  140. strategy2 = ((2, 2, 1, 4),)
  141. net = Net2(_w2, out_channel=8, kernel_size=(4, 4), pad_mode="same", stride=2,
  142. strategy1=strategy1, strategy2=strategy2)
  143. compile_net(net)
  144. def test_conv2d_transpose_all_rank_no_need_overlap():
  145. """
  146. Feature: test model parallel strategy
  147. Description: shard batch dimension, channel dimension and w dimension
  148. Expectation: compile success
  149. """
  150. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
  151. strategy1 = ((2, 2, 1, 4), (2, 1, 1, 1))
  152. strategy2 = ((2, 2, 1, 4),)
  153. net = Net2(_w1, out_channel=8, kernel_size=(2, 2), pad_mode="same", stride=2,
  154. strategy1=strategy1, strategy2=strategy2)
  155. compile_net(net)
  156. def test_conv2d_transpose_split_h_or_w_in_pad_mode():
  157. """
  158. Feature: test pad mode
  159. Description: shard batch dimension, channel dimension and w dimension in pad mode
  160. Expectation: compile failed
  161. """
  162. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
  163. strategy1 = ((2, 2, 1, 4), (2, 1, 1, 1))
  164. strategy2 = ((2, 2, 1, 4),)
  165. net = Net2(_w1, out_channel=8, kernel_size=(2, 2), pad_mode="pad", stride=2,
  166. strategy1=strategy1, strategy2=strategy2)
  167. with pytest.raises(RuntimeError):
  168. compile_net(net)
  169. def test_conv2d_transpose_split_h_in_same_mode():
  170. """
  171. Feature: test split h dimension
  172. Description: shard h dimension in same mode
  173. Expectation: compile success
  174. """
  175. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
  176. strategy1 = ((2, 2, 4, 1), (2, 1, 1, 1))
  177. strategy2 = ((2, 2, 4, 1),)
  178. net = Net2(_w1, out_channel=8, kernel_size=(2, 2), pad_mode="same", stride=2,
  179. strategy1=strategy1, strategy2=strategy2)
  180. compile_net(net)
  181. def test_conv2d_transpose_overlap_size_too_large():
  182. """
  183. Feature: test overlap size is too large
  184. Description: shard w dimension and overlap size larger than slice shape
  185. Expectation: compile failed
  186. """
  187. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
  188. strategy1 = ((1, 1, 1, 8), (1, 1, 1, 1))
  189. strategy2 = ((1, 1, 1, 8),)
  190. net = Net2(_w3, out_channel=8, kernel_size=(10, 10), pad_mode="same", stride=2,
  191. strategy1=strategy1, strategy2=strategy2)
  192. with pytest.raises(RuntimeError):
  193. compile_net(net)