You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

test_conv2d_transpose.py 7.2 kB

4 years ago
4 years ago
4 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183
  1. # Copyright 2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. import numpy as np
  15. import pytest
  16. import mindspore as ms
  17. from mindspore import context, Tensor, Parameter
  18. from mindspore.common.api import _cell_graph_executor
  19. from mindspore.nn import Cell, TrainOneStepCell, Momentum
  20. from mindspore.ops import operations as P
  21. class Net(Cell):
  22. def __init__(self, conv2d_weight, out_channel, kernel_size, pad_mode, stride,
  23. strategy1=None, strategy2=None):
  24. super().__init__()
  25. self.conv2d_transpose = P.Conv2DTranspose(out_channel=out_channel, kernel_size=kernel_size,
  26. pad_mode=pad_mode, stride=stride).shard(strategy1)
  27. self.neg = P.Neg().shard(strategy2)
  28. self.weight = Parameter(conv2d_weight, "w1")
  29. def construct(self, x, b):
  30. out = self.conv2d_transpose(x, self.weight, (32, 16, 8, 8))
  31. out = self.neg(out)
  32. return out
  33. class Net2(Cell):
  34. def __init__(self, conv2d_weight, out_channel, kernel_size, pad_mode, stride,
  35. strategy1=None, strategy2=None):
  36. super().__init__()
  37. self.conv2d_transpose = P.Conv2DTranspose(out_channel=out_channel, kernel_size=kernel_size,
  38. pad_mode=pad_mode, stride=stride).shard(strategy1)
  39. self.neg = P.Neg().shard(strategy2)
  40. self.weight = Parameter(conv2d_weight, "w1")
  41. def construct(self, x, b):
  42. out = self.conv2d_transpose(x, self.weight, (32, 16, 16, 16))
  43. out = self.neg(out)
  44. return out
# Shared test fixtures (NCHW layout, all ones, float32).
_x = Tensor(np.ones([32, 8, 8, 8]), dtype=ms.float32)    # network input
_w1 = Tensor(np.ones([8, 16, 2, 2]), dtype=ms.float32)   # 2x2 kernel weight
_w2 = Tensor(np.ones([8, 16, 4, 4]), dtype=ms.float32)   # 4x4 kernel weight
_w3 = Tensor(np.ones([8, 16, 10, 10]), dtype=ms.float32) # 10x10 kernel (oversized-overlap case)
_w4 = Tensor(np.ones([8, 16, 3, 3]), dtype=ms.float32)   # 3x3 kernel weight (unused here)
_b = Tensor(np.ones([32, 16, 8, 8]), dtype=ms.float32)   # extra construct arg / label placeholder
  51. def compile_net(net):
  52. optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
  53. train_net = TrainOneStepCell(net, optimizer)
  54. train_net.set_auto_parallel()
  55. train_net.set_train()
  56. _cell_graph_executor.compile(train_net, _x, _b)
  57. context.reset_auto_parallel_context()
  58. def test_conv2d_transpose_data_parallel():
  59. """
  60. Feature: test data parallel strategy
  61. Description: only shard batch dimension
  62. Expectation: compile success
  63. """
  64. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
  65. strategy1 = ((8, 1, 1, 1), (1, 1, 1, 1))
  66. strategy2 = ((8, 1, 1, 1),)
  67. net = Net(_w1, out_channel=8, kernel_size=2, pad_mode="same", stride=1, strategy1=strategy1, strategy2=strategy2)
  68. compile_net(net)
  69. def test_conv2d_transpose_model_parallel1():
  70. """
  71. Feature: test model parallel strategy
  72. Description: only shard batch dimension and channel dimension
  73. Expectation: compile success
  74. """
  75. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
  76. strategy1 = ((2, 2, 1, 1), (2, 2, 1, 1))
  77. strategy2 = ((8, 1, 1, 1),)
  78. net = Net(_w1, out_channel=8, kernel_size=2, pad_mode="same", stride=1, strategy1=strategy1, strategy2=strategy2)
  79. compile_net(net)
  80. def test_conv2d_transpose_model_parallel2():
  81. """
  82. Feature: test model parallel strategy
  83. Description: shard batch dimension and w dimension
  84. Expectation: compile success
  85. """
  86. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
  87. strategy1 = ((2, 1, 1, 4), (1, 1, 1, 1))
  88. strategy2 = ((2, 1, 1, 4),)
  89. net = Net2(_w2, out_channel=8, kernel_size=(4, 4), pad_mode="same", stride=2,
  90. strategy1=strategy1, strategy2=strategy2)
  91. compile_net(net)
  92. def test_conv2d_transpose_model_parallel3():
  93. """
  94. Feature: test model parallel strategy
  95. Description: shard batch dimension, channel dimension and w dimension
  96. Expectation: compile success
  97. """
  98. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
  99. strategy1 = ((2, 2, 1, 4), (2, 1, 1, 1))
  100. strategy2 = ((2, 2, 1, 4),)
  101. net = Net2(_w2, out_channel=8, kernel_size=(4, 4), pad_mode="same", stride=2,
  102. strategy1=strategy1, strategy2=strategy2)
  103. compile_net(net)
  104. def test_conv2d_transpose_all_rank_no_need_overlap():
  105. """
  106. Feature: test model parallel strategy
  107. Description: shard batch dimension, channel dimension and w dimension
  108. Expectation: compile success
  109. """
  110. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
  111. strategy1 = ((2, 2, 1, 4), (2, 1, 1, 1))
  112. strategy2 = ((2, 2, 1, 4),)
  113. net = Net2(_w1, out_channel=8, kernel_size=(2, 2), pad_mode="same", stride=2,
  114. strategy1=strategy1, strategy2=strategy2)
  115. compile_net(net)
  116. def test_conv2d_transpose_split_h_or_w_in_pad_mode():
  117. """
  118. Feature: test pad mode
  119. Description: shard batch dimension, channel dimension and w dimension in pad mode
  120. Expectation: compile failed
  121. """
  122. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
  123. strategy1 = ((2, 2, 1, 4), (2, 1, 1, 1))
  124. strategy2 = ((2, 2, 1, 4),)
  125. net = Net2(_w1, out_channel=8, kernel_size=(2, 2), pad_mode="pad", stride=2,
  126. strategy1=strategy1, strategy2=strategy2)
  127. with pytest.raises(RuntimeError):
  128. compile_net(net)
  129. def test_conv2d_transpose_split_h_in_same_mode():
  130. """
  131. Feature: test split h dimension
  132. Description: shard h dimension in same mode
  133. Expectation: compile failed
  134. """
  135. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
  136. strategy1 = ((2, 2, 4, 1), (2, 1, 1, 1))
  137. strategy2 = ((2, 2, 1, 4),)
  138. net = Net2(_w1, out_channel=8, kernel_size=(2, 2), pad_mode="same", stride=2,
  139. strategy1=strategy1, strategy2=strategy2)
  140. with pytest.raises(RuntimeError):
  141. compile_net(net)
  142. def test_conv2d_transpose_overlap_size_too_large():
  143. """
  144. Feature: test overlap size is too large
  145. Description: shard w dimension and overlap size larger than slice shape
  146. Expectation: compile failed
  147. """
  148. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
  149. strategy1 = ((1, 1, 1, 8), (1, 1, 1, 1))
  150. strategy2 = ((1, 1, 1, 8),)
  151. net = Net2(_w3, out_channel=8, kernel_size=(10, 10), pad_mode="same", stride=2,
  152. strategy1=strategy1, strategy2=strategy2)
  153. with pytest.raises(RuntimeError):
  154. compile_net(net)