You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

test_print.py 4.9 kB

4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
  14. import numpy as np
  15. import mindspore as ms
  16. from mindspore import context, Tensor, Parameter
  17. from mindspore.common.api import _cell_graph_executor
  18. from mindspore.nn import Cell, TrainOneStepCell, Momentum, BatchNorm2d, BatchNorm1d
  19. from mindspore.ops import operations as P
  20. from tests.security_utils import security_off_wrap
  21. class Net(Cell):
  22. def __init__(self, conv2d_weight, out_channel, kernel_size, pad_mode, stride,
  23. strategy1=None, strategy2=None):
  24. super().__init__()
  25. self.conv2d = P.Conv2D(out_channel=out_channel, kernel_size=kernel_size,
  26. pad_mode=pad_mode, stride=stride).shard(strategy1)
  27. self.conv2d_weight = Parameter(conv2d_weight, "w1")
  28. self.bn = BatchNorm2d(8)
  29. self.bn.bn_train.shard(strategy2)
  30. self.print = P.Print()
  31. def construct(self, x, b):
  32. out = self.conv2d(x, self.conv2d_weight)
  33. self.print("output is", out)
  34. out = self.bn(out)
  35. return out
  36. _x = Tensor(np.ones([32, 16, 8, 8]), dtype=ms.float32)
  37. _w1 = Tensor(np.ones([8, 16, 2, 2]), dtype=ms.float32)
  38. _b = Tensor(np.ones([32, 16, 8, 8]), dtype=ms.float32)
  39. def compile_net(net):
  40. optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
  41. train_net = TrainOneStepCell(net, optimizer)
  42. train_net.set_auto_parallel()
  43. train_net.set_train()
  44. _cell_graph_executor.compile(train_net, _x, _b)
  45. context.reset_auto_parallel_context()
  46. @security_off_wrap
  47. def test_batchnorm_data_parallel():
  48. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
  49. strategy1 = ((8, 1, 1, 1), (1, 1, 1, 1))
  50. strategy2 = ((8, 1, 1, 1), (1,), (1,), (1,), (1,))
  51. net = Net(_w1, out_channel=8, kernel_size=2, pad_mode="same", stride=1, strategy1=strategy1, strategy2=strategy2)
  52. compile_net(net)
  53. @security_off_wrap
  54. def test_batchnorm_model_parallel1():
  55. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
  56. strategy1 = ((2, 2, 1, 1), (2, 2, 1, 1))
  57. strategy2 = ((2, 1, 2, 2), (1,), (1,), (1,), (1,))
  58. net = Net(_w1, out_channel=8, kernel_size=2, pad_mode="same", stride=1, strategy1=strategy1, strategy2=strategy2)
  59. compile_net(net)
  60. @security_off_wrap
  61. def test_batchnorm_model_parallel2():
  62. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=32, global_rank=0)
  63. strategy1 = ((2, 2, 2, 2), (2, 2, 1, 1))
  64. strategy2 = ((1, 8, 1, 1), (8,), (8,), (8,), (8,))
  65. net = Net(_w1, out_channel=8, kernel_size=2, pad_mode="same", stride=2, strategy1=strategy1, strategy2=strategy2)
  66. compile_net(net)
  67. class Net2(Cell):
  68. def __init__(self, strategy1=None, strategy2=None):
  69. super().__init__()
  70. self.bn = BatchNorm1d(8)
  71. self.bn.bn_train.shard(strategy1)
  72. self.relu = P.ReLU().shard(strategy2)
  73. def construct(self, x, b):
  74. out = self.bn(x)
  75. out = self.relu(out)
  76. return out
  77. _x1 = Tensor(np.ones([32, 8]), dtype=ms.float32)
  78. _b1 = Tensor(np.ones([32, 8]), dtype=ms.float32)
  79. def compile_net2(net):
  80. optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
  81. train_net = TrainOneStepCell(net, optimizer)
  82. train_net.set_auto_parallel()
  83. train_net.set_train()
  84. _cell_graph_executor.compile(train_net, _x1, _b1)
  85. context.reset_auto_parallel_context()
  86. def test_batchnorm1d_data_parallel():
  87. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
  88. strategy1 = ((8, 1), (1,), (1,), (1,), (1,))
  89. strategy2 = ((8, 1),)
  90. net = Net2(strategy1=strategy1, strategy2=strategy2)
  91. compile_net2(net)
  92. def test_batchnorm1d_model_parallel1():
  93. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
  94. strategy1 = ((1, 8), (8,), (8,), (8,), (8,))
  95. strategy2 = ((1, 8),)
  96. net = Net2(strategy1=strategy1, strategy2=strategy2)
  97. compile_net2(net)
  98. def test_batchnorm1d_model_parallel2():
  99. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=32, global_rank=0)
  100. strategy1 = ((2, 4), (4,), (4,), (4,), (4,))
  101. strategy2 = ((2, 4),)
  102. net = Net2(strategy1=strategy1, strategy2=strategy2)
  103. compile_net2(net)