You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

test_bn_prelu_cell.py 8.9 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260
  1. # Copyright 2019 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. from mindspore.train.model import Model
  15. from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
  16. from mindspore.nn.optim.momentum import Momentum
  17. from mindspore import Tensor
  18. import mindspore as ms
  19. import numpy as np
  20. from mindspore.ops import operations as P
  21. import mindspore.nn as nn
  22. from mindspore.common.parameter import Parameter
  23. from mindspore.common.initializer import initializer
  24. from mindspore.ops import functional as F
  25. from mindspore.nn import WithLossCell
  26. import mindspore.common.dtype as DT
  27. from tests.dataset_mock import MindData
  28. from mindspore.train.parallel_utils import ParallelMode
  29. from mindspore import context
  30. class Dataset(MindData):
  31. def __init__(self, predict, label, length=3):
  32. super(Dataset, self).__init__(size=length)
  33. self.predict = predict
  34. self.label = label
  35. self.index = 0
  36. self.length = length
  37. def __iter__(self):
  38. return self
  39. def __next__(self):
  40. if self.index >= self.length:
  41. raise StopIteration
  42. self.index += 1
  43. return self.predict, self.label
  44. def reset(self):
  45. self.index = 0
class FusedBatchNorm(nn.Cell):
    """Batch Normalization base class.

    Normalizes with batch statistics while training and with the tracked
    moving statistics during inference.  The moving mean/variance updates are
    built from explicit Sub/Mul/AssignSub primitives so a parallel strategy
    can be attached to every step of the update.

    Args:
        num_features: channel count; must be >= 1.
        eps: numerical-stability epsilon passed to BatchNorm.
        momentum: moving-average momentum in [0, 1]; stored as 1 - momentum.
        affine: whether gamma/beta are trainable.
        gamma_init, beta_init, moving_mean_init, moving_var_init: initializer
            specs forwarded to `initializer`.

    Raises:
        ValueError: if num_features < 1 or momentum is outside [0, 1].
    """
    def __init__(self,
                 num_features,
                 eps=1e-5,
                 momentum=0.1,
                 affine=True,
                 gamma_init='ones',
                 beta_init='zeros',
                 moving_mean_init='zeros',
                 moving_var_init='ones'):
        super(FusedBatchNorm, self).__init__()
        if num_features < 1:
            raise ValueError("num_features must be at least 1")
        if momentum < 0 or momentum > 1:
            raise ValueError("momentum should be a number in range [0, 1], but got {}".format(momentum))
        self.num_features = num_features
        self.eps = eps
        # Stored as (1 - momentum): construct() subtracts
        # (moving - batch) * (1 - momentum) from each moving statistic.
        self.momentum = Tensor(1.0 - momentum, DT.float32)
        self.gamma = Parameter(initializer(
            gamma_init, num_features), name="gamma", requires_grad=affine)
        self.beta = Parameter(initializer(
            beta_init, num_features), name="beta", requires_grad=affine)
        self.moving_mean = Parameter(initializer(
            moving_mean_init, num_features), name="mean", requires_grad=False)
        self.moving_variance = Parameter(initializer(
            moving_var_init, num_features), name="variance", requires_grad=False)
        self.bn_train = P.BatchNorm(is_training=True,
                                    epsilon=self.eps)
        self.bn_infer = P.BatchNorm(is_training=False,
                                    epsilon=self.eps)
        # NOTE(review): ((1), (1)) evaluates to (1, 1) — a flat tuple of ints,
        # unlike the ((1,), ...) tuple-of-tuples form used for mul_mean below.
        # Confirm which shape set_strategy actually expects here.
        self.sub_mean = P.Sub().set_strategy(((1), (1)))
        self.sub_var = P.Sub().set_strategy(((1), (1)))
        self.mul_mean = P.Mul().set_strategy(((1, ), ()))
        self.mul_var = P.Mul().set_strategy(((1, ), ()))
        self.assign_sub_mean = P.AssignSub().set_strategy(((1, ), (1,)))
        self.assign_sub_var = P.AssignSub().set_strategy(((1), (1)))
        # NOTE(review): sub_mean2/sub_var2 are created but never used in
        # construct() — presumably leftovers; verify before removing.
        self.sub_mean2 = P.Sub().set_strategy(((1), (1)))
        self.sub_var2 = P.Sub().set_strategy(((1), (1)))

    def set_strategy(self, strategy):
        # Apply the same parallel strategy to both the training and the
        # inference BatchNorm primitives.
        self.bn_train.set_strategy(strategy)
        self.bn_infer.set_strategy(strategy)

    def _check_data_dim(self, x):
        # Subclasses are expected to validate the input rank.
        raise NotImplementedError

    def construct(self, x):
        if self.training:
            # Training path: normalize with batch statistics; the returned
            # batch_mean/batch_var feed the moving-average update below.
            y, batch_mean, batch_var, _, _ = \
                self.bn_train(x,
                              self.gamma,
                              self.beta,
                              None,
                              None)
            # moving <- moving - (moving - batch) * (1 - momentum)
            mean_sub = self.sub_mean(self.moving_mean, batch_mean)
            temp_mean = self.mul_mean(mean_sub, self.momentum)
            mean_sub2 = self.sub_var(self.moving_variance, batch_var)
            temp_variance = self.mul_var(mean_sub2, self.momentum)
            # F.depend ties the AssignSub side effects to y so graph mode
            # executes the updates before y is returned.
            y = F.depend(y, self.assign_sub_mean(self.moving_mean, temp_mean))
            y = F.depend(y, self.assign_sub_var(self.moving_variance, temp_variance))
        else:
            # Inference path: use tracked moving statistics; [0] keeps only
            # the normalized output from BatchNorm's multi-output tuple.
            y = self.bn_infer(x,
                              self.gamma,
                              self.beta,
                              self.moving_mean,
                              self.moving_variance)[0]
        return y

    def extend_repr(self):
        # Human-readable parameter summary appended to the Cell repr.
        return 'num_features={}, eps={}, momentum={}, ' \
               'beta={}, gamma={}, ' \
               'moving_mean={}, moving_variance={} ' \
               .format(self.num_features,
                       self.eps,
                       self.momentum,
                       self.beta,
                       self.gamma,
                       self.moving_mean,
                       self.moving_variance)
class PReLU(nn.Cell):
    """
    PReLU activation function.

    Computes prelu value of a 4-dim tensor(NCHW).
    PReLU: out = max(0, A) + min(0, wA)

    Args:
        channel: Integer. The dimensionality of w. Default: 1.
        w: Float. The initial value of w. Default: 0.25.

    Returns:
        Tensor, has the same type as features.

    Examples:
        prelu = nn.PReLU(1, [np.float32(0.25)]) # or prelu = nn.PReLU(33, Tensor(np.random.rand(33), ms.float32)])
        input_data = Tensor(np.random.rand(1, 33, 4, 4), ms.float32)
        output = prelu.construct(input_data)
    """
    def __init__(self, channel=1, w=0.25):
        super(PReLU, self).__init__()
        # A scalar float is broadcast to a per-channel float32 weight vector.
        if isinstance(w, (np.float32, float)):
            tmp = np.empty((channel,), dtype=np.float32)
            tmp.fill(w)
            w = tmp
        elif isinstance(w, (int, bool, complex, str)):
            raise TypeError("w only support input type float32 and float")
        if not isinstance(w, Tensor):
            w = Tensor(w)
        self.w = Parameter(initializer(w, [channel,]), name='a')
        self.prelu = P.PReLU()
        # NOTE(review): ((1)) is just the int 1, not a strategy tuple such as
        # ((1,),) — confirm what set_strategy expects here.
        self.relu = P.ReLU().set_strategy(((1)))

    def construct(self, x):
        # NOTE(review): rebinds self.w to relu(w) on every call, clamping the
        # slope to be non-negative before PReLU — confirm this in-construct
        # parameter reassignment is intended (graph-mode semantics).
        self.w = self.relu(self.w)
        return self.prelu(x, self.w)
  153. class BNNet(nn.Cell):
  154. def __init__(self, strategy0, strategy1, strategy2):
  155. super(BNNet, self).__init__()
  156. self.bn = FusedBatchNorm(512)
  157. self.prelu = PReLU(512)
  158. def construct(self, x):
  159. x = self.bn(x)
  160. x = self.prelu(x)
  161. return x
  162. def bn_net(strategy0, strategy1, strategy2):
  163. return BNNet(strategy0=strategy0, strategy1=strategy1, strategy2=strategy2)
def bn_common(parallel_mode, train_flag, strategy0=None, strategy1=None, strategy2=None, strategy_loss=None):
    """Build a BNNet model in graph mode and either train it or run predict.

    Args:
        parallel_mode: ParallelMode value set on the auto-parallel context.
        train_flag: bool; True trains for `epoch_size` epochs on the stub
            dataset, False wraps the net in WithLossCell and runs one
            `_predict` call on the fixed input.
        strategy0, strategy1, strategy2: forwarded to bn_net.
        strategy_loss: strategy applied to the softmax-cross-entropy op.
    """
    context.set_context(mode=context.GRAPH_MODE)
    # batch_size documents the fixed batch dimension used in the tensors below.
    batch_size = 32
    learning_rate = 0.1
    momentum = 0.9
    epoch_size = 2
    rank_size = 8
    # Fixed all-ones batch: 32 samples x 512 features, integer class labels.
    predict = Tensor(np.ones([32, 512]), dtype=ms.float32)
    label = Tensor(np.ones([32]), dtype=ms.int32)
    dataset = Dataset(predict, label, 2)
    net = bn_net(strategy0, strategy1, strategy2)
    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
    loss.softmax_cross_entropy.set_strategy(strategy_loss)
    # Momentum optimizer: weight decay 0.0001, loss scale 1024 * rank_size.
    opt = Momentum(net.trainable_params(), learning_rate, momentum, 0.0001, 1024 * rank_size)
    if not train_flag:
        # Prediction path: wrap net with its loss cell and keep train-mode
        # graph construction enabled.
        net = WithLossCell(net, loss)
        net.set_train()
    if parallel_mode == ParallelMode.DATA_PARALLEL:
        context.set_auto_parallel_context(parameter_broadcast=True)
    context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8)
    model = Model(net, loss, opt)
    if train_flag:
        model.train(epoch_size, dataset, dataset_sink_mode=False)
    else:
        # NOTE(review): relies on the private Model._predict API.
        model._predict(predict, label)
  189. def test_data_parallel():
  190. parallel_mode = ParallelMode.DATA_PARALLEL
  191. train_flag = True
  192. bn_common(parallel_mode, train_flag)
  193. def auto_parallel():
  194. train_flag = True
  195. parallel_mode = ParallelMode.AUTO_PARALLEL
  196. bn_common(parallel_mode, train_flag)
  197. def Xtest_data_parallel_predict():
  198. parallel_mode = ParallelMode.DATA_PARALLEL
  199. train_flag = False
  200. bn_common(parallel_mode, train_flag)
  201. def Xtest_semi_auto_parallel_predict():
  202. train_flag = False
  203. parallel_mode = ParallelMode.SEMI_AUTO_PARALLEL
  204. bn_common(parallel_mode, train_flag)
  205. def Xtest_auto_parallel_predict():
  206. train_flag = False
  207. parallel_mode = ParallelMode.AUTO_PARALLEL
  208. bn_common(parallel_mode, train_flag)
if __name__ == '__main__':
    # Manual entry point: run the auto-parallel training case directly.
    auto_parallel()