test_fused_adam.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.common import dtype as mstype
from mindspore.common.parameter import Parameter
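

# Each network below builds one Adam update step out of primitive ops so the
# graph kernel pass (enable_graph_kernel=True) can fuse them; the tests
# compare the fused results against the NumPy reference CalFusedAdam below.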
class Net(nn.Cell):
    def __init__(self, decay_flag=True):
        super(Net, self).__init__()
        self.decay_flag = decay_flag
        self.op_mul = P.Mul()
        self.op_square = P.Square()
        self.op_sqrt = P.Sqrt()
        self.op_cast = P.Cast()
        self.op_reshape = P.Reshape()
        self.op_shape = P.Shape()
        self.param = Parameter(
            Tensor(np.array([1, 3, 5]).astype(np.float32)), name='param')
        self.m = Parameter(
            Tensor(np.array([0.11, 0.33, 0.55]).astype(np.float32)), name='m')
        self.v = Parameter(
            Tensor(np.array([1.2, 3.4, 5.6]).astype(np.float32)), name='v')

    @ms_function
    def construct(self, beta1, beta2, one_sub_beta_1, one_sub_beta_2, gradient, eps, weight_decay_tensor, lr):
        param_fp32 = self.op_cast(self.param, mstype.float32)
        m_fp32 = self.op_cast(self.m, mstype.float32)
        v_fp32 = self.op_cast(self.v, mstype.float32)
        gradient_fp32 = self.op_cast(gradient, mstype.float32)
        # First moment: m' = beta1 * m + (1 - beta1) * g
        next_m = self.op_mul(beta1, m_fp32) + \
            self.op_mul(self.op_cast(one_sub_beta_1, mstype.float32), gradient_fp32)
        # Second moment: v' = beta2 * v + (1 - beta2) * g^2
        next_v = self.op_mul(beta2, v_fp32) + \
            self.op_mul(self.op_cast(one_sub_beta_2, mstype.float32), self.op_square(gradient_fp32))
        update = next_m / (eps + self.op_sqrt(next_v))
        if self.decay_flag:
            update = self.op_mul(weight_decay_tensor, param_fp32) + update
        update_with_lr = self.op_mul(lr, update)
        next_param = param_fp32 - \
            self.op_reshape(update_with_lr, self.op_shape(param_fp32))
        # Chain the assignments through F.depend so the parameter, m and v
        # updates are kept in execution order and not optimized away.
        depend_v = F.depend(next_param, F.assign(self.param, next_param))
        depend_v = F.depend(depend_v, F.assign(self.m, next_m))
        depend_v = F.depend(depend_v, F.assign(self.v, next_v))
        return depend_v
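

# Same Adam step as Net, plus extra F.assign side effects before and after
# the update, used to check that side-effect ordering survives fusion.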
class SideEffectFusedAdamNet(nn.Cell):
    def __init__(self, decay_flag=True):
        super(SideEffectFusedAdamNet, self).__init__()
        self.decay_flag = decay_flag
        self.op_mul = P.Mul()
        self.op_square = P.Square()
        self.op_sqrt = P.Sqrt()
        self.op_cast = P.Cast()
        self.op_reshape = P.Reshape()
        self.op_shape = P.Shape()
        self.param = Parameter(
            Tensor(np.array([0, 0, 0]).astype(np.float32)), name='param')
        self.m = Parameter(
            Tensor(np.array([0.11, 0.33, 0.55]).astype(np.float32)), name='m')
        self.v = Parameter(
            Tensor(np.array([1.2, 3.4, 5.6]).astype(np.float32)), name='v')
        self.x = Parameter(
            Tensor(np.array([1, 3, 5]).astype(np.float32)), name='x')

    @ms_function
    def construct(self, beta1, beta2, one_sub_beta_1, one_sub_beta_2, gradient, eps, weight_decay_tensor, lr):
        F.assign(self.param, self.x)
        param_fp32 = self.op_cast(self.param, mstype.float32)
        m_fp32 = self.op_cast(self.m, mstype.float32)
        v_fp32 = self.op_cast(self.v, mstype.float32)
        gradient_fp32 = self.op_cast(gradient, mstype.float32)
        next_m = self.op_mul(beta1, m_fp32) + \
            self.op_mul(self.op_cast(one_sub_beta_1, mstype.float32), gradient_fp32)
        next_v = self.op_mul(beta2, v_fp32) + \
            self.op_mul(self.op_cast(one_sub_beta_2, mstype.float32), self.op_square(gradient_fp32))
        update = next_m / (eps + self.op_sqrt(next_v))
        if self.decay_flag:
            update = self.op_mul(weight_decay_tensor, param_fp32) + update
        update_with_lr = self.op_mul(lr, update)
        next_param = param_fp32 - \
            self.op_reshape(update_with_lr, self.op_shape(param_fp32))
        depend_v = F.depend(next_param, F.assign(self.param, next_param))
        depend_v = F.depend(depend_v, F.assign(self.m, next_m))
        depend_v = F.depend(depend_v, F.assign(self.v, next_v))
        F.assign(self.x, self.m)
        return depend_v
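

# NumPy reference for a single Adam step:
#   m' = beta1 * m + (1 - beta1) * g
#   v' = beta2 * v + (1 - beta2) * g^2
#   update = m' / (sqrt(v') + eps), plus weight_decay * p when decay is on
#   p' = p - lr * update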
def CalFusedAdam(beta1, beta2, one_sub_beta_1, one_sub_beta_2, gradient, eps, weight_decay_tensor, lr, param, m, v,
                 is_weight_decay=False):
    m_expect = beta1 * m + one_sub_beta_1 * gradient
    v_expect = beta2 * v + one_sub_beta_2 * gradient * gradient
    update = m_expect / (np.sqrt(v_expect) + eps)
    if is_weight_decay:
        update += weight_decay_tensor * param
    param_expect = param - lr * update
    return param_expect, m_expect, v_expect


def test_adam():
    np.random.seed(0)
    beta1 = np.array([0.9]).astype(np.float32)
    beta2 = np.array([0.999]).astype(np.float32)
    one_sub_beta_1 = (np.array([1.0]) - np.array([0.9])).astype(np.float32)
    one_sub_beta_2 = (np.array([1.0]) - np.array([0.999])).astype(np.float32)
    lr = np.array([0.012]).astype(np.float32)
    eps = np.array([1e-6]).astype(np.float32)
    weight_decay_tensor = np.array([0.021]).astype(np.float32)
    gradient = np.array([0.01, 0.03, 0.05]).astype(np.float32)
    m = np.array([0.11, 0.33, 0.55]).astype(np.float32)
    v = np.array([1.2, 3.4, 5.6]).astype(np.float32)
    param = np.array([1, 3, 5]).astype(np.float32)
    is_weight_decay = False
    opt = Net(is_weight_decay)
    _ = opt(Tensor(beta1), Tensor(beta2), Tensor(one_sub_beta_1), Tensor(one_sub_beta_2), Tensor(gradient), Tensor(eps),
            Tensor(weight_decay_tensor), Tensor(lr))
    param_expect, m_expect, v_expect = CalFusedAdam(
        beta1, beta2, one_sub_beta_1, one_sub_beta_2, gradient, eps, weight_decay_tensor, lr,
        param, m, v, is_weight_decay)
    assert np.allclose(opt.param.data.asnumpy(), param_expect,
                       rtol=1.e-4, atol=1.e-8, equal_nan=True)
    assert np.allclose(opt.m.data.asnumpy(), m_expect,
                       rtol=1.e-4, atol=1.e-8, equal_nan=True)
    assert np.allclose(opt.v.data.asnumpy(), v_expect,
                       rtol=1.e-4, atol=1.e-8, equal_nan=True)


def test_adam_weight_decay():
    np.random.seed(0)
    beta1 = np.array([0.9]).astype(np.float32)
    beta2 = np.array([0.999]).astype(np.float32)
    one_sub_beta_1 = (np.array([1.0]) - np.array([0.9])).astype(np.float32)
    one_sub_beta_2 = (np.array([1.0]) - np.array([0.999])).astype(np.float32)
    lr = np.array([0.012]).astype(np.float32)
    eps = np.array([1e-6]).astype(np.float32)
    weight_decay_tensor = np.array([0.021]).astype(np.float32)
    gradient = np.array([0.01, 0.03, 0.05]).astype(np.float32)
    m = np.array([0.11, 0.33, 0.55]).astype(np.float32)
    v = np.array([1.2, 3.4, 5.6]).astype(np.float32)
    param = np.array([1, 3, 5]).astype(np.float32)
    is_weight_decay = True
    opt = Net(is_weight_decay)
    _ = opt(Tensor(beta1), Tensor(beta2), Tensor(one_sub_beta_1), Tensor(one_sub_beta_2), Tensor(gradient), Tensor(eps),
            Tensor(weight_decay_tensor), Tensor(lr))
    param_expect, m_expect, v_expect = CalFusedAdam(
        beta1, beta2, one_sub_beta_1, one_sub_beta_2, gradient, eps, weight_decay_tensor, lr,
        param, m, v, is_weight_decay)
    assert np.allclose(opt.param.data.asnumpy(), param_expect,
                       rtol=1.e-4, atol=1.e-8, equal_nan=True)
    assert np.allclose(opt.m.data.asnumpy(), m_expect,
                       rtol=1.e-4, atol=1.e-8, equal_nan=True)
    assert np.allclose(opt.v.data.asnumpy(), v_expect,
                       rtol=1.e-4, atol=1.e-8, equal_nan=True)


def test_adam_side_effect():
    np.random.seed(0)
    beta1 = np.array([0.9]).astype(np.float32)
    beta2 = np.array([0.999]).astype(np.float32)
    one_sub_beta_1 = (np.array([1.0]) - np.array([0.9])).astype(np.float32)
    one_sub_beta_2 = (np.array([1.0]) - np.array([0.999])).astype(np.float32)
    lr = np.array([0.012]).astype(np.float32)
    eps = np.array([1e-6]).astype(np.float32)
    weight_decay_tensor = np.array([0.021]).astype(np.float32)
    gradient = np.array([0.01, 0.03, 0.05]).astype(np.float32)
    m = np.array([0.11, 0.33, 0.55]).astype(np.float32)
    v = np.array([1.2, 3.4, 5.6]).astype(np.float32)
    param = np.array([1, 3, 5]).astype(np.float32)
    is_weight_decay = False
    opt = SideEffectFusedAdamNet(is_weight_decay)
    _ = opt(Tensor(beta1), Tensor(beta2), Tensor(one_sub_beta_1), Tensor(one_sub_beta_2), Tensor(gradient), Tensor(eps),
            Tensor(weight_decay_tensor), Tensor(lr))
    param_expect, m_expect, v_expect = CalFusedAdam(
        beta1, beta2, one_sub_beta_1, one_sub_beta_2, gradient, eps, weight_decay_tensor, lr,
        param, m, v, is_weight_decay)
    assert np.allclose(opt.param.data.asnumpy(), param_expect,
                       rtol=1.e-4, atol=1.e-8, equal_nan=True)
    assert np.allclose(opt.m.data.asnumpy(), m_expect,
                       rtol=1.e-4, atol=1.e-8, equal_nan=True)
    assert np.allclose(opt.v.data.asnumpy(), v_expect,
                       rtol=1.e-4, atol=1.e-8, equal_nan=True)
    assert np.allclose(opt.x.data.asnumpy(), m_expect,
                       rtol=1.e-4, atol=1.e-8, equal_nan=True)
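

# Device-specific entry points: each enables graph kernel fusion for the
# given target in graph mode and reruns the corresponding check above.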
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_adam_gpu():
    context.set_context(mode=context.GRAPH_MODE,
                        enable_graph_kernel=True, device_target="GPU")
    test_adam()


def test_adam_ascend():
    context.set_context(mode=context.GRAPH_MODE,
                        enable_graph_kernel=True, device_target="Ascend")
    test_adam()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_adam_weight_decay_gpu():
    context.set_context(mode=context.GRAPH_MODE,
                        enable_graph_kernel=True, device_target="GPU")
    test_adam_weight_decay()


def test_adam_weight_decay_ascend():
    context.set_context(mode=context.GRAPH_MODE,
                        enable_graph_kernel=True, device_target="Ascend")
    test_adam_weight_decay()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_adam_side_effect_gpu():
    context.set_context(mode=context.GRAPH_MODE,
                        enable_graph_kernel=True, device_target="GPU")
    test_adam_side_effect()


@pytest.mark.level2
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_adam_side_effect_ascend():
    context.set_context(mode=context.GRAPH_MODE,
                        enable_graph_kernel=True, device_target="Ascend")
    test_adam_side_effect()