
test_rmsprop.py 6.7 kB

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE, device_target="GPU")


class NetCenteredRMSProp(nn.Cell):
    """Single-step wrapper around the ApplyCenteredRMSProp operator."""

    def __init__(self, lr, decay, momentum, epsilon):
        super(NetCenteredRMSProp, self).__init__()
        self.rms_opt = P.ApplyCenteredRMSProp()
        self.lr = lr
        self.decay = decay
        self.momentum = momentum
        self.epsilon = epsilon

    def construct(self, var, g, mg, rms, mom):
        return self.rms_opt(var, mg, rms, mom, g, self.lr, self.decay, self.momentum, self.epsilon)


class NetRMSProp(nn.Cell):
    """Single-step wrapper around the ApplyRMSProp operator."""

    def __init__(self, lr, decay, momentum, epsilon):
        super(NetRMSProp, self).__init__()
        self.lr = lr
        self.decay = decay
        self.momentum = momentum
        self.epsilon = epsilon
        self.rms_opt = P.ApplyRMSProp()

    def construct(self, var, g, mg, rms, mom):
        return self.rms_opt(var, rms, mom, self.lr, g, self.decay, self.momentum, self.epsilon)


def rmsprop_numpy(variable, gradients, mean_square, moment,
                  learning_rate, decay, momentum, epsilon):
    """NumPy reference for one RMSProp step; returns the updated buffers."""
    mean_square = mean_square * decay + (1.0 - decay) * gradients * gradients
    moment = momentum * moment + learning_rate / np.sqrt(mean_square + epsilon) * gradients
    variable = variable - moment
    return variable, gradients, mean_square, moment


def rmspropcented_numpy(variable, gradients, mean_gradients, mean_square, moment,
                        learning_rate, decay, momentum, epsilon):
    """NumPy reference for one centered RMSProp step; returns the updated buffers."""
    mean_gradients = mean_gradients * decay + (1.0 - decay) * gradients
    mean_square = mean_square * decay + (1.0 - decay) * gradients * gradients
    moment = momentum * moment + learning_rate / np.sqrt(
        mean_square - mean_gradients * mean_gradients + epsilon) * gradients
    variable = variable - moment
    return variable, gradients, mean_gradients, mean_square, moment


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_rmsprop():
    learning_rate, decay, momentum, epsilon, centered = [0.5, 0.8, 0.9, 1e-3, True]

    variable_np = np.array([1.0, 2.0], dtype=np.float32)
    gradients_np = np.array([0.1, 0.2], dtype=np.float32)
    mean_gradients_np = np.array([0.0, 0.0], dtype=np.float32)
    mean_square_np = np.array([epsilon, epsilon], dtype=np.float32)
    moment_np = np.array([0.0, 0.0], dtype=np.float32)

    variable_ms = Tensor(variable_np)
    gradients_ms = Tensor(gradients_np)
    mean_gradients_ms = Tensor(mean_gradients_np)
    mean_square_ms = Tensor(mean_square_np)
    moment_ms = Tensor(moment_np)

    # Compute the expected state with the NumPy reference, then run the MindSpore op.
    if centered:
        variable_np, gradients_np, mean_gradients_np, mean_square_np, moment_np = rmspropcented_numpy(
            variable_np, gradients_np, mean_gradients_np, mean_square_np, moment_np,
            learning_rate, decay, momentum, epsilon)
        net = NetCenteredRMSProp(learning_rate, decay, momentum, epsilon)
        _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms)
    else:
        variable_np, gradients_np, mean_square_np, moment_np = rmsprop_numpy(
            variable_np, gradients_np, mean_square_np, moment_np,
            learning_rate, decay, momentum, epsilon)
        net = NetRMSProp(learning_rate, decay, momentum, epsilon)
        _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms)

    error = np.ones(shape=variable_np.shape) * 10e-6
    diff = np.abs(variable_ms.asnumpy() - variable_np)
    assert np.all(diff < error)

    error = np.ones(shape=gradients_np.shape) * 10e-6
    diff = np.abs(gradients_ms.asnumpy() - gradients_np)
    assert np.all(diff < error)

    error = np.ones(shape=mean_gradients_np.shape) * 10e-6
    diff = np.abs(mean_gradients_ms.asnumpy() - mean_gradients_np)
    assert np.all(diff < error)

    error = np.ones(shape=mean_square_np.shape) * 10e-6
    diff = np.abs(mean_square_ms.asnumpy() - mean_square_np)
    assert np.all(diff < error)

    error = np.ones(shape=moment_np.shape) * 10e-6
    diff = np.abs(moment_ms.asnumpy() - moment_np)
    assert np.all(diff < error)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_rmspropcenter():
    learning_rate, decay, momentum, epsilon, centered = [0.1, 0.3, 0.9, 1.0, False]

    variable_np = np.array([1.0, 2.0], dtype=np.float32)
    gradients_np = np.array([0.1, 0.2], dtype=np.float32)
    mean_gradients_np = np.array([0.0, 0.0], dtype=np.float32)
    mean_square_np = np.array([epsilon, epsilon], dtype=np.float32)
    moment_np = np.array([0.0, 0.0], dtype=np.float32)

    variable_ms = Tensor(variable_np)
    gradients_ms = Tensor(gradients_np)
    mean_gradients_ms = Tensor(mean_gradients_np)
    mean_square_ms = Tensor(mean_square_np)
    moment_ms = Tensor(moment_np)

    # Compute the expected state with the NumPy reference, then run the MindSpore op.
    if centered:
        variable_np, gradients_np, mean_gradients_np, mean_square_np, moment_np = rmspropcented_numpy(
            variable_np, gradients_np, mean_gradients_np, mean_square_np, moment_np,
            learning_rate, decay, momentum, epsilon)
        net = NetCenteredRMSProp(learning_rate, decay, momentum, epsilon)
        _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms)
    else:
        variable_np, gradients_np, mean_square_np, moment_np = rmsprop_numpy(
            variable_np, gradients_np, mean_square_np, moment_np,
            learning_rate, decay, momentum, epsilon)
        net = NetRMSProp(learning_rate, decay, momentum, epsilon)
        _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms)

    error = np.ones(shape=variable_np.shape) * 10e-6
    diff = np.abs(variable_ms.asnumpy() - variable_np)
    assert np.all(diff < error)

    error = np.ones(shape=gradients_np.shape) * 10e-6
    diff = np.abs(gradients_ms.asnumpy() - gradients_np)
    assert np.all(diff < error)

    error = np.ones(shape=mean_gradients_np.shape) * 10e-6
    diff = np.abs(mean_gradients_ms.asnumpy() - mean_gradients_np)
    assert np.all(diff < error)

    error = np.ones(shape=mean_square_np.shape) * 10e-6
    diff = np.abs(mean_square_ms.asnumpy() - mean_square_np)
    assert np.all(diff < error)

    error = np.ones(shape=moment_np.shape) * 10e-6
    diff = np.abs(moment_ms.asnumpy() - moment_np)
    assert np.all(diff < error)
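
For a quick sanity check of the reference math outside the GPU test harness, the update can be exercised with plain NumPy. The snippet below is a minimal standalone sketch (no MindSpore required) that mirrors rmspropcented_numpy using the same starting state and hyperparameters as test_rmsprop; the printed values are what ApplyCenteredRMSProp is expected to produce for var.

import numpy as np

# Same starting state and hyperparameters as test_rmsprop (centered variant).
lr, decay, momentum, epsilon = 0.5, 0.8, 0.9, 1e-3
var = np.array([1.0, 2.0], dtype=np.float32)
grad = np.array([0.1, 0.2], dtype=np.float32)
mg = np.zeros_like(var)                 # running mean of gradients
ms = np.full_like(var, epsilon)         # running mean of squared gradients
mom = np.zeros_like(var)                # momentum buffer

# One centered RMSProp step, mirroring rmspropcented_numpy above.
mg = mg * decay + (1.0 - decay) * grad
ms = ms * decay + (1.0 - decay) * grad * grad
mom = momentum * mom + lr / np.sqrt(ms - mg * mg + epsilon) * grad
var = var - mom

print("updated var:", var)  # expected to agree with the GPU op to ~1e-5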