
test_rmsprop.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P

# These cases target the GPU kernels; run everything in graph mode on GPU.
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")


class NetCenteredRMSProp(nn.Cell):
    """Wraps the raw P.ApplyCenteredRMSProp operator so one update step runs in graph mode."""

    def __init__(self, lr, decay, momentum, epsilon):
        super(NetCenteredRMSProp, self).__init__()
        self.rms_opt = P.ApplyCenteredRMSProp()
        self.lr = lr
        self.decay = decay
        self.momentum = momentum
        self.epsilon = epsilon

    def construct(self, var, g, mg, rms, mom):
        return self.rms_opt(var, mg, rms, mom, g, self.lr, self.decay, self.momentum, self.epsilon)


class NetRMSProp(nn.Cell):
    """Wraps the raw P.ApplyRMSProp operator so one update step runs in graph mode."""

    def __init__(self, lr, decay, momentum, epsilon):
        super(NetRMSProp, self).__init__()
        self.lr = lr
        self.decay = decay
        self.momentum = momentum
        self.epsilon = epsilon
        self.rms_opt = P.ApplyRMSProp()

    def construct(self, var, g, mg, rms, mom):
        # mg is accepted only to keep the same call signature as the centered
        # variant; the plain operator does not use a mean-gradient state.
        return self.rms_opt(var, rms, mom, self.lr, g, self.decay, self.momentum, self.epsilon)
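
# Note the operand orders used in the two construct calls above:
# ApplyCenteredRMSProp takes (var, mean_grad, mean_square, moment, grad, lr,
# decay, momentum, epsilon), while ApplyRMSProp takes (var, mean_square,
# moment, lr, grad, decay, momentum, epsilon).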


def rmsprop_numpy(variable, gradients, mean_square, moment,
                  learning_rate, decay, momentum, epsilon):
    # NumPy reference for one plain RMSProp step.
    mean_square = mean_square * decay + (1.0 - decay) * gradients * gradients
    moment = momentum * moment + learning_rate / np.sqrt(mean_square + epsilon) * gradients
    variable = variable - moment
    return variable, gradients, mean_square, moment


def rmspropcented_numpy(variable, gradients, mean_gradients, mean_square, moment,
                        learning_rate, decay, momentum, epsilon):
    # NumPy reference for one centered RMSProp step (also tracks the mean gradient).
    mean_gradients = mean_gradients * decay + (1.0 - decay) * gradients
    mean_square = mean_square * decay + (1.0 - decay) * gradients * gradients
    moment = momentum * moment + learning_rate / np.sqrt(
        mean_square - mean_gradients * mean_gradients + epsilon) * gradients
    variable = variable - moment
    return variable, gradients, mean_gradients, mean_square, moment
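
# For reference, the update rules implemented by the two helpers above (and
# expected of the kernels under test):
#
#   RMSProp:
#     mean_square <- decay * mean_square + (1 - decay) * grad^2
#     moment      <- momentum * moment + lr * grad / sqrt(mean_square + epsilon)
#     var         <- var - moment
#
#   Centered RMSProp (additionally tracks the running mean gradient):
#     mean_grad   <- decay * mean_grad + (1 - decay) * grad
#     mean_square <- decay * mean_square + (1 - decay) * grad^2
#     moment      <- momentum * moment + lr * grad / sqrt(mean_square - mean_grad^2 + epsilon)
#     var         <- var - moment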


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_rmsprop():
    learning_rate, decay, momentum, epsilon, centered = [0.5, 0.8, 0.9, 1e-3, True]

    variable_np = np.array([1.0, 2.0], dtype=np.float32)
    gradients_np = np.array([0.1, 0.2], dtype=np.float32)
    mean_gradients_np = np.array([0.0, 0.0], dtype=np.float32)
    mean_square_np = np.array([epsilon, epsilon], dtype=np.float32)
    moment_np = np.array([0.0, 0.0], dtype=np.float32)

    variable_ms = Tensor(variable_np)
    gradients_ms = Tensor(gradients_np)
    mean_gradients_ms = Tensor(mean_gradients_np)
    mean_square_ms = Tensor(mean_square_np)
    moment_ms = Tensor(moment_np)

    # Compute the expected state with the NumPy reference, then run one step
    # of the corresponding MindSpore operator on the Tensor copies.
    if centered:
        variable_np, gradients_np, mean_gradients_np, mean_square_np, moment_np = \
            rmspropcented_numpy(variable_np, gradients_np, mean_gradients_np, mean_square_np, moment_np,
                                learning_rate, decay, momentum, epsilon)
        net = NetCenteredRMSProp(learning_rate, decay, momentum, epsilon)
        _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms)
    else:
        variable_np, gradients_np, mean_square_np, moment_np = \
            rmsprop_numpy(variable_np, gradients_np, mean_square_np, moment_np,
                          learning_rate, decay, momentum, epsilon)
        net = NetRMSProp(learning_rate, decay, momentum, epsilon)
        _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms)

    # The assertions compare the input tensors, relying on the operator having
    # updated them in place; each state must match the NumPy reference within
    # a small tolerance.
    error = np.ones(shape=variable_np.shape) * 10e-6
    diff = variable_ms.asnumpy() - variable_np
    assert np.all(diff < error)

    error = np.ones(shape=gradients_np.shape) * 10e-6
    diff = gradients_ms.asnumpy() - gradients_np
    assert np.all(diff < error)

    error = np.ones(shape=mean_gradients_np.shape) * 10e-6
    diff = mean_gradients_ms.asnumpy() - mean_gradients_np
    assert np.all(diff < error)

    error = np.ones(shape=mean_square_np.shape) * 10e-6
    diff = mean_square_ms.asnumpy() - mean_square_np
    assert np.all(diff < error)

    error = np.ones(shape=moment_np.shape) * 10e-6
    diff = moment_ms.asnumpy() - moment_np
    assert np.all(diff < error)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_rmspropcenter():
    learning_rate, decay, momentum, epsilon, centered = [0.1, 0.3, 0.9, 1.0, False]

    variable_np = np.array([1.0, 2.0], dtype=np.float32)
    gradients_np = np.array([0.1, 0.2], dtype=np.float32)
    mean_gradients_np = np.array([0.0, 0.0], dtype=np.float32)
    mean_square_np = np.array([epsilon, epsilon], dtype=np.float32)
    moment_np = np.array([0.0, 0.0], dtype=np.float32)

    variable_ms = Tensor(variable_np)
    gradients_ms = Tensor(gradients_np)
    mean_gradients_ms = Tensor(mean_gradients_np)
    mean_square_ms = Tensor(mean_square_np)
    moment_ms = Tensor(moment_np)

    # Compute the expected state with the NumPy reference, then run one step
    # of the corresponding MindSpore operator on the Tensor copies.
    if centered:
        variable_np, gradients_np, mean_gradients_np, mean_square_np, moment_np = \
            rmspropcented_numpy(variable_np, gradients_np, mean_gradients_np, mean_square_np, moment_np,
                                learning_rate, decay, momentum, epsilon)
        net = NetCenteredRMSProp(learning_rate, decay, momentum, epsilon)
        _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms)
    else:
        variable_np, gradients_np, mean_square_np, moment_np = \
            rmsprop_numpy(variable_np, gradients_np, mean_square_np, moment_np,
                          learning_rate, decay, momentum, epsilon)
        net = NetRMSProp(learning_rate, decay, momentum, epsilon)
        _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms)

    # Compare each updated state against the NumPy reference within a small tolerance.
    error = np.ones(shape=variable_np.shape) * 10e-6
    diff = variable_ms.asnumpy() - variable_np
    assert np.all(diff < error)

    error = np.ones(shape=gradients_np.shape) * 10e-6
    diff = gradients_ms.asnumpy() - gradients_np
    assert np.all(diff < error)

    error = np.ones(shape=mean_gradients_np.shape) * 10e-6
    diff = mean_gradients_ms.asnumpy() - mean_gradients_np
    assert np.all(diff < error)

    error = np.ones(shape=mean_square_np.shape) * 10e-6
    diff = mean_square_ms.asnumpy() - mean_square_np
    assert np.all(diff < error)

    error = np.ones(shape=moment_np.shape) * 10e-6
    diff = moment_ms.asnumpy() - moment_np
    assert np.all(diff < error)
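
# One way to run these cases locally (assumes pytest and a GPU build of
# MindSpore are installed; the markers above are for the project's CI):
#
#   pytest -v test_rmsprop.py::test_rmsprop
#   pytest -v test_rmsprop.py::test_rmspropcenter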