You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

test_adafactor.py 2.7 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273
  1. # Copyright 2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. import numpy as np
  15. import mindspore as ms
  16. from mindspore import context, Tensor, Parameter
  17. from mindspore.common.api import _cell_graph_executor
  18. from mindspore.nn import Cell, TrainOneStepCell
  19. from mindspore.nn.optim.adafactor import AdaFactor
  20. from mindspore.ops import operations as P
  21. class Net(Cell):
  22. def __init__(self, matmul_weight, add_weight, strategy1=None, strategy2=None):
  23. super().__init__()
  24. self.matmul = P.MatMul().shard(strategy1)
  25. self.add = P.BiasAdd().shard(strategy2)
  26. self.mul_weight = Parameter(matmul_weight, "w1")
  27. self.bias = Parameter(add_weight, "bias")
  28. def construct(self, x, b):
  29. out = self.matmul(x, self.mul_weight)
  30. out = self.add(out, self.bias)
  31. return out
# Shared compile-time fixtures for the tests below.
_x = Tensor(np.ones([64, 32]), dtype=ms.float32)   # network input: batch 64, 32 features
_w1 = Tensor(np.ones([32, 32]), dtype=ms.float32)  # MatMul weight ("w1")
_w2 = Tensor(np.ones([32]), dtype=ms.float32)      # BiasAdd bias ("bias")
_b = Tensor(np.ones([64, 32]), dtype=ms.float32)   # second compile input; unused by Net.construct
  36. def compile_net(net):
  37. scale_parameter = False
  38. relative_step = True
  39. warmup_init = True
  40. compression = True
  41. optimizer = AdaFactor(net.trainable_params(), learning_rate=None, weight_decay=0.9,
  42. scale_parameter=scale_parameter, relative_step=relative_step,
  43. warmup_init=warmup_init, compression=compression)
  44. train_net = TrainOneStepCell(net, optimizer)
  45. train_net.set_auto_parallel()
  46. train_net.set_train()
  47. _cell_graph_executor.compile(train_net, _x, _b)
  48. context.reset_auto_parallel_context()
  49. def test_opt_data_parallel():
  50. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
  51. strategy1 = ((16, 1), (1, 1))
  52. strategy2 = ((16, 1), (1,))
  53. net = Net(_w1, _w2, strategy1, strategy2)
  54. compile_net(net)
  55. def test_opt_model_parallel():
  56. context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
  57. strategy1 = ((4, 2), (2, 2))
  58. strategy2 = ((4, 2), (2,))
  59. net = Net(_w1, _w2, strategy1, strategy2)
  60. compile_net(net)