
test_sparse_feature_bprop.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test sparse feature bprop """
import numpy as np

import mindspore as ms
import mindspore.nn as nn
from mindspore import context
from mindspore.common.parameter import Parameter
from mindspore.common.tensor import Tensor
from mindspore.ops import composite as C, operations as P
from mindspore.ops.operations.comm_ops import AllReduce
from mindspore.common.api import _executor
from mindspore.nn import TrainOneStepCell, Adam


grad_all = C.GradOperation(get_all=True)
class GradWrap(nn.Cell):
    """Wrap a network so that construct returns gradients w.r.t. all of its inputs."""

    def __init__(self, network):
        super(GradWrap, self).__init__()
        self.network = network

    def construct(self, x):
        return grad_all(self.network)(x)
def test_bprop_with_sparse_feature_allreduce():
    """Compile-only check: backprop through AllReduce + SparseGatherV2 under hybrid parallel."""
    context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="hybrid_parallel")
    context.set_context(enable_sparse=True)

    class Net(nn.Cell):
        def __init__(self, axis=0, shape=None):
            super(Net, self).__init__()
            if shape is None:
                shape = [8, 8]
            self.all_reduce = AllReduce()
            self.gatherv2 = P.SparseGatherV2()
            self.index = Tensor(np.ones(shape), dtype=ms.int32)
            self.axis = axis

        def construct(self, x):
            out = self.all_reduce(x)
            out = self.gatherv2(out, self.index, self.axis)
            return out

    net = GradWrap(Net())
    x = Tensor(np.ones([64, 64]), dtype=ms.float32)

    _executor.compile(net, x)
def test_bprop_with_sparse_feature_mirror():
    """Compile-only check: backprop through a sharded EmbeddingLookup under semi auto parallel."""
    context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
    context.set_context(enable_sparse=True)

    class Net(nn.Cell):
        def __init__(self, shape=None):
            super(Net, self).__init__()
            if shape is None:
                shape = [8, 8]
            self.index = Tensor(np.ones(shape), dtype=ms.int32)
            self.embeddinglookup = nn.EmbeddingLookup(64, 64, param_init='ones')
            self.embeddinglookup.embeddinglookup.shard(((1, 1), (8, 1)))

        def construct(self, x, b):
            out = self.embeddinglookup(self.index)
            return out

    _x = Tensor(np.ones([126, 64, 32]), dtype=ms.float32)
    _b = Tensor(np.ones([126, 64, 32]), dtype=ms.float32)

    def compile_net(net):
        optimizer = Adam(net.trainable_params(), learning_rate=0.1, loss_scale=1024.0, weight_decay=0.9)
        train_net = TrainOneStepCell(net, optimizer)
        _executor.compile(train_net, _x, _b)

    net = Net()
    compile_net(net)
def test_bprop_with_sparse_feature_dataparallel():
    """Compile-only check: backprop through SparseGatherV2 on a weight under data parallel."""
    context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="data_parallel")
    context.set_context(enable_sparse=True)

    class Net(nn.Cell):
        def __init__(self, axis=0, shape=None):
            super(Net, self).__init__()
            if shape is None:
                shape = [8, 8]
            weight = Tensor(np.ones([64, 64]), dtype=ms.float32)
            self.weight = Parameter(weight, "w")
            self.index = Tensor(np.ones(shape), dtype=ms.int32)
            self.axis = axis
            self.gatherv2 = P.SparseGatherV2()

        def construct(self, x, b):
            out = self.gatherv2(self.weight, self.index, self.axis)
            return out

    _x = Tensor(np.ones([126, 64, 32]), dtype=ms.float32)
    _b = Tensor(np.ones([126, 64, 32]), dtype=ms.float32)

    def compile_net(net):
        optimizer = Adam(net.trainable_params(), learning_rate=0.1, loss_scale=1024.0, weight_decay=0.9)
        train_net = TrainOneStepCell(net, optimizer)
        _executor.compile(train_net, _x, _b)

    net = Net()
    compile_net(net)
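

# A minimal runner sketch (an addition, not part of the original test file): each
# case above only builds a graph via _executor.compile, so the tests can be invoked
# directly, assuming a MindSpore build from this era (circa 2020) that still
# supports enable_sparse and these parallel modes.
if __name__ == "__main__":
    test_bprop_with_sparse_feature_allreduce()
    test_bprop_with_sparse_feature_mirror()
    test_bprop_with_sparse_feature_dataparallel()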