mindspore.nn.DistributedGradReducer
====================================

.. py:class:: mindspore.nn.DistributedGradReducer(parameters, mean=True, degree=None, fusion_type=1, group=GlobalComm.WORLD_COMM_GROUP)

A distributed optimizer.

Used in data parallel mode to aggregate the gradients of all devices by AllReduce.
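
A minimal sketch of the core usage, mirroring the full example at the end of this page (it assumes the communication backend has already been initialized with ``init()``, that ``net`` is a hypothetical network, and that ``grads`` is the tuple of gradients returned by ``ops.GradOperation``)::

    from mindspore import context, nn

    # read the data-parallel settings from the auto-parallel context
    mean = context.get_auto_parallel_context("gradients_mean")
    degree = context.get_auto_parallel_context("device_num")
    grad_reducer = nn.DistributedGradReducer(net.trainable_params(), mean, degree)
    # gradients are AllReduce-summed (and averaged when mean is True) across devices
    grads = grad_reducer(grads)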

**Parameters:**

- **parameters** (list) - The parameters to be updated.
- **mean** (bool) - When mean is True, the gradients are averaged after AllReduce (see the sketch after this list). Default: True.
- **degree** (int) - The mean coefficient, usually equal to the number of devices. Default: None.
- **fusion_type** (int) - The fusion type of the AllReduce operator. Default: 1.
- **group** (str) - The communication group to work on. Default: GlobalComm.WORLD_COMM_GROUP.
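
A conceptual illustration of what ``mean`` and ``degree`` control, written in plain NumPy rather than MindSpore internals (the device count and gradient values are made up for illustration)::

    import numpy as np

    degree = 4                                    # number of devices in the group
    per_device_grads = [np.full(3, i + 1.0) for i in range(degree)]
    reduced = np.sum(per_device_grads, axis=0)    # AllReduce(sum) result: [10. 10. 10.]
    mean = True
    if mean:
        reduced = reduced / degree                # averaged gradient: [2.5 2.5 2.5]
    print(reduced)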

**Raises:**

- **ValueError** - If degree is not an int or is less than 0.

**Supported Platforms:**

``Ascend`` ``GPU``

**Examples:**

>>> # This example should be run with multiple processes.
>>> # Please refer to "Tutorials > Distributed Training" on mindspore.cn.
>>> import numpy as np
>>> from mindspore.communication import init
>>> from mindspore import ops
>>> from mindspore import context
>>> from mindspore.context import ParallelMode
>>> from mindspore import Parameter, Tensor
>>> from mindspore import nn
>>>
>>> context.set_context(mode=context.GRAPH_MODE)
>>> init()
>>> context.reset_auto_parallel_context()
>>> context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL)
>>>
>>> class TrainingWrapper(nn.Cell):
...     def __init__(self, network, optimizer, sens=1.0):
...         super(TrainingWrapper, self).__init__(auto_prefix=False)
...         self.network = network
...         self.network.add_flags(defer_inline=True)
...         self.weights = optimizer.parameters
...         self.optimizer = optimizer
...         self.grad = ops.GradOperation(get_by_list=True, sens_param=True)
...         self.sens = sens
...         self.reducer_flag = False
...         self.grad_reducer = None
...         self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
...         if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
...             self.reducer_flag = True
...         if self.reducer_flag:
...             mean = context.get_auto_parallel_context("gradients_mean")
...             degree = context.get_auto_parallel_context("device_num")
...             self.grad_reducer = nn.DistributedGradReducer(optimizer.parameters, mean, degree)
...
...     def construct(self, *args):
...         weights = self.weights
...         loss = self.network(*args)
...         sens = ops.Fill()(ops.DType()(loss), ops.Shape()(loss), self.sens)
...         grads = self.grad(self.network, weights)(*args, sens)
...         if self.reducer_flag:
...             # apply grad reducer on grads
...             grads = self.grad_reducer(grads)
...         return ops.Depend()(loss, self.optimizer(grads))
>>>
>>> class Net(nn.Cell):
...     def __init__(self, in_features, out_features):
...         super(Net, self).__init__()
...         self.weight = Parameter(Tensor(np.ones([in_features, out_features]).astype(np.float32)),
...                                 name='weight')
...         self.matmul = ops.MatMul()
...
...     def construct(self, x):
...         output = self.matmul(x, self.weight)
...         return output
>>>
>>> size, in_features, out_features = 16, 16, 10
>>> network = Net(in_features, out_features)
>>> loss = nn.MSELoss()
>>> net_with_loss = nn.WithLossCell(network, loss)
>>> optimizer = nn.Momentum(net_with_loss.trainable_params(), learning_rate=0.1, momentum=0.9)
>>> train_cell = TrainingWrapper(net_with_loss, optimizer)
>>> inputs = Tensor(np.ones([size, in_features]).astype(np.float32))
>>> label = Tensor(np.zeros([size, out_features]).astype(np.float32))
>>> grads = train_cell(inputs, label)
>>> print(grads)
256.0