|
|
|
@@ -79,13 +79,13 @@ class DynamicLossScaleUpdateCell(Cell): |
|
|
|
>>> net_with_loss = Net() |
|
|
|
>>> optimizer = nn.Momentum(net_with_loss.trainable_params(), learning_rate=0.1, momentum=0.9) |
|
|
|
>>> manager = nn.DynamicLossScaleUpdateCell(loss_scale_value=2**12, scale_factor=2, scale_window=1000) |
|
|
|
->>> train_network = nn.TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=manager)
|
|
|
+>>> train_network = nn.TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_sense=manager)
|
|
|
>>> train_network.set_train() |
|
|
|
>>> |
|
|
|
>>> inputs = Tensor(np.ones([16, 16]).astype(np.float32)) |
|
|
|
>>> label = Tensor(np.zeros([16, 16]).astype(np.float32)) |
|
|
|
>>> scaling_sens = Tensor(np.full((1), np.finfo(np.float32).max), dtype=mindspore.float32) |
|
|
|
->>> output = train_network(inputs, label, scaling_sens)
|
|
|
+>>> output = train_network(inputs, label, scale_sense=scaling_sens)
|
|
|
""" |
|
|
|
|
|
|
|
def __init__(self, |
|
|
|
@@ -145,13 +145,13 @@ class FixedLossScaleUpdateCell(Cell): |
|
|
|
>>> net_with_loss = Net() |
|
|
|
>>> optimizer = nn.Momentum(net_with_loss.trainable_params(), learning_rate=0.1, momentum=0.9) |
|
|
|
>>> manager = nn.FixedLossScaleUpdateCell(loss_scale_value=2**12) |
|
|
|
->>> train_network = nn.TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=manager)
|
|
|
+>>> train_network = nn.TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_sense=manager)
|
|
|
>>> train_network.set_train() |
|
|
|
>>> |
|
|
|
>>> inputs = Tensor(np.ones([16, 16]).astype(np.float32)) |
|
|
|
>>> label = Tensor(np.zeros([16, 16]).astype(np.float32)) |
|
|
|
>>> scaling_sens = Tensor(np.full((1), np.finfo(np.float32).max), dtype=mindspore.float32) |
|
|
|
->>> output = train_network(inputs, label, scaling_sens)
|
|
|
+>>> output = train_network(inputs, label, scale_sense=scaling_sens)
|
|
|
""" |
|
|
|
|
|
|
|
def __init__(self, loss_scale_value): |
|
|
|
|