From: @lijiaqi0612
Reviewed-by: @liangchenghui, @kingxian
Signed-off-by: @liangchenghui
Tag: tags/v1.1.0
@@ -337,7 +337,7 @@ class Adam(Optimizer):
         """If the input value is set to "CPU", the parameters will be updated on the host using the Fused
            optimizer operation."""
         if not isinstance(value, str):
-            raise ValueError("The value must be str type, but got value type is {}".format(type(value)))
+            raise TypeError("The value must be str type, but got value type is {}".format(type(value)))
         if value not in ('CPU', 'Ascend'):
             raise ValueError("The value must be 'CPU' or 'Ascend', but got value {}".format(value))
@@ -190,7 +190,7 @@ class FTRL(Optimizer):
         """If the input value is set to "CPU", the parameters will be updated on the host using the Fused
            optimizer operation."""
         if not isinstance(value, str):
-            raise ValueError("The value must be str type, but got value type is {}".format(type(value)))
+            raise TypeError("The value must be str type, but got value type is {}".format(type(value)))
         if value not in ('CPU', 'Ascend'):
             raise ValueError("The value must be 'CPU' or 'Ascend', but got value {}".format(value))
@@ -255,7 +255,7 @@ class LazyAdam(Optimizer):
         """If the input value is set to "CPU", the parameters will be updated on the host using the Fused
            optimizer operation."""
         if not isinstance(value, str):
-            raise ValueError("The value must be str type, but got value type is {}".format(type(value)))
+            raise TypeError("The value must be str type, but got value type is {}".format(type(value)))
         if value not in ('CPU', 'Ascend'):
             raise ValueError("The value must be 'CPU' or 'Ascend', but got value {}".format(value))
@@ -159,7 +159,7 @@ class ProximalAdagrad(Optimizer):
         """If the input value is set to "CPU", the parameters will be updated on the host using the Fused
            optimizer operation."""
         if not isinstance(value, str):
-            raise ValueError("The value must be str type, but got value type is {}".format(type(value)))
+            raise TypeError("The value must be str type, but got value type is {}".format(type(value)))
         if value not in ('CPU', 'Ascend'):
             raise ValueError("The value must be 'CPU' or 'Ascend', but got value {}".format(value))
@@ -79,13 +79,13 @@ class DynamicLossScaleUpdateCell(Cell):
         >>> net_with_loss = Net()
         >>> optimizer = nn.Momentum(net_with_loss.trainable_params(), learning_rate=0.1, momentum=0.9)
         >>> manager = nn.DynamicLossScaleUpdateCell(loss_scale_value=2**12, scale_factor=2, scale_window=1000)
-        >>> train_network = nn.TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=manager)
+        >>> train_network = nn.TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_sense=manager)
         >>> train_network.set_train()
         >>>
         >>> inputs = Tensor(np.ones([16, 16]).astype(np.float32))
         >>> label = Tensor(np.zeros([16, 16]).astype(np.float32))
         >>> scaling_sens = Tensor(np.full((1), np.finfo(np.float32).max), dtype=mindspore.float32)
-        >>> output = train_network(inputs, label, scaling_sens)
+        >>> output = train_network(inputs, label, scale_sense=scaling_sens)
     """
     def __init__(self,
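The docstring examples in this hunk and the next assume a `Net` that already folds its loss into the forward pass. A hypothetical stand-in consistent with the [16, 16] shapes used there (an illustration, not part of this PR):

    import numpy as np
    import mindspore.nn as nn
    import mindspore.ops as ops
    from mindspore import Tensor, Parameter

    class Net(nn.Cell):
        """Toy net-with-loss: one 16x16 weight and a mean-squared-error loss."""
        def __init__(self):
            super(Net, self).__init__()
            self.weight = Parameter(Tensor(np.ones([16, 16]).astype(np.float32)), name='weight')
            self.matmul = ops.MatMul()
            self.square = ops.Square()
            self.reduce_mean = ops.ReduceMean()

        def construct(self, x, label):
            # Return a scalar loss so the loss-scale wrapper has something to scale.
            output = self.matmul(x, self.weight)
            return self.reduce_mean(self.square(output - label))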
@@ -145,13 +145,13 @@ class FixedLossScaleUpdateCell(Cell):
         >>> net_with_loss = Net()
         >>> optimizer = nn.Momentum(net_with_loss.trainable_params(), learning_rate=0.1, momentum=0.9)
         >>> manager = nn.FixedLossScaleUpdateCell(loss_scale_value=2**12)
-        >>> train_network = nn.TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=manager)
+        >>> train_network = nn.TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_sense=manager)
         >>> train_network.set_train()
         >>>
         >>> inputs = Tensor(np.ones([16, 16]).astype(np.float32))
         >>> label = Tensor(np.zeros([16, 16]).astype(np.float32))
         >>> scaling_sens = Tensor(np.full((1), np.finfo(np.float32).max), dtype=mindspore.float32)
-        >>> output = train_network(inputs, label, scaling_sens)
+        >>> output = train_network(inputs, label, scale_sense=scaling_sens)
     """
     def __init__(self, loss_scale_value):
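A usage note for both corrected examples: in MindSpore 1.x the wrapper's output is a tuple rather than a bare loss. A hedged sketch of consuming it (assumes a (loss, overflow, loss scale) tuple; verify against the installed version):

    # Continues from the docstring examples above; `train_network`, `inputs`,
    # `label`, and `scaling_sens` are as defined there.
    loss, overflow, scale = train_network(inputs, label, scale_sense=scaling_sens)
    if not overflow:
        # With FixedLossScaleUpdateCell the scale stays constant either way;
        # DynamicLossScaleUpdateCell shrinks it on overflow and grows it
        # after scale_window clean steps.
        print("step applied, loss:", loss)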