Merge pull request !7554 from chenzhongming/zomi_master
tags/v1.1.0
@@ -14,7 +14,7 @@
 # ============================================================================
 """.. MindSpore package."""
-from ._version_check import check_version_and_env_config
+from ._check_version import check_version_and_env_config
 from . import common, train
 from .common import *
 from .ops import _op_impl
@@ -505,7 +505,7 @@ class Validator:
         reduce(_check_types_same, map(_check_argument_type, args.items()))

     @staticmethod
-    def check_value_type(arg_name, arg_value, valid_types, prim_name):
+    def check_value_type(arg_name, arg_value, valid_types, prim_name=None):
         """Checks whether a value is instance of some types."""
         valid_types = valid_types if isinstance(valid_types, Iterable) else (valid_types,)
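With `prim_name` now defaulting to `None`, callers can drop the trailing argument; the validator call sites updated throughout this merge rely on exactly that. A minimal before/after sketch (standalone, using the import the touched files already have):

    from mindspore._checkparam import Validator as validator

    # Before this change, every caller passed a primitive name or None:
    validator.check_value_type('milestone', [2, 5, 10], (tuple, list), None)
    # After it, the argument can simply be omitted:
    validator.check_value_type('milestone', [2, 5, 10], (tuple, list))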
@@ -972,7 +972,7 @@ void ExportGraph(const std::string &file_name, const std::string &, const std::s
 #if (ENABLE_GE || ENABLE_D)
   ExportDFGraph(file_name, phase);
 #else
-  MS_EXCEPTION(ValueError) << "Only MindSpore with Ascend backend support exporting file in 'AIR' format.";
+  MS_EXCEPTION(ValueError) << "Only support export file in 'AIR' format with Ascend backend.";
 #endif
 }
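On the Python side this branch is reached through the serialization API; a minimal sketch, assuming the LeNet5 network from the scripts below and the 1.x export entry point:

    import numpy as np
    from mindspore import Tensor
    from mindspore.train.serialization import export
    from src.lenet import LeNet5  # as used by the example scripts in this merge

    net = LeNet5()
    inp = Tensor(np.ones([1, 1, 32, 32]).astype(np.float32))
    # Raises the ValueError above unless MindSpore runs with the Ascend backend.
    export(net, inp, file_name='lenet.air', file_format='AIR')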
@@ -514,8 +514,9 @@ py::object ExecDFGraph(const std::map<std::string, ExecutorInfoPtr> &info, const
     MS_LOG(EXCEPTION) << "Exec graph failed";
   }
 }
+
 void ExportDFGraph(const std::string &file_name, const std::string &phase) {
-  MS_LOG(DEBUG) << "ExportGraph Begin";
+  MS_LOG(DEBUG) << "Export graph begin.";
   transform::DfGraphWrapperPtr wrap_ptr = DfGraphManager::GetInstance().GetGraphByName(phase);
   if (wrap_ptr == nullptr) {
     MS_LOG(ERROR) << "Get graph form DfGraphManager failed!";
@@ -524,13 +525,12 @@ void ExportDFGraph(const std::string &file_name, const std::string &phase) {
   transform::DfGraphPtr ge_graph = wrap_ptr->graph_ptr_;
   if (nullptr == ge_graph) {
-    MS_LOG(ERROR) << "The export graph is null";
+    MS_LOG(ERROR) << "Graph is null!";
     return;
   }
   (void)ge_graph->SaveToFile(file_name);
-  MS_LOG(DEBUG) << "ExportGraph End";
+  MS_LOG(DEBUG) << "Export graph end.";
 }
 }  // namespace pipeline
 }  // namespace mindspore
@@ -41,9 +41,9 @@ class Cell(Cell_):
     Note:
         In general, the autograd algorithm will automatically generate the implementation of the gradient function,
-        but if bprop method is implemented, the gradient function
-        will be replaced by the bprop. The bprop implementation will receive a Tensor `dout` containing the gradient
-        of the loss w.r.t. the output, and a Tensor `out` containing the forward result. The bprop needs to compute the
+        but if back-propagation(bprop) method is implemented, the gradient function will be replaced by the bprop.
+        The bprop implementation will receive a Tensor `dout` containing the gradient of the loss w.r.t.
+        the output, and a Tensor `out` containing the forward result. The bprop needs to compute the
         gradient of the loss w.r.t. the inputs, gradient of the loss w.r.t. Parameter variables are not supported
         currently.
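The contract in this note is easiest to see in code; a minimal sketch of a Cell with a hand-written bprop (the class and its gradients are illustrative, not part of this merge):

    import mindspore.nn as nn

    class MulAdd(nn.Cell):
        def construct(self, x, y):
            return 2 * x + y

        def bprop(self, x, y, out, dout):
            # Given the forward result `out` and the gradient `dout` of the
            # loss w.r.t. the output, return one gradient per input:
            # d(2x + y)/dx = 2, d(2x + y)/dy = 1.
            return 2 * dout, dout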
@@ -117,8 +117,7 @@ class Cell(Cell_):
     @property
     def _cell_tag(self):
-        # `<class 'xxxxxxx'>`
-        # -> `xxxxxxx`
+        # `<class 'xxxxxxx'>` to `xxxxxxx`
         return str(self.__class__)[8:-2]

     @already_run.setter
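The slice in `_cell_tag` relies on the fixed frame that `str` puts around a class object's qualified name; a quick illustration with a hypothetical class:

    class Net:
        pass

    str(Net)         # "<class '__main__.Net'>"
    str(Net)[8:-2]   # "__main__.Net" -- cuts the 8-char "<class '" prefix and the "'>" suffix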
@@ -449,7 +448,7 @@ class Cell(Cell_):
         for key in params:
             tensor = params[key].data
             if key not in self.parameter_layout_dict:
-                logger.info("layout dict does not contain the key %s", key)
+                logger.info("layout dict does not contain the key %s.", key)
                 continue
             if params[key].sliced:
                 logger.debug("Param %s is already sliced.", key)
@@ -458,8 +457,7 @@ class Cell(Cell_):
                 new_tensor = _load_tensor_by_layout(tensor, layout)
                 params[key].set_data(new_tensor, True)
         else:
-            raise TypeError('Parameters need OrderedDict type, but got {}'.
-                            format(type(params)))
+            raise TypeError("Parameters need OrderedDict type, but got {}.".format(type(params)))

     def _load_inputs(self, *inputs):
         """
@@ -480,7 +478,7 @@ class Cell(Cell_):
             key = self._construct_inputs_names[i]
             # if input is not used, self.parameter_layout_dict may not contain the key
             if key not in self.parameter_layout_dict:
-                logger.warning("layout dict does not contain the key %s", key)
+                logger.warning("Layout dict does not contain the key %s.", key)
                 parallel_inputs_run.append(tensor)
             else:
                 layout = self.parameter_layout_dict[key]
@@ -739,7 +737,7 @@ class Cell(Cell_):
         """
         Returns all untrainable parameters.

-        Returns a list of all untrainable parmeters.
+        Returns a list of all untrainable parameters.

         Args:
             recurse (bool): Whether contains the untrainable parameters of subcells. Default: True.
@@ -16,7 +16,6 @@
 import math

 from mindspore._checkparam import Validator as validator
-from mindspore._checkparam import Rel

 def piecewise_constant_lr(milestone, learning_rates):
@@ -44,8 +43,8 @@ def piecewise_constant_lr(milestone, learning_rates):
         >>> piecewise_constant_lr(milestone, learning_rates)
         [0.1, 0.1, 0.05, 0.05, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01]
     """
-    validator.check_value_type('milestone', milestone, (tuple, list), None)
-    validator.check_value_type('learning_rates', learning_rates, (tuple, list), None)
+    validator.check_value_type('milestone', milestone, (tuple, list))
+    validator.check_value_type('learning_rates', learning_rates, (tuple, list))
    if len(milestone) != len(learning_rates):
        raise ValueError('The size of `milestone` must be same with the size of `learning_rates`.')
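For reference, the schedule itself is a simple expansion of the milestones; a minimal reimplementation sketch (name hypothetical) that reproduces the docstring example above:

    def piecewise_constant_lr_sketch(milestone, learning_rates):
        lr, last = [], 0
        for m, r in zip(milestone, learning_rates):
            # hold rate r for every step up to milestone m
            lr.extend([r] * (m - last))
            last = m
        return lr

    piecewise_constant_lr_sketch([2, 5, 10], [0.1, 0.05, 0.01])
    # [0.1, 0.1, 0.05, 0.05, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01]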
@@ -70,7 +69,7 @@ def _check_inputs(learning_rate, decay_rate, total_step, step_per_epoch, decay_e
     validator.check_is_float(learning_rate, 'learning_rate')
     validator.check_positive_float(decay_rate, 'decay_rate')
     validator.check_is_float(decay_rate, 'decay_rate')
-    validator.check_value_type('is_stair', is_stair, [bool], None)
+    validator.check_value_type('is_stair', is_stair, [bool])

 def exponential_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, is_stair=False):
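A hedged sketch of the curve these checks guard, following the documented behaviour (decay once per `decay_epoch` epochs, with `is_stair` flooring the exponent to whole decay periods):

    import math

    def exponential_decay_lr_sketch(learning_rate, decay_rate, total_step,
                                    step_per_epoch, decay_epoch, is_stair=False):
        lr = []
        for i in range(total_step):
            ratio = (i // step_per_epoch) / decay_epoch
            if is_stair:
                ratio = math.floor(ratio)
            lr.append(learning_rate * decay_rate ** ratio)
        return lr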
@@ -309,7 +308,7 @@ def polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_e
     validator.check_positive_int(total_step, 'total_step')
     validator.check_positive_int(step_per_epoch, 'step_per_epoch')
     validator.check_positive_int(decay_epoch, 'decay_epoch')
-    validator.check_value_type('update_decay_epoch', update_decay_epoch, [bool], None)
+    validator.check_value_type('update_decay_epoch', update_decay_epoch, [bool])

     origin_decay_epoch = decay_epoch
     function = lambda x, y: (x, min(x, y))
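If I read the lambda above correctly, it caps the current epoch at `decay_epoch`; the usual polynomial decay this function appears to implement is, as a per-epoch sketch (power handling simplified):

    def polynomial_decay_sketch(lr, end_lr, current_epoch, decay_epoch, power):
        # interpolate from lr down to end_lr over decay_epoch epochs
        base = 1 - min(current_epoch, decay_epoch) / decay_epoch
        return (lr - end_lr) * base ** power + end_lr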
@@ -45,7 +45,7 @@ def op_info_register(op_info):
         op_info_real = json.dumps(op_info)
     else:
         op_info_real = op_info
-    validator.check_value_type("op_info", op_info_real, [str], None)
+    validator.check_value_type("op_info", op_info_real, [str])
     op_lib = Oplib()
     file_path = os.path.realpath(inspect.getfile(func))
     # keep the path custom ops implementation.
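For context, the decorator wraps a stub function whose file path locates the custom-op implementation; a hypothetical registration (the op name and dict contents are illustrative only):

    from mindspore.ops import op_info_register

    @op_info_register({"op_name": "MyCustomOp"})  # non-str op_info is json.dumps'ed above
    def _my_custom_op_info():
        """Stub; its file path is recorded for the custom op implementation."""
        return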
@@ -82,11 +82,11 @@ def _check_kwargs(key_words):
         validator.check_type_name('cast_model_type', key_words['cast_model_type'],
                                   [mstype.float16, mstype.float32], None)
     if 'keep_batchnorm_fp32' in key_words:
-        validator.check_value_type('keep_batchnorm_fp32', key_words['keep_batchnorm_fp32'], bool, None)
+        validator.check_value_type('keep_batchnorm_fp32', key_words['keep_batchnorm_fp32'], bool)
     if 'loss_scale_manager' in key_words:
         loss_scale_manager = key_words['loss_scale_manager']
         if loss_scale_manager:
-            validator.check_value_type('loss_scale_manager', loss_scale_manager, LossScaleManager, None)
+            validator.check_value_type('loss_scale_manager', loss_scale_manager, LossScaleManager)

 def _add_loss_network(network, loss_fn, cast_model_type):
@@ -104,7 +104,7 @@ def _add_loss_network(network, loss_fn, cast_model_type):
             label = F.mixed_precision_cast(mstype.float32, label)
             return self._loss_fn(F.mixed_precision_cast(mstype.float32, out), label)

-    validator.check_value_type('loss_fn', loss_fn, nn.Cell, None)
+    validator.check_value_type('loss_fn', loss_fn, nn.Cell)
     if cast_model_type == mstype.float16:
         network = WithLossCell(network, loss_fn)
     else:
@@ -140,9 +140,9 @@ def build_train_network(network, optimizer, loss_fn=None, level='O0', **kwargs):
         loss_scale_manager (Union[None, LossScaleManager]): If None, not scale the loss, or else
             scale the loss by LossScaleManager. If set, overwrite the level setting.
     """
-    validator.check_value_type('network', network, nn.Cell, None)
-    validator.check_value_type('optimizer', optimizer, nn.Optimizer, None)
-    validator.check('level', level, "", ['O0', 'O2', 'O3', "auto"], Rel.IN, None)
+    validator.check_value_type('network', network, nn.Cell)
+    validator.check_value_type('optimizer', optimizer, nn.Optimizer)
+    validator.check('level', level, "", ['O0', 'O2', 'O3', "auto"], Rel.IN)
     if level == "auto":
         device_target = context.get_context('device_target')
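A usage sketch tying the validated arguments together (network, optimizer and loss are placeholders; 'auto' defers to the device_target queried above, and the import path is the 1.x location of this module):

    import mindspore.nn as nn
    from mindspore.train.amp import build_train_network

    net = LeNet5()  # any nn.Cell; LeNet5 here as a stand-in
    opt = nn.Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
    # 'O2' casts the network to float16 and keeps batchnorm in float32.
    train_net = build_train_network(net, opt, loss, level='O2')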
@@ -19,7 +19,6 @@ python eval.py --data_path /YourDataPath --ckpt_path Your.ckpt
 """
 import os
-import ast
 import argparse

 import mindspore.nn as nn
 from mindspore import context

@@ -38,8 +37,6 @@ if __name__ == "__main__":
                         help='path where the dataset is saved')
     parser.add_argument('--ckpt_path', type=str, default="", help='if mode is test, must provide\
                         path where the trained ckpt file')
-    parser.add_argument('--dataset_sink_mode', type=ast.literal_eval,
-                        default=False, help='dataset_sink_mode is False or True')
     args = parser.parse_args()
@@ -60,5 +57,5 @@ if __name__ == "__main__":
     if ds_eval.get_dataset_size() == 0:
         raise ValueError("Please check dataset size > 0 and batch_size <= dataset size")

-    acc = model.eval(ds_eval, dataset_sink_mode=args.dataset_sink_mode)
+    acc = model.eval(ds_eval)
     print("============== {} ==============".format(acc))
@@ -46,8 +46,12 @@ class LeNet5(nn.Cell):
     def construct(self, x):
-        x = self.max_pool2d(self.relu(self.conv1(x)))
-        x = self.max_pool2d(self.relu(self.conv2(x)))
+        x = self.conv1(x)
+        x = self.relu(x)
+        x = self.max_pool2d(x)
+        x = self.conv2(x)
+        x = self.relu(x)
+        x = self.max_pool2d(x)
         if not self.include_top:
             return x
         x = self.flatten(x)
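A hedged usage sketch of the unrolled forward pass, assuming the common model-zoo constructor signature:

    import numpy as np
    from mindspore import Tensor
    from src.lenet import LeNet5  # path as used by the scripts in this merge

    net = LeNet5(num_class=10, num_channel=1)  # include_top=True by default
    x = Tensor(np.ones([1, 1, 32, 32]).astype(np.float32))
    logits = net(x)  # runs construct(); (1, 10) with the classifier head attached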
@@ -19,7 +19,6 @@ python train.py --data_path /YourDataPath
 """
 import os
-import ast
 import argparse

 from src.config import mnist_cfg as cfg
 from src.dataset import create_dataset

@@ -41,8 +40,6 @@ if __name__ == "__main__":
                         help='path where the dataset is saved')
     parser.add_argument('--ckpt_path', type=str, default="./ckpt", help='if is test, must provide\
                         path where the trained ckpt file')
-    parser.add_argument('--dataset_sink_mode', type=ast.literal_eval, default=True,
-                        help='dataset_sink_mode is False or True')
     args = parser.parse_args()
@@ -67,5 +64,4 @@ if __name__ == "__main__":
     model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()}, amp_level="O2")

     print("============== Starting Training ==============")
-    model.train(cfg['epoch_size'], ds_train, callbacks=[time_cb, ckpoint_cb, LossMonitor()],
-                dataset_sink_mode=args.dataset_sink_mode)
+    model.train(cfg['epoch_size'], ds_train, callbacks=[time_cb, ckpoint_cb, LossMonitor()])
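With the CLI flag gone, both scripts fall back to Model's own default for sink mode; if the old flag's behaviour is needed, it can still be passed per call (sketch, reusing the names from the script above):

    # default sink behaviour:
    model.train(cfg['epoch_size'], ds_train,
                callbacks=[time_cb, ckpoint_cb, LossMonitor()])
    # explicit override, replacing the removed --dataset_sink_mode flag:
    model.train(cfg['epoch_size'], ds_train,
                callbacks=[time_cb, ckpoint_cb, LossMonitor()],
                dataset_sink_mode=False)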
@@ -232,7 +232,7 @@ def conv2d(x, weight, bias=None, stride=1, pad=0,
            dilation=1, groups=1, padding_mode='zeros'):
     """Convolution 2D."""
     # pylint: disable=unused-argument
-    validator.check_value_type('stride', stride, (int, tuple), None)
+    validator.check_value_type('stride', stride, (int, tuple))
     if isinstance(stride, int):
         stride = (stride, stride)
     elif len(stride) == 4:

@@ -244,7 +244,7 @@ def conv2d(x, weight, bias=None, stride=1, pad=0,
                         f"a tuple of two positive int numbers, but got {stride}")
     stride_h = stride[0]
     stride_w = stride[1]
-    validator.check_value_type('dilation', dilation, (int, tuple), None)
+    validator.check_value_type('dilation', dilation, (int, tuple))
     if isinstance(dilation, int):
         dilation = (dilation, dilation)
     elif len(dilation) == 4:
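Both `stride` and `dilation` are normalized the same way; a standalone sketch of the pattern (the 4-tuple branch is an assumption, reading the truncated elif above as picking the H/W entries of an NCHW-style tuple):

    def _normalize_2tuple_sketch(value):
        # int -> (v, v); NCHW 4-tuple -> (H, W); 2-tuple passes through
        if isinstance(value, int):
            return value, value
        if len(value) == 4:
            return value[2], value[3]
        if len(value) == 2:
            return value[0], value[1]
        raise ValueError(f"expected an int or a tuple of two positive int "
                         f"numbers, but got {value}")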