| @@ -181,7 +181,7 @@ def pytype_to_dtype(obj): | |||
| return obj | |||
| if isinstance(obj, type) and obj in _simple_types: | |||
| return _simple_types[obj] | |||
| raise NotImplementedError(f"Unsupported type {obj} for `pytype_to_dtype`.") | |||
| raise NotImplementedError(f"Can not convert python type {obj} to MindSpore type.") | |||
| def get_py_obj_dtype(obj): | |||
| @@ -197,10 +197,12 @@ def _calculate_gain(nonlinearity, param=None): | |||
| # True/False are instances of int, hence check above | |||
| negative_slope = param | |||
| else: | |||
| raise ValueError("negative_slope {} not a valid number".format(param)) | |||
| raise ValueError("negative_slope {} is not a valid number. " | |||
| "It should be bool, int, or float type.".format(param)) | |||
| res = math.sqrt(2.0 / (1 + negative_slope ** 2)) | |||
| else: | |||
| raise ValueError("Unsupported nonlinearity {}".format(nonlinearity)) | |||
| raise ValueError("Unsupported nonlinearity {}, the argument 'nonlinearity' should be one of " | |||
| "'sigmoid', 'tanh', 'relu' or 'leaky_relu'.".format(nonlinearity)) | |||
| return res | |||
| @@ -495,7 +497,8 @@ def initializer(init, shape=None, dtype=mstype.float32): | |||
| >>> tensor3 = initializer(0, [1, 2, 3], mindspore.float32) | |||
| """ | |||
| if not isinstance(init, (Tensor, numbers.Number, str, Initializer)): | |||
| raise TypeError("Unsupported init type '{}'.".format(type(init))) | |||
| raise TypeError("Unsupported init type '{}', init should be 'Tensor', 'number', 'str' " | |||
| "or 'Initializer' type.".format(type(init))) | |||
| if isinstance(init, Tensor): | |||
| init_shape = init.shape | |||
| @@ -512,7 +515,7 @@ def initializer(init, shape=None, dtype=mstype.float32): | |||
| for value in shape if shape is not None else (): | |||
| if not isinstance(value, int) or value <= 0: | |||
| raise ValueError(f"shape is invalid, shape value must be positive integer, shape:{shape}") | |||
| raise ValueError(f"Shape is invalid, the value of shape must be positive integer, but got shape:{shape}") | |||
| if isinstance(init, str): | |||
| init = _INITIALIZER_ALIAS[init.lower()]() | |||
| @@ -50,10 +50,10 @@ def init_to_value(init): | |||
| return 0.0 | |||
| if init == 'ones': | |||
| return 1.0 | |||
| raise ValueError("init should be one of values in 'zeros', 'ones'.") | |||
| raise ValueError("The 'init' argument should be one of values in 'zeros', 'ones'.") | |||
| if isinstance(init, numbers.Number): | |||
| return float(init) | |||
| raise ValueError("init should be number or string") | |||
| raise ValueError("The 'init' argument should be number or string.") | |||
| class Parameter(Tensor_): | |||
| @@ -161,8 +161,8 @@ class Parameter(Tensor_): | |||
| elif isinstance(default_input, (np.ndarray, list)): | |||
| Tensor_.__init__(self, default_input) | |||
| else: | |||
| raise TypeError(f"Parameter input must be [`Tensor`, `int`, `float`, `numpy.ndarray`, `list`]." | |||
| f"But with type {type(default_input)}.") | |||
| raise TypeError(f"The 'default_input' argument must be [`Tensor`, `int`, `float`, `numpy.ndarray`, `list`], " | |||
| f"but got type {type(default_input)}.") | |||
| def __deepcopy__(self, memodict): | |||
| new_obj = Parameter(self) | |||
| @@ -224,7 +224,8 @@ class Parameter(Tensor_): | |||
| if not(_is_role_worker() or _is_role_pserver() or _is_role_sched()): | |||
| raise RuntimeError("Must complete following two steps before calling set_param_ps: \ | |||
| 1. set_ps_context(enable_ps=True) \ | |||
| 2. export MS_ROLE environment variable.") | |||
| 2. export MS_ROLE environment variable \ | |||
| Please refer to the official website for detailed usage.") | |||
| if init_in_server and (not self.name.endswith("embedding_table")): | |||
| raise RuntimeError("Can not initialize parameter '{}' in server, only parameters of " | |||
| "sparse operator support initialization in server.".format(self.name)) | |||
| @@ -283,7 +284,7 @@ class Parameter(Tensor_): | |||
| raise ValueError("The length of the '{}' name should be less than {}.". | |||
| format(name_, PARAMETER_NAME_PREFIX_MAX_LEN)) | |||
| else: | |||
| raise ValueError("The type of the name should be `str` or `None`.") | |||
| raise ValueError("The type of the parameter's name should be `str` or `None`.") | |||
| if _is_role_worker() and self.cache_enable: | |||
| if len(self.shape) != 2: | |||
| @@ -415,7 +416,7 @@ class Parameter(Tensor_): | |||
| @layerwise_parallel.setter | |||
| def layerwise_parallel(self, value=True): | |||
| if not isinstance(value, bool): | |||
| raise TypeError("`layerwise_parallel` parameter must be bool type") | |||
| raise TypeError("The argument `layerwise_parallel` must be bool type.") | |||
| self.param_info.layerwise_parallel = value | |||
| @property | |||
| @@ -429,7 +430,7 @@ class Parameter(Tensor_): | |||
| @parallel_optimizer.setter | |||
| def parallel_optimizer(self, value=True): | |||
| if not isinstance(value, bool): | |||
| raise TypeError("`parallel_optimizer` parameter must be bool type") | |||
| raise TypeError("The argument `parallel_optimizer` must be bool type.") | |||
| self.param_info.parallel_optimizer = value | |||
| @property | |||
| @@ -440,7 +441,7 @@ class Parameter(Tensor_): | |||
| @cache_enable.setter | |||
| def cache_enable(self, value=True): | |||
| if not isinstance(value, bool): | |||
| raise TypeError("`cache_enable` parameter must be bool type") | |||
| raise TypeError("The argument `cache_enable` must be bool type.") | |||
| self.param_info.cache_enable = value | |||
| @property | |||
| @@ -451,7 +452,7 @@ class Parameter(Tensor_): | |||
| @cache_shape.setter | |||
| def cache_shape(self, value): | |||
| if not isinstance(value, (tuple, list)): | |||
| raise TypeError("`cache_shape` parameter must be tuple or list type") | |||
| raise TypeError("The argument `cache_shape` must be tuple or list type.") | |||
| self.param_info.cache_shape = value | |||
| @property | |||
| @@ -462,7 +463,7 @@ class Parameter(Tensor_): | |||
| @requires_grad.setter | |||
| def requires_grad(self, value=True): | |||
| if not isinstance(value, bool): | |||
| raise TypeError("`requires_grad` parameter must be bool type") | |||
| raise TypeError("The argument `requires_grad` must be bool type") | |||
| self.param_info.requires_grad = value | |||
| @property | |||
| @@ -514,10 +515,8 @@ class Parameter(Tensor_): | |||
| current_tensor_is_init = isinstance(self, Tensor) and not self.has_init | |||
| if incoming_tensor_is_init and not current_tensor_is_init: | |||
| raise TypeError("Parameter is a `Tensor` and not initialized, `data` for `set_data`" | |||
| "should be a Tensor. If you want to update it by Tensor, call method" | |||
| "`init_parameters_data` of `Cell` to init and replace all the Parameter of" | |||
| "network, then call this method.") | |||
| raise TypeError("The current parameter is not initialized, but the argument 'data' is an " | |||
| "initialized Tensor. Please call `init_parameters_data` of `Cell` to initialize " | |||
| "the parameter before calling this method.") | |||
| if tuple(self.shape) != tuple(data.shape): | |||
| # If Slice create Parameter shape can be change. | |||
| if not slice_shape: | |||
| @@ -540,8 +539,6 @@ class Parameter(Tensor_): | |||
| self.init_mode = data | |||
| elif incoming_tensor_is_init or current_tensor_is_init: | |||
| self._update_tensor_data(data) | |||
| else: | |||
| raise ValueError(f"Not support to update the Parameter by {data}") | |||
| self.sliced = slice_shape | |||
| return self | |||
| @@ -143,7 +143,7 @@ def set_seed(seed): | |||
| >>> c2 = ops.uniform((1, 4), minval, maxval, seed=2) # still get C1 | |||
| """ | |||
| if not isinstance(seed, int): | |||
| raise TypeError("The seed must be type of int.") | |||
| raise TypeError("The argument 'seed' must be type of int.") | |||
| Validator.check_non_negative_int(seed, "seed", "global_seed") | |||
| import mindspore.dataset as de | |||
| np.random.seed(seed) | |||
| @@ -175,7 +175,8 @@ class CallbackManager(Callback): | |||
| elif isinstance(callbacks, list): | |||
| for cb in callbacks: | |||
| if not isinstance(cb, Callback): | |||
| raise TypeError("The 'callbacks' contains not-a-Callback item.") | |||
| raise TypeError("When the 'callbacks' is a list, the elements in " | |||
| "'callbacks' must be instances of Callback.") | |||
| self._callbacks.append(cb) | |||
| elif callbacks is not None: | |||
| raise TypeError("The 'callbacks' is not a Callback or a list of Callback.") | |||
| @@ -249,7 +250,7 @@ class RunContext: | |||
| """ | |||
| def __init__(self, original_args): | |||
| if not isinstance(original_args, dict): | |||
| raise TypeError("The arg of RunContext should be dict type.") | |||
| raise TypeError("The argument 'original_args' of RunContext should be dict type.") | |||
| self._original_args = original_args | |||
| self._stop_requested = False | |||
| @@ -160,7 +160,8 @@ class CheckpointConfig: | |||
| if not save_checkpoint_steps and not save_checkpoint_seconds and \ | |||
| not keep_checkpoint_max and not keep_checkpoint_per_n_minutes: | |||
| raise ValueError("The input_param can't be all None or 0") | |||
| raise ValueError("The input arguments 'save_checkpoint_steps', 'save_checkpoint_seconds', " | |||
| "'keep_checkpoint_max' and 'keep_checkpoint_per_n_minutes' can't be all None or 0.") | |||
| self._save_checkpoint_steps = save_checkpoint_steps | |||
| self._save_checkpoint_seconds = save_checkpoint_seconds | |||
| @@ -248,7 +249,7 @@ class CheckpointConfig: | |||
| if append_info is None or append_info == []: | |||
| return None | |||
| if not isinstance(append_info, list): | |||
| raise TypeError(f"The type of append_info must list, but got {str(type(append_info))}.") | |||
| raise TypeError(f"The type of 'append_info' must list, but got {str(type(append_info))}.") | |||
| handle_append_info = {} | |||
| if "epoch_num" in append_info: | |||
| handle_append_info["epoch_num"] = 0 | |||
| @@ -303,8 +304,9 @@ class ModelCheckpoint(Callback): | |||
| self._last_triggered_step = 0 | |||
| if not isinstance(prefix, str) or prefix.find('/') >= 0: | |||
| raise ValueError("Prefix {} for checkpoint file name invalid, " | |||
| "please check and correct it and then continue.".format(prefix)) | |||
| raise ValueError("The argument 'prefix' {} for checkpoint file name is invalid, 'prefix' must be " | |||
| "str and can not contain '/', please check and correct it and then " | |||
| "continue.".format(prefix)) | |||
| self._prefix = prefix | |||
| if directory is not None: | |||
| @@ -316,7 +318,7 @@ class ModelCheckpoint(Callback): | |||
| self._config = CheckpointConfig() | |||
| else: | |||
| if not isinstance(config, CheckpointConfig): | |||
| raise TypeError("config should be CheckpointConfig type.") | |||
| raise TypeError("The argument 'config' should be CheckpointConfig type.") | |||
| self._config = config | |||
| # get existing checkpoint files | |||
| @@ -39,7 +39,8 @@ class LossMonitor(Callback): | |||
| def __init__(self, per_print_times=1): | |||
| super(LossMonitor, self).__init__() | |||
| if not isinstance(per_print_times, int) or per_print_times < 0: | |||
| raise ValueError("print_step must be int and >= 0.") | |||
| raise ValueError("The argument 'per_print_times' must be int and >= 0, " | |||
| "but got {}.".format(per_print_times)) | |||
| self._per_print_times = per_print_times | |||
| def step_end(self, run_context): | |||
| @@ -138,7 +138,7 @@ def connect_network_with_dataset(network, dataset_helper): | |||
| dataset = dataset_iter.dataset | |||
| if isinstance(dataset_iter, _DatasetIterNormal): | |||
| raise RuntimeError("Dataset should be connected with network only in sink mode.") | |||
| raise RuntimeError("The API 'connect_network_with_dataset' should be called in dataset sink mode.") | |||
| ms_role = os.getenv("MS_ROLE") | |||
| if ms_role in ("MS_PSERVER", "MS_SCHED"): | |||
| @@ -220,7 +220,7 @@ class DatasetHelper: | |||
| dataset_sink_mode = Validator.check_bool(dataset_sink_mode) | |||
| Validator.check_is_int(sink_size) | |||
| if sink_size < -1 or sink_size == 0: | |||
| raise ValueError("The sink_size must be -1 or positive, but got sink_size {}.".format(sink_size)) | |||
| raise ValueError("The 'sink_size' must be -1 or positive, but got sink_size {}.".format(sink_size)) | |||
| if sink_size == -1: | |||
| sink_size = dataset.get_dataset_size() | |||
| @@ -238,8 +238,8 @@ class DatasetHelper: | |||
| (context.get_context("device_target") == "GPU"): | |||
| iterclass = _DatasetIterMSLoopSink | |||
| elif context.get_context("device_target") == "CPU": | |||
| raise RuntimeError( | |||
| "Currently dataset sink mode is not supported when the device target is CPU.") | |||
| raise RuntimeError("Currently dataset sink mode is not supported when the device " | |||
| "target is CPU, please set dataset sink mode to False.") | |||
| else: | |||
| iterclass = _DatasetIterPyNative | |||
| self.iter = iterclass(dataset, sink_size, epoch_num) | |||
| @@ -329,8 +329,8 @@ class _DatasetIter: | |||
| if hasattr(dataset, '__loop_size__'): | |||
| loop_size = dataset.__loop_size__ | |||
| if loop_size <= dataset.get_dataset_size() and dataset.get_dataset_size() % loop_size != 0: | |||
| raise ValueError(f'Dataset size {dataset.get_dataset_size()} and ' | |||
| f'sink_size {loop_size} are not matched.') | |||
| raise ValueError(f"Dataset size {dataset.get_dataset_size()} and sink_size {loop_size} " | |||
| f"are not matched, dataset size should be divisible by sink_size.") | |||
| sink_count = math.ceil(dataset.get_dataset_size() / loop_size) | |||
| return sink_count | |||
| @@ -65,7 +65,7 @@ class FixedLossScaleManager(LossScaleManager): | |||
| """ | |||
| def __init__(self, loss_scale=128.0, drop_overflow_update=True): | |||
| if loss_scale < 1: | |||
| raise ValueError("loss_scale must be at least 1, " | |||
| raise ValueError("loss_scale must be >= 1, " | |||
| "but got loss_scale {}".format(loss_scale)) | |||
| self._loss_scale = loss_scale | |||
| self._drop_overflow_update = drop_overflow_update | |||
| @@ -131,12 +131,12 @@ class DynamicLossScaleManager(LossScaleManager): | |||
| scale_factor=2, | |||
| scale_window=2000): | |||
| if init_loss_scale < 1.0: | |||
| raise ValueError("Loss scale value should be > 1") | |||
| raise ValueError("loss_scale must be >= 1, but got loss_scale {}".format(init_loss_scale)) | |||
| self.loss_scale = init_loss_scale | |||
| validator.check_positive_int(scale_window, "scale_window", self.__class__.__name__) | |||
| self.scale_window = scale_window | |||
| if scale_factor <= 0: | |||
| raise ValueError("Scale factor should be > 1") | |||
| raise ValueError("Scale factor should be > 0, but got scale_factor {}".format(scale_factor)) | |||
| self.scale_factor = scale_factor | |||
| self.increase_ratio = scale_factor | |||
| self.decrease_ratio = 1 / scale_factor | |||
| @@ -171,7 +171,8 @@ class DynamicLossScaleManager(LossScaleManager): | |||
| self.bad_step = 0 | |||
| if self.bad_step > self.bad_step_max: | |||
| raise RuntimeError("Dynamic loss scale Continuous overflow ", self.bad_step, " times") | |||
| raise RuntimeError("Dynamic loss scale continuous overflow ", self.bad_step, | |||
| " times, has exceeded maximum threshold.") | |||
| self.cur_iter += 1 | |||
| @@ -186,7 +186,8 @@ class Model: | |||
| def _check_kwargs(self, kwargs): | |||
| for arg in kwargs: | |||
| if arg not in ['loss_scale_manager', 'keep_batchnorm_fp32']: | |||
| raise ValueError(f"Unsupported arg '{arg}'") | |||
| raise ValueError(f"The argument in 'kwargs' should be 'loss_scale_manager' or " | |||
| f"'keep_batchnorm_fp32', but got '{arg}'.") | |||
| def _check_reuse_dataset(self, dataset): | |||
| if not hasattr(dataset, '__model_hash__'): | |||
| @@ -298,7 +299,7 @@ class Model: | |||
| if isinstance(outputs, Tensor): | |||
| outputs = (outputs,) | |||
| if not isinstance(outputs, tuple): | |||
| raise ValueError("The `outputs` is not tuple.") | |||
| raise ValueError(f"The argument `outputs` should be tuple, but got {type(outputs)}.") | |||
| if self._eval_indexes is not None and len(outputs) < 3: | |||
| raise ValueError("The length of `outputs` must be greater than or equal to 3, \ | |||
| @@ -398,7 +399,7 @@ class Model: | |||
| raise RuntimeError('Pre-init process only supports GRAPH MODE and Ascend target currently.') | |||
| if not train_dataset and not valid_dataset: | |||
| raise ValueError('Both train_dataset and valid_dataset can not be None or empty.') | |||
| raise ValueError("'train_dataset' and 'valid_dataset' can not both be None or empty.") | |||
| _device_number_check(self._parallel_mode, self._device_number) | |||
| @@ -422,7 +423,8 @@ class Model: | |||
| if valid_dataset: | |||
| if not self._metric_fns: | |||
| raise RuntimeError('If define `valid_dataset`, metric fn can not be None or empty.') | |||
| raise RuntimeError("If define `valid_dataset`, metric fn can not be None or empty, " | |||
| "you should set the argument 'metrics' for model.") | |||
| valid_dataset.__no_send__ = True | |||
| valid_dataset_helper, eval_network = self._exec_preprocess(is_train=False, | |||
| @@ -612,8 +614,9 @@ class Model: | |||
| len_element = len(next_element) | |||
| next_element = _transfer_tensor_to_tuple(next_element) | |||
| if self._loss_fn and len_element != 2: | |||
| raise ValueError("when loss_fn is not None, train_dataset should " | |||
| "return two elements, but got {}".format(len_element)) | |||
| raise ValueError("When 'loss_fn' is not None, 'train_dataset' should return " | |||
| "two elements, but got {}, please check the number of elements " | |||
| "returned by 'train_dataset'.".format(len_element)) | |||
| cb_params.cur_step_num += 1 | |||
| cb_params.train_dataset_element = next_element | |||
| @@ -696,15 +699,15 @@ class Model: | |||
| """ | |||
| dataset_sink_mode = Validator.check_bool(dataset_sink_mode) | |||
| if isinstance(self._train_network, nn.GraphCell) and dataset_sink_mode is True: | |||
| raise ValueError("Sink mode is currently not supported when training with a GraphCell.") | |||
| raise ValueError("Dataset sink mode is currently not supported when training with a GraphCell.") | |||
| Validator.check_is_int(sink_size) | |||
| dataset_size = train_dataset.get_dataset_size() | |||
| if dataset_size == 0: | |||
| raise ValueError("There is no valid data in dataset, please check dataset file first.") | |||
| raise ValueError("There is no valid data in dataset, please check dataset file first.") | |||
| if sink_size == -1: | |||
| sink_size = dataset_size | |||
| if sink_size < -1 or sink_size == 0: | |||
| raise ValueError("The sink_size must be -1 or positive, but got sink_size {}.".format(sink_size)) | |||
| raise ValueError("The 'sink_size' must be -1 or positive, but got sink_size {}.".format(sink_size)) | |||
| _device_number_check(self._parallel_mode, self._device_number) | |||
| @@ -859,7 +862,8 @@ class Model: | |||
| _device_number_check(self._parallel_mode, self._device_number) | |||
| if not self._metric_fns: | |||
| raise ValueError("metric fn can not be None or empty.") | |||
| raise ValueError("The model argument 'metrics' can not be None or empty, " | |||
| "you should set the argument 'metrics' for model.") | |||
| if isinstance(self._eval_network, nn.GraphCell) and dataset_sink_mode is True: | |||
| raise ValueError("Sink mode is currently not supported when evaluating with a GraphCell.") | |||
| @@ -933,22 +937,23 @@ class Model: | |||
| sink_size (int): Control the amount of data in each sink. | |||
| """ | |||
| if context.get_context("mode") != context.GRAPH_MODE: | |||
| raise RuntimeError('Pre-compile process only supports GRAPH MODE and Ascend target currently.') | |||
| raise RuntimeError("Pre-compile process that generates parameter layout for the train network " | |||
| "only supports GRAPH MODE and Ascend target currently.") | |||
| if _get_parallel_mode() not in (ParallelMode.SEMI_AUTO_PARALLEL, ParallelMode.AUTO_PARALLEL): | |||
| raise RuntimeError('infer train layout only supports semi auto parallel and auto parallel mode.') | |||
| raise RuntimeError('Infer train layout only supports semi auto parallel and auto parallel mode.') | |||
| dataset_sink_mode = Validator.check_bool(dataset_sink_mode) | |||
| if not dataset_sink_mode: | |||
| raise ValueError("Only dataset sink mode is supported for now.") | |||
| if isinstance(self._train_network, nn.GraphCell) and dataset_sink_mode is True: | |||
| raise ValueError("Sink mode is currently not supported when training with a GraphCell.") | |||
| raise ValueError("Dataset sink mode is currently not supported when training with a GraphCell.") | |||
| Validator.check_is_int(sink_size) | |||
| dataset_size = train_dataset.get_dataset_size() | |||
| if dataset_size == 0: | |||
| raise ValueError("There is no valid data in dataset, please check dataset file first.") | |||
| raise ValueError("There is no valid data in dataset, please check dataset file first.") | |||
| if sink_size == -1: | |||
| sink_size = dataset_size | |||
| if sink_size < -1 or sink_size == 0: | |||
| raise ValueError("The sink_size must be -1 or positive, but got sink_size {}.".format(sink_size)) | |||
| raise ValueError("The 'sink_size' must be -1 or positive, but got sink_size {}.".format(sink_size)) | |||
| def infer_train_layout(self, train_dataset, dataset_sink_mode=True, sink_size=-1): | |||
| """ | |||
| @@ -1051,9 +1056,10 @@ class Model: | |||
| >>> predict_map = model.infer_predict_layout(input_data) | |||
| """ | |||
| if context.get_context("mode") != context.GRAPH_MODE: | |||
| raise RuntimeError('Pre-compile process only supports GRAPH MODE currently.') | |||
| raise RuntimeError("Pre-compile process that generates parameter layout for the predict network " | |||
| "only supports GRAPH MODE currently.") | |||
| if _get_parallel_mode() not in (ParallelMode.SEMI_AUTO_PARALLEL, ParallelMode.AUTO_PARALLEL): | |||
| raise RuntimeError('infer predict layout only supports semi auto parallel and auto parallel mode.') | |||
| raise RuntimeError('Infer predict layout only supports semi auto parallel and auto parallel mode.') | |||
| _parallel_predict_check() | |||
| check_input_data(*predict_data, data_class=Tensor) | |||