diff --git a/mindspore/_check_deps_version.py b/mindspore/_check_deps_version.py
index a6a26b5ac1..37d0bbaa78 100644
--- a/mindspore/_check_deps_version.py
+++ b/mindspore/_check_deps_version.py
@@ -34,6 +34,7 @@ def parse_args():
     args = parser.parse_args()
     return args
 
+
 def check_deps_version(mindspore_version, supported_version):
     """
     check te/hccl/topi version
@@ -62,17 +63,18 @@ def check_deps_version(mindspore_version, supported_version):
             print(f"MindSpore version {mindspore_version} and \"topi\" wheel package version {v} does not "
                   "match, reference to the match info on: https://www.mindspore.cn/install")
 
-    # pylint: disable=broad-except
     except Exception as e:
         print("CheckFailed: ", e.args)
         print("Minspore relies on the 3 whl packages of \"te\", \"topi\" and \"hccl\" in the \"fwkacllib\" "
               "folder of the Ascend 910 AI software package, please check whether they are installed "
              "correctly or not, reference to the match info on: https://www.mindspore.cn/install")
 
+
 def main():
     args = parse_args()
     check_deps_version(args.mindspore_version, args.supported_version)
 
+
 if __name__ == "__main__":
     sys.path = sys.path[1:] # avoid the impact of relative path env, only affect this process
     main()
diff --git a/mindspore/_check_version.py b/mindspore/_check_version.py
index 44be538d36..978d748c2b 100644
--- a/mindspore/_check_version.py
+++ b/mindspore/_check_version.py
@@ -292,9 +292,7 @@ class AscendEnvChecker(EnvChecker):
             return
 
         try:
-            # pylint: disable=unused-import
             import te
-            # pylint: disable=broad-except
         except Exception:
             if Path(self.tbe_path).is_dir():
                 if os.getenv('LD_LIBRARY_PATH'):
@@ -373,6 +371,7 @@ class AscendEnvChecker(EnvChecker):
             return self.v
         return self.v
 
+
 def check_version_and_env_config():
     """check version and env config"""
     if __package_name__.lower() == "mindspore-ascend":
@@ -384,7 +383,6 @@ def check_version_and_env_config():
         return
 
     try:
-        # pylint: disable=unused-import
         from . import _c_expression
         # check version of ascend site or cuda
         env_checker.check_version()
diff --git a/mindspore/common/parameter.py b/mindspore/common/parameter.py
index 8bfa88a642..acf02b6f54 100644
--- a/mindspore/common/parameter.py
+++ b/mindspore/common/parameter.py
@@ -350,7 +350,6 @@ class Parameter(Tensor_):
             Parameter, a new parameter.
         """
         x = copy(self)
-        # pylint: disable=protected-access
         x.param_info = self.param_info.clone()
         x.is_init = False
         x.init = self.init
@@ -426,11 +425,9 @@ class Parameter(Tensor_):
     def _update_tensor_data(self, data):
         "Update the parameter by a Tensor."
         if isinstance(self, Tensor):
-            # for Tensor same shape:
             self.init_flag = False
             self.init = None
             return self.assign_value(data)
-        # create a new tensor
         new_param = Parameter(data, self.name, self.requires_grad)
         new_param.param_info = self.param_info
         return new_param
diff --git a/mindspore/context.py b/mindspore/context.py
index 9d9eaf4eb5..311944072a 100644
--- a/mindspore/context.py
+++ b/mindspore/context.py
@@ -238,7 +238,6 @@ class _Context:
         graph_memory_max_size = _DEVICE_APP_MEMORY_SIZE - int(variable_memory_max_size[:-2])
         graph_memory_max_size_ = str(graph_memory_max_size) + " * 1024 * 1024 * 1024"
         self.set_param(ms_ctx_param.variable_memory_max_size, variable_memory_max_size_)
-        # pylint: disable=protected-access
         self.set_param(ms_ctx_param._graph_memory_max_size, graph_memory_max_size_)
 
     def set_max_device_memory(self, max_device_memory):
diff --git a/mindspore/core/ops/adam.cc b/mindspore/core/ops/adam.cc
index fc5e0ab799..e696e7c676 100644
--- a/mindspore/core/ops/adam.cc
+++ b/mindspore/core/ops/adam.cc
@@ -43,7 +43,6 @@ abstract::AbstractBasePtr AdamInfer(const PrimitivePtr &primitive, const std::ve
   auto infer_m_type = CheckAndConvertUtils::CheckTensorTypeValid("m_type", m_type, common_valid_types, prim_name);
   auto infer_v_type = CheckAndConvertUtils::CheckTensorTypeValid("v_type", v_type, common_valid_types, prim_name);
   (void)CheckAndConvertUtils::CheckTensorTypeValid("grad_type", grad_type, common_valid_types, prim_name);
-  // auto infer_grad_type = grad_type->cast<TensorTypePtr>()->element();
   auto output0 = std::make_shared<abstract::AbstractTensor>(infer_var_type, var_shape);
   auto output1 = std::make_shared<abstract::AbstractTensor>(infer_m_type, m_shape);
   auto output2 = std::make_shared<abstract::AbstractTensor>(infer_v_type, v_shape);
diff --git a/mindspore/core/ops/assert.cc b/mindspore/core/ops/assert.cc
index 7bb4f621a1..1eff1ee887 100644
--- a/mindspore/core/ops/assert.cc
+++ b/mindspore/core/ops/assert.cc
@@ -52,7 +52,6 @@ AbstractBasePtr AssertInfer(const abstract::AnalysisEnginePtr &, const Primitive
   if (condition_shape[0] == 1) {
     auto condition_value = reinterpret_cast<bool *>(input_args[0]->BuildValue()->cast<tensor::TensorPtr>()->data_c());
     MS_EXCEPTION_IF_NULL(condition_value);
-    // auto condition_value = GetValue<bool>(input_args[0]->BuildValue());
     CheckAndConvertUtils::CheckInteger("condition[0]", *condition_value, kEqual, 1, op_name);
   }
   condition = input_args[0]->BuildType();
diff --git a/mindspore/nn/layer/thor_layer.py b/mindspore/nn/layer/thor_layer.py
index 323cb095de..8afb824884 100644
--- a/mindspore/nn/layer/thor_layer.py
+++ b/mindspore/nn/layer/thor_layer.py
@@ -184,10 +184,9 @@ class Dense_Thor(Cell):
         s = 'input_channels={}, output_channels={}'.format(self.in_channels, self.out_channels)
         if self.has_bias:
             s += ', has_bias={}'.format(self.has_bias)
-        # if self.activation_flag:
-        #     s += ', activation={}'.format(self.activation)
         return s
 
+
 class _Conv(Cell):
     """
     Applies a N-D convolution over an input signal composed of several input planes.
@@ -212,7 +211,6 @@ class _Conv(Cell):
         self.kernel_size = kernel_size
         self.stride = stride
         self.pad_mode = pad_mode
-        # self.weight_init = weight_init
         self.bias_init = bias_init
         if isinstance(padding, int):
             Validator.check_non_negative_int(padding, 'padding', self.cls_name)