!15404 mod_redundant_code

From: @changzherui
Reviewed-by: @kingxian, @zhoufeng54
Signed-off-by: @kingxian
pull/15404/MERGE
Committed by mindspore-ci-bot on Gitee, 4 years ago
commit 95268474cc
7 changed files with 5 additions and 13 deletions
  1. mindspore/_check_deps_version.py   +3 -1
  2. mindspore/_check_version.py        +1 -3
  3. mindspore/common/parameter.py      +0 -3
  4. mindspore/context.py               +0 -1
  5. mindspore/core/ops/adam.cc         +0 -1
  6. mindspore/core/ops/assert.cc       +0 -1
  7. mindspore/nn/layer/thor_layer.py   +1 -3

mindspore/_check_deps_version.py (+3 -1)

@@ -34,6 +34,7 @@ def parse_args():
     args = parser.parse_args()
     return args
 
+
 def check_deps_version(mindspore_version, supported_version):
     """
     check te/hccl/topi version
@@ -62,17 +63,18 @@ def check_deps_version(mindspore_version, supported_version):
             print(f"MindSpore version {mindspore_version} and \"topi\" wheel package version {v} does not "
                   "match, reference to the match info on: https://www.mindspore.cn/install")
 
-    # pylint: disable=broad-except
     except Exception as e:
         print("CheckFailed: ", e.args)
         print("Minspore relies on the 3 whl packages of \"te\", \"topi\" and \"hccl\" in the \"fwkacllib\" "
               "folder of the Ascend 910 AI software package, please check whether they are installed "
               "correctly or not, reference to the match info on: https://www.mindspore.cn/install")
 
+
 def main():
     args = parse_args()
     check_deps_version(args.mindspore_version, args.supported_version)
 
+
 if __name__ == "__main__":
     sys.path = sys.path[1:]  # avoid the impact of relative path env, only affect this process
     main()
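
The check this script performs follows a simple pattern: read the installed dependency's version, compare it against the supported set, and warn rather than raise on any failure. A minimal standalone sketch of that pattern, using an illustrative package and version list rather than MindSpore's actual supported set:

    import importlib

    def check_dep_version(dep_name, supported_versions):
        """Warn, never raise, when a dependency version is outside the supported set."""
        try:
            v = importlib.import_module(dep_name).__version__
            if v not in supported_versions:
                print(f'"{dep_name}" version {v} does not match the supported versions '
                      f'{supported_versions}, reference: https://www.mindspore.cn/install')
        except Exception as e:  # broad except, as in the script: a check must not block startup
            print("CheckFailed: ", e.args)

    check_dep_version("numpy", ["1.17.0", "1.19.0"])  # illustrative values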

mindspore/_check_version.py (+1 -3)

@@ -292,9 +292,7 @@ class AscendEnvChecker(EnvChecker):
             return
 
         try:
-            # pylint: disable=unused-import
             import te
-        # pylint: disable=broad-except
         except Exception:
             if Path(self.tbe_path).is_dir():
                 if os.getenv('LD_LIBRARY_PATH'):
@@ -373,6 +371,7 @@ class AscendEnvChecker(EnvChecker):
                 return self.v
         return self.v
 
+
 def check_version_and_env_config():
     """check version and env config"""
     if __package_name__.lower() == "mindspore-ascend":
@@ -384,7 +383,6 @@ def check_version_and_env_config():
         return
 
     try:
-        # pylint: disable=unused-import
         from . import _c_expression
         # check version of ascend site or cuda
         env_checker.check_version()
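
The try/import blocks above are probe imports: the import itself is the compatibility check, and the except branch inspects the filesystem and LD_LIBRARY_PATH to turn a bare ImportError into an actionable hint. A self-contained sketch of the pattern, with a hypothetical library path:

    import os
    from pathlib import Path

    def probe_import(module_name, expected_lib_dir):
        """Try an optional import; on failure, point at a likely environment fix."""
        try:
            __import__(module_name)  # the import itself is the whole check
            return True
        except Exception:  # deliberately broad: any failure gets the same hint
            if Path(expected_lib_dir).is_dir():
                current = os.getenv('LD_LIBRARY_PATH', '')
                print(f'"{module_name}" exists under {expected_lib_dir} but failed to '
                      f'import; try adding that directory to LD_LIBRARY_PATH '
                      f'(currently {current!r}).')
            return False

    probe_import("te", "/usr/local/Ascend/fwkacllib/lib64")  # hypothetical path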


mindspore/common/parameter.py (+0 -3)

@@ -350,7 +350,6 @@ class Parameter(Tensor_):
             Parameter, a new parameter.
         """
         x = copy(self)
-        # pylint: disable=protected-access
         x.param_info = self.param_info.clone()
         x.is_init = False
         x.init = self.init
@@ -426,11 +425,9 @@ class Parameter(Tensor_):
     def _update_tensor_data(self, data):
         "Update the parameter by a Tensor."
         if isinstance(self, Tensor):
-            # for Tensor same shape:
             self.init_flag = False
             self.init = None
             return self.assign_value(data)
-        # create a new tensor
         new_param = Parameter(data, self.name, self.requires_grad)
         new_param.param_info = self.param_info
         return new_param
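
_update_tensor_data distinguishes update-in-place from copy-on-update: when the parameter already holds real tensor data, assign_value reuses the existing object; otherwise a fresh Parameter is built and param_info is carried over. A short usage sketch of the related clone() path touched above (assumes a working MindSpore install; shape and name are illustrative):

    import numpy as np
    from mindspore import Parameter, Tensor

    w = Parameter(Tensor(np.zeros((2, 2), np.float32)), name="w", requires_grad=True)
    # Per the diff: clone() deep-copies param_info, resets is_init, and keeps init.
    w_copy = w.clone()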


mindspore/context.py (+0 -1)

@@ -238,7 +238,6 @@ class _Context:
         graph_memory_max_size = _DEVICE_APP_MEMORY_SIZE - int(variable_memory_max_size[:-2])
         graph_memory_max_size_ = str(graph_memory_max_size) + " * 1024 * 1024 * 1024"
         self.set_param(ms_ctx_param.variable_memory_max_size, variable_memory_max_size_)
-        # pylint: disable=protected-access
         self.set_param(ms_ctx_param._graph_memory_max_size, graph_memory_max_size_)
 
     def set_max_device_memory(self, max_device_memory):
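
The arithmetic above splits a fixed per-device budget between variable memory and graph memory: the trailing "GB" is stripped from the user's setting, and whatever remains of _DEVICE_APP_MEMORY_SIZE becomes graph memory, encoded as a byte-product string. A worked example (the 31 GB total is an assumption for illustration; this diff does not show the constant's value):

    _DEVICE_APP_MEMORY_SIZE = 31          # assumed total budget in GB, illustration only
    variable_memory_max_size = "5GB"      # user-provided setting

    variable_gb = int(variable_memory_max_size[:-2])               # "5GB" -> 5
    graph_memory_max_size = _DEVICE_APP_MEMORY_SIZE - variable_gb  # 31 - 5 = 26
    graph_memory_max_size_ = str(graph_memory_max_size) + " * 1024 * 1024 * 1024"
    print(graph_memory_max_size_)         # prints: 26 * 1024 * 1024 * 1024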


mindspore/core/ops/adam.cc (+0 -1)

@@ -43,7 +43,6 @@ abstract::AbstractBasePtr AdamInfer(const PrimitivePtr &primitive, const std::ve
   auto infer_m_type = CheckAndConvertUtils::CheckTensorTypeValid("m_type", m_type, common_valid_types, prim_name);
   auto infer_v_type = CheckAndConvertUtils::CheckTensorTypeValid("v_type", v_type, common_valid_types, prim_name);
   (void)CheckAndConvertUtils::CheckTensorTypeValid("grad_type", grad_type, common_valid_types, prim_name);
-  // auto infer_grad_type = grad_type->cast<TensorTypePtr>()->element();
   auto output0 = std::make_shared<abstract::AbstractTensor>(infer_var_type, var_shape);
   auto output1 = std::make_shared<abstract::AbstractTensor>(infer_m_type, m_shape);
   auto output2 = std::make_shared<abstract::AbstractTensor>(infer_v_type, v_shape);


mindspore/core/ops/assert.cc (+0 -1)

@@ -52,7 +52,6 @@ AbstractBasePtr AssertInfer(const abstract::AnalysisEnginePtr &, const Primitive
   if (condition_shape[0] == 1) {
     auto condition_value = reinterpret_cast<bool *>(input_args[0]->BuildValue()->cast<tensor::TensorPtr>()->data_c());
     MS_EXCEPTION_IF_NULL(condition_value);
-    // auto condition_value = GetValue<bool>(input_args[0]->BuildValue());
     CheckAndConvertUtils::CheckInteger("condition[0]", *condition_value, kEqual, 1, op_name);
   }
   condition = input_args[0]->BuildType();


mindspore/nn/layer/thor_layer.py (+1 -3)

@@ -184,10 +184,9 @@ class Dense_Thor(Cell):
         s = 'input_channels={}, output_channels={}'.format(self.in_channels, self.out_channels)
         if self.has_bias:
             s += ', has_bias={}'.format(self.has_bias)
-        # if self.activation_flag:
-        #     s += ', activation={}'.format(self.activation)
         return s
 
+
 class _Conv(Cell):
     """
     Applies a N-D convolution over an input signal composed of several input planes.
@@ -212,7 +211,6 @@ class _Conv(Cell):
         self.kernel_size = kernel_size
         self.stride = stride
         self.pad_mode = pad_mode
-        # self.weight_init = weight_init
         self.bias_init = bias_init
         if isinstance(padding, int):
             Validator.check_non_negative_int(padding, 'padding', self.cls_name)
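
extend_repr is the hook an nn.Cell subclass overrides to append fields to its printed representation, which is why Dense_Thor builds and returns the string s above; the commented-out activation branch was dead code. A minimal sketch of the same pattern (assumes a working MindSpore install; the class is a made-up example):

    from mindspore import nn

    class TinyDense(nn.Cell):
        def __init__(self, in_channels, out_channels, has_bias=True):
            super().__init__()
            self.in_channels = in_channels
            self.out_channels = out_channels
            self.has_bias = has_bias

        def extend_repr(self):
            # Mirrors Dense_Thor: report channels, and the bias flag only when set.
            s = 'input_channels={}, output_channels={}'.format(self.in_channels, self.out_channels)
            if self.has_bias:
                s += ', has_bias={}'.format(self.has_bias)
            return s

    print(TinyDense(3, 4))  # extend_repr's text appears inside the cell's repr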

