
!855 clean pylint and codedex warnings

Merge pull request !855 from zyli2020/fix_pylint_warnings
tags/v0.3.0-alpha
mindspore-ci-bot committed 5 years ago
commit cb9fa17a8d
7 changed files with 24 additions and 32 deletions
  1. mindspore/_akg/gpu/squeeze_grad.py (+2, -2)
  2. mindspore/_akg/message.py (+6, -13)
  3. mindspore/_akg/op_build.py (+3, -3)
  4. mindspore/_akg/ops/math/mean.py (+2, -2)
  5. mindspore/_akg/ops/math/sum_value.py (+0, -0)
  6. mindspore/ccsrc/kernel/common_utils.cc (+11, -11)
  7. mindspore/ops/_op_impl/akg/gpu/squeeze_grad.py (+0, -1)

mindspore/_akg/gpu/squeeze_grad.py (+2, -2)

@@ -15,14 +15,14 @@
 """squeeze grad"""
 import _akg.topi as topi

-def SqueezeGrad(y_grad, x_shape, axis=None):
+def SqueezeGrad(y_grad, x_shape):
     """
     Computes gradients for squeeze op.

     Args:
         y_grad (tvm.tensor.Tensor): the gradient needed to be propagation.
         x_shape (Union[list, tuple]): output Tensor shape.
-        axis (Union[list, tuple, int, None], optional): eliminated axis by squeeze.

     Returns:
         tvm.tensor.Tensor: output gradient.
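Dropping axis here fits the pylint-cleanup theme (an unused argument), since the squeeze gradient only needs the original input shape. A minimal sketch of the simplified function, assuming a body (not shown in this hunk) that reshapes y_grad back to x_shape with topi:

import _akg.topi as topi


def SqueezeGrad(y_grad, x_shape):
    """Sketch of the squeeze gradient: restore the pre-squeeze shape of the incoming gradient."""
    # Reshaping y_grad to x_shape re-creates every axis that squeeze removed,
    # so a separate axis argument is unnecessary.
    return topi.reshape(y_grad, x_shape)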


mindspore/_akg/message.py (+6, -13)

@@ -46,7 +46,8 @@ def compilewithjson(json_str):
         impl_path = os.path.realpath(kernel_info['impl_path'])
         if os.path.isfile(impl_path):
             custom_mod_name = Path(impl_path).resolve().stem
-            mod_spec = importlib.util.spec_from_file_location(custom_mod_name, impl_path)
+            mod_spec = importlib.util.spec_from_file_location(
+                custom_mod_name, impl_path)
             custom_mod = importlib.util.module_from_spec(mod_spec)
             mod_spec.loader.exec_module(custom_mod)
             op_func = getattr(custom_mod, op_name, None)
@@ -57,7 +58,8 @@ def compilewithjson(json_str):
         op_func = getattr(gpu, op_name, None)

     if op_func is None:
-        logging.error("this op not supported, please check op name %s", str(op_name))
+        logging.error(
+            "this op not supported, please check op name %s", str(op_name))
         return False

     args = {}
@@ -87,25 +89,16 @@

     output = op_func(**args)

-    schedule_func = None
-    attrs = {}
     if isinstance(output, (list, tuple)):
         from inspect import isfunction
         tmp_outputs = []
         for elem in output:
-            if isfunction(elem):
-                schedule_func = elem
-            elif isinstance(elem, dict):
-                for key, value in elem.items():
-                    if key not in attrs or not attrs[key]:
-                        attrs[key] = value
-            else:
+            if not isfunction(elem) or isinstance(elem, dict):
                 tmp_outputs.append(elem)

         output = tmp_outputs
     else:
         output = [output]

     tsr = tsr + [i for i in output if TensorUtils.is_output_value(i)]
-    return op_build([op_name], output, tsr, schedule_func, processor, kernel_info['op'], attrs)
+    return op_build([op_name], output, tsr, processor, kernel_info['op'])
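The simplified loop keeps everything in the op's return value except schedule functions, and the schedule_func/attrs plumbing is no longer forwarded to op_build. A self-contained toy version of the filtering step (the values below are hypothetical, not from the repo):

from inspect import isfunction


def flatten_op_output(output):
    """Toy version of the new filtering step: drop schedule functions from an op's return value."""
    if isinstance(output, (list, tuple)):
        return [elem for elem in output if not isfunction(elem) or isinstance(elem, dict)]
    return [output]


# Hypothetical op result: an output tensor stand-in, a schedule function and an attrs dict.
print(flatten_op_output(("out_tensor", lambda s: s, {"enable_auto_inline": True})))
# -> ['out_tensor', {'enable_auto_inline': True}]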

mindspore/_akg/op_build.py (+3, -3)

@@ -25,8 +25,8 @@ from _akg import save_gpu_param as gpu_utils
 from _akg.utils import validation_check as vc_util


-@vc_util.check_input_type(list, (list, tuple), (list, tuple), (types.FunctionType, type(None)), str, str, dict)
-def op_build(opnames, computes, args, custom_schedule, device, kernel_name, attrs):
+@vc_util.check_input_type(list, (list, tuple), (list, tuple), str, str)
+def op_build(opnames, computes, args, device, kernel_name):
     """op_build"""
     kernel_meta_path = "./cuda_meta_" + str(os.getpid()) + "/"
     if device == "cuda":
@@ -60,7 +60,7 @@ def op_build(opnames, computes, args, custom_schedule, device, kernel_name, attrs):
             kernel_info = (ptx_code, json_file, kernel_name)
             gpu_utils.save_gpu_params(s, args, kernel_info)
             os.chmod(ptx_file, 0o400)
-        except Exception:
+        except IOError:
             logging.error(traceback.format_exc())
             return None
         return True
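Replacing except Exception with except IOError narrows the handler to the file-system failures the try block can realistically raise, which is the usual way to silence pylint's broad-except warning. A minimal, self-contained sketch of the pattern (the helper and file name are hypothetical, not part of op_build):

import logging
import os
import traceback


def write_ptx(ptx_file, ptx_code):
    """Hypothetical helper showing the narrowed exception handling."""
    try:
        with open(ptx_file, "w") as out:
            out.write(ptx_code)
        os.chmod(ptx_file, 0o400)  # make the cached kernel read-only
    except IOError:  # only file-system errors are expected here
        logging.error(traceback.format_exc())
        return None
    return True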


mindspore/_akg/ops/math/mean.py (+2, -2)

@@ -17,7 +17,7 @@ import _akg.topi
 import _akg.tvm
 from _akg.utils import format_transform as ft_util
 from _akg.utils import validation_check as vc_util
-from _akg.ops.math import sum
+from _akg.ops.math import sum_value


 @vc_util.check_input_type(_akg.tvm.tensor.Tensor, (list, tuple, int, type(None)), (bool, type(None)))
@@ -41,7 +41,7 @@ def mean(data, axis=None, keepdims=False):
     count = 1
     for i in axis:
         count *= shape[i]
-    output, _ = sum.sum_value(data, axis, keepdims)
+    output, _ = sum_value.sum_value(data, axis, keepdims)
     res = _akg.topi.divide(output, count)

     return res
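Renaming the module from sum.py to sum_value.py (and updating the import) avoids binding the name sum inside mean.py, which shadows the Python built-in and trips pylint's redefined-builtin check. A runnable illustration of why the old import was flagged (the rebinding below stands in for the old import; it is not repo code):

sum = object()            # what `from _akg.ops.math import sum` effectively did to the name
try:
    total = sum([1, 2, 3])
except TypeError as err:
    print("built-in shadowed:", err)

del sum                   # with the module renamed to sum_value, nothing rebinds `sum`
print(sum([1, 2, 3]))     # 6 — the built-in works again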

mindspore/_akg/ops/math/sum.py → mindspore/_akg/ops/math/sum_value.py (renamed, no content changes)


mindspore/ccsrc/kernel/common_utils.cc (+11, -11)

@@ -131,18 +131,18 @@ void KernelMeta::Initialize() {
 }

 void KernelMeta::RemoveKernelCache() {
-  if (access(kernel_meta_path_.c_str(), 0) == 0) {
-    DIR *dir = opendir(kernel_meta_path_.c_str());
-    MS_EXCEPTION_IF_NULL(dir);
-    struct dirent *entry;
-    while ((entry = readdir(dir)) != nullptr) {
-      std::string kernel_file = entry->d_name;
-      std::string kernel_file_realpath = kernel_meta_path_ + kernel_file;
-      (void)remove(kernel_file_realpath.c_str());
-    }
-    (void)closedir(dir);
-    (void)rmdir(kernel_meta_path_.c_str());
+  DIR *dir = opendir(kernel_meta_path_.c_str());
+  if (dir == nullptr) {
+    return;
+  }
+  struct dirent *entry;
+  while ((entry = readdir(dir)) != nullptr) {
+    std::string kernel_file = entry->d_name;
+    std::string kernel_file_realpath = kernel_meta_path_ + kernel_file;
+    (void)remove(kernel_file_realpath.c_str());
   }
+  (void)closedir(dir);
+  (void)rmdir(kernel_meta_path_.c_str());
 }

 std::string KernelMeta::Search(const std::string &kernel_name) const {


mindspore/ops/_op_impl/akg/gpu/squeeze_grad.py (+0, -1)

@@ -20,7 +20,6 @@ squeeze_grad_op_info = AkgRegOp("SqueezeGrad") \
     .input(0, "y_grad") \
     .output(0, "output") \
     .attr("x_shape", "required", "listInt") \
-    .attr("axis", "optional", "listInt") \
     .dtype_format(DataType.F16_Default, DataType.F16_Default) \
     .dtype_format(DataType.F32_Default, DataType.F32_Default) \
     .get_op_info()
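With the optional axis attribute removed, the GPU registration carries only the required x_shape attribute, matching the simplified SqueezeGrad signature above. For reference, the registration chain as assembled from this hunk's own lines (any lines between the assignment and .input(...) that fall outside the hunk, and the surrounding imports, are omitted):

squeeze_grad_op_info = AkgRegOp("SqueezeGrad") \
    .input(0, "y_grad") \
    .output(0, "output") \
    .attr("x_shape", "required", "listInt") \
    .dtype_format(DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
    .get_op_info()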

