
fixing pylint for operators

tags/v1.0.0
zhangz0911gm · 5 years ago · commit de2a458e7c
11 changed files with 0 additions and 20 deletions
 1. +0 -3  mindspore/ops/_grad/grad_nn_ops.py
 2. +0 -1  mindspore/ops/_op_impl/_custom_op/correction_mul.py
 3. +0 -1  mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py
 4. +0 -1  mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py
 5. +0 -1  mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py
 6. +0 -1  mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py
 7. +0 -4  mindspore/ops/composite/multitype_ops/_constexpr_utils.py
 8. +0 -4  mindspore/ops/composite/multitype_ops/setitem_impl.py
 9. +0 -2  mindspore/ops/operations/_thor_ops.py
10. +0 -1  mindspore/ops/operations/array_ops.py
11. +0 -1  mindspore/ops/operations/nn_ops.py

mindspore/ops/_grad/grad_nn_ops.py (+0 -3)

@@ -678,16 +678,13 @@ def get_bprop_top_kv2(self):

    def bprop(input_x, k, out, dout):
        # (n1, n2, ...., n_p), in_lastdim = n_p
        in_shape = shape_op(input_x)
        in_lastdim = in_shape[-1]

        # (n_1, ... n_(p-1), k), ind_lastdim = k
        indices = out[1]
        ind_shape = shape_op(indices)
        ind_lastdim = ind_shape[-1]

        # (n_1*n_2..*n_(p-1), k), outerdim = n_1*n_2..*n_(p-1)
        ind_2d = reshape_op(indices, (-1, ind_lastdim))
        outerdim = shape_op(ind_2d)[0]
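For reference, the shape bookkeeping those comments describe, as a minimal NumPy sketch (the random data and the `argsort` stand-in are illustrative only, not the MindSpore ops):

```python
import numpy as np

# Input of shape (n_1, ..., n_p); TopK returns indices of shape (n_1, ..., n_(p-1), k).
x = np.random.rand(2, 3, 5)                  # p = 3, in_lastdim = 5
k = 2
indices = np.argsort(-x, axis=-1)[..., :k]   # stand-in for TopK's index output

in_lastdim = x.shape[-1]                     # n_p
ind_lastdim = indices.shape[-1]              # k
ind_2d = indices.reshape(-1, ind_lastdim)    # (n_1*n_2*...*n_(p-1), k)
outerdim = ind_2d.shape[0]                   # n_1*n_2*...*n_(p-1)
assert outerdim == x.size // in_lastdim
```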



mindspore/ops/_op_impl/_custom_op/correction_mul.py (+0 -1)

@@ -71,7 +71,6 @@ def correction_mul(x, batch_std, running_std, y, channel, kernel_name="correctio
    if not inp_dtype in check_list:
        raise RuntimeError("Dtype of input only support float16, float32")

    # shape = util.shape_refine(shape)
    x_t = tvm.placeholder(shape, name="x", dtype=inp_dtype)
    shape_c = [1] * len(shape)
    shape_c[channel] = batch_std.get("ori_shape")[0]
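The `shape_c` construction puts the channel length in one axis and 1 everywhere else, so the per-channel statistics broadcast against `x`. A hedged NumPy equivalent (the `y = x * batch_std / running_std` semantics is an assumption based on the operator's inputs):

```python
import numpy as np

def correction_mul_ref(x, batch_std, running_std, channel):
    # Assumed semantics: y = x * batch_std / running_std, broadcast per channel.
    shape_c = [1] * x.ndim
    shape_c[channel] = batch_std.shape[0]    # e.g. (1, C, 1, 1) for NCHW, channel=1
    return x * batch_std.reshape(shape_c) / running_std.reshape(shape_c)

x = np.ones((2, 4, 3, 3), dtype=np.float32)
out = correction_mul_ref(x, np.full(4, 2.0), np.full(4, 4.0), channel=1)
assert out.shape == x.shape and np.allclose(out, 0.5)
```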


mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py (+0 -1)

@@ -60,7 +60,6 @@ def fake_quant_perchannel_compute(x, min_val, max_val, y, quant_min, quant_max,
    quant_min = te.lang.cce.broadcast(quant_min, minmax_shape, x.dtype)
    quant_max = te.lang.cce.broadcast(quant_max, minmax_shape, x.dtype)

    # CalNudge(NudgeMinMax)
    scale = te.lang.cce.vdiv(te.lang.cce.vsub(
        max_val, min_val), te.lang.cce.vsub(quant_max, quant_min))
    zp_from_min = te.lang.cce.vsub(quant_min, te.lang.cce.vdiv(min_val, scale))
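The `# CalNudge(NudgeMinMax)` comment labels the standard fake-quantization nudging step, which recurs in the three sibling files below: derive the scale from the float range and the quantization range, then shift the range so zero lands exactly on an integer code. A plain-NumPy sketch of that step, not the TBE kernel itself (the rounding/clipping tail is the conventional completion, an assumption beyond the lines shown):

```python
import numpy as np

def nudge_min_max(min_val, max_val, quant_min, quant_max):
    scale = (max_val - min_val) / (quant_max - quant_min)   # matches the vdiv/vsub above
    zp_from_min = quant_min - min_val / scale               # matches the vsub/vdiv above
    nudged_zp = np.clip(np.round(zp_from_min), quant_min, quant_max)
    nudged_min = (quant_min - nudged_zp) * scale            # nudged range contains 0 exactly
    nudged_max = (quant_max - nudged_zp) * scale
    return scale, nudged_min, nudged_max

scale, lo, hi = nudge_min_max(-0.9, 1.1, 0.0, 255.0)
assert lo <= 0.0 <= hi
```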


mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py (+0 -1)

@@ -87,7 +87,6 @@ def fake_quant_perchannel_grad_compute(dout, x, min_val, max_val, quant_min, qua
    quant_min = te.lang.cce.broadcast(quant_min, minmax_shape, x.dtype)
    quant_max = te.lang.cce.broadcast(quant_max, minmax_shape, x.dtype)

    # CalNudge(NudgeMinMax)
    scale = te.lang.cce.vdiv(te.lang.cce.vsub(
        max_val, min_val), te.lang.cce.vsub(quant_max, quant_min))
    zp_from_min = te.lang.cce.vsub(quant_min, te.lang.cce.vdiv(min_val, scale))


mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py (+0 -1)

@@ -61,7 +61,6 @@ def fake_quant_per_layer_compute(x, min_val, max_val, y, quant_min, quant_max, s
    max_val = te.lang.cce.vmax(te.lang.cce.vmuls(min_val, -1.), max_val)
    min_val = te.lang.cce.vmuls(max_val, -1.)

    # CalNudge(NudgeMinMax)
    scale = te.lang.cce.vdiv(te.lang.cce.vsub(
        max_val, min_val), te.lang.cce.vsub(quant_max, quant_min))
    zp_from_min = te.lang.cce.vsub(quant_min, te.lang.cce.vdiv(min_val, scale))


mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py (+0 -1)

@@ -92,7 +92,6 @@ def fake_quant_per_layer_grad_compute(dout, x, min_val, max_val, quant_min, quan
    max_val = te.lang.cce.vmax(te.lang.cce.vmuls(min_val, -1.), max_val)
    min_val = te.lang.cce.vmuls(max_val, -1.)

    # CalNudge(NudgeMinMax)
    scale = te.lang.cce.vdiv(te.lang.cce.vsub(
        max_val, min_val), te.lang.cce.vsub(quant_max, quant_min))
    zp_from_min = te.lang.cce.vsub(quant_min, te.lang.cce.vdiv(min_val, scale))


mindspore/ops/composite/multitype_ops/_constexpr_utils.py (+0 -4)

@@ -91,20 +91,16 @@ def check_tensor_setitem_index(index, element_type=None):
"""Checks tuple index type of tensor assignment."""
if index is None:
raise IndexError("Tensor's index cannot be None.")
# eg. Tensor[Slice] = u
if isinstance(index, slice):
return True
# eg. Tensor[tuple] = u
if isinstance(index, tuple):
if not index:
raise IndexError("Tensor's index cannot be empty.")
# eg. Tensor[tuple(Slice,...)] = u
for item in index:
if not isinstance(item, (slice, type(...), int)):
raise IndexError(
"Index of type '{}' is not supported yet.".format(type(item)))
return True
# eg. Tensor[Tensor[dtype=bool]] = u
if isinstance(index, mstype.tensor_type):
if element_type is None or element_type != mstype.bool_:
raise TypeError(
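The `# eg.` comments enumerate the accepted index forms: a slice, a non-empty tuple of slice/Ellipsis/int, or a bool tensor type. A standalone simplification of the same check, using built-in Python types in place of mstype, purely for illustration:

```python
def check_setitem_index(index):
    # Simplified stand-in for check_tensor_setitem_index (no tensor-type branch).
    if index is None:
        raise IndexError("Tensor's index cannot be None.")
    if isinstance(index, slice):             # Tensor[Slice] = u
        return True
    if isinstance(index, tuple):             # Tensor[tuple(Slice, ..., int)] = u
        if not index:
            raise IndexError("Tensor's index cannot be empty.")
        for item in index:
            if not isinstance(item, (slice, type(...), int)):
                raise IndexError("Index of type '{}' is not supported yet.".format(type(item)))
        return True
    raise IndexError("Index of type '{}' is not supported yet.".format(type(index)))

assert check_setitem_index(slice(0, 2))
assert check_setitem_index((slice(None), ..., 1))
```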


mindspore/ops/composite/multitype_ops/setitem_impl.py (+0 -4)

@@ -305,23 +305,19 @@ def _tensor_setitem_with_slice_v1(data, input_slice, value):

@setitem.register("Tensor", "Number", "Number")
def _tensor_setitem_with_int_v1(data, index, value):
    """Syntax: A[1] = 3"""
    return compile_utils.tensor_setitem_by_number_with_number(data, index, value)


@setitem.register("Tensor", "Number", "Tensor")
def _tensor_setitem_with_int_v2(data, index, value):
    """Syntax: A[1] = Tensor"""
    return compile_utils.tensor_setitem_by_number_with_tensor(data, index, value)


@setitem.register("Tensor", "Ellipsis", "Number")
def _tensor_setitem_with_ellipsis_v1(data, index, value):
    """Syntax: A[...] = number."""
    return compile_utils.tensor_setitem_by_ellipsis_with_number(data, index, value)


@setitem.register("Tensor", "Ellipsis", "Tensor")
def _tensor_setitem_with_ellipsis_v2(data, index, value):
    """Syntax: A[...] = Tensor."""
    return compile_utils.tensor_setitem_by_ellipsis_with_tensor(data, index, value)
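The decorators above implement multiple dispatch: one `setitem` entry point picks an implementation from the declared type signature of `(data, index, value)`. A minimal sketch of that registry pattern, independent of MindSpore (all names here are chosen for illustration):

```python
class MultitypeRegistry:
    """Dispatch on a tuple of type tags, mimicking setitem.register."""
    def __init__(self):
        self._impls = {}

    def register(self, *tags):
        def decorator(fn):
            self._impls[tags] = fn
            return fn
        return decorator

    def dispatch(self, tags, *args):
        return self._impls[tags](*args)

setitem_demo = MultitypeRegistry()

@setitem_demo.register("Tensor", "Number", "Number")
def _setitem_int_number(data, index, value):
    """Syntax: A[1] = 3"""
    data[index] = value
    return data

print(setitem_demo.dispatch(("Tensor", "Number", "Number"), [0, 0, 0], 1, 3))  # [0, 3, 0]
```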

mindspore/ops/operations/_thor_ops.py (+0 -2)

@@ -320,8 +320,6 @@ class CusMatMulCube(PrimitiveWithInfer):
    from mindspore.ops._op_impl._custom_op.matmul_cube_impl import CusMatMulCube

    def infer_shape(self, data1_shape, data2_shape):
        # shape = [1, data1_shape[1], data2_shape[2], 16, 16]
        # return shape
        if self.transpose_a:
            k1, m = data1_shape
        else:
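The commented-out lines are leftovers of an earlier fractal (16x16 cube) output shape; the live code infers the plain (m, n) result from the operand shapes and transpose flags. A hedged sketch of that inference rule:

```python
def matmul_infer_shape(data1_shape, data2_shape, transpose_a=False, transpose_b=False):
    # A: (m, k), or (k, m) if transpose_a; B: (k, n), or (n, k) if transpose_b.
    if transpose_a:
        k1, m = data1_shape
    else:
        m, k1 = data1_shape
    if transpose_b:
        n, k2 = data2_shape
    else:
        k2, n = data2_shape
    assert k1 == k2, "inner dimensions must match"
    return [m, n]

assert matmul_infer_shape([4, 8], [8, 3]) == [4, 3]
assert matmul_infer_shape([8, 4], [8, 3], transpose_a=True) == [4, 3]
```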


mindspore/ops/operations/array_ops.py (+0 -1)

@@ -2068,7 +2068,6 @@ def _compute_slicing_length(begin, end, stride, x_shape, i):
    if 0 <= begin < x_dim:
        begin += -x_dim
    if begin >= x_dim:
        # When slicing backward, if begin >= x_dim, set begin = -1, which means start from the last element.
        begin = -1
    if 0 <= end < x_dim:
        end += -x_dim
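The comment explains the backward-stride normalization: for a negative stride, in-range indices are mapped into the negative range so later arithmetic is uniform, and an out-of-range `begin` is clamped to -1 (the last element). A small sketch of that normalization in isolation (covering only the `begin` branch shown above):

```python
def normalize_backward_begin(begin, x_dim):
    """Map begin into [-x_dim, -1] for a negative-stride slice over an axis of size x_dim."""
    if 0 <= begin < x_dim:
        begin += -x_dim         # same position, expressed as a negative index
    if begin >= x_dim:
        begin = -1              # past the end: start from the last element
    return begin

assert normalize_backward_begin(2, 5) == -3    # x[2] is x[-3]
assert normalize_backward_begin(9, 5) == -1    # clamped to the last element
```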


mindspore/ops/operations/nn_ops.py (+0 -1)

@@ -745,7 +745,6 @@ class BNTrainingUpdate(PrimitiveWithInfer):
    def __init__(self, isRef=True, epsilon=1e-5, factor=0.1):
        self.init_prim_io_names(inputs=['x', 'sum', 'square_sum', 'scale', 'b', 'mean', 'variance'],
                                outputs=['y', 'running_mean', 'running_variance', 'save_mean', 'save_inv_variance'])
        #self.isRef = validator.check_integer('isRef', isRef, [0, 1], Rel.IN)
        self.epsilon = validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, 'BNTrainingUpdate')
        self.factor = validator.check_number_range('factor', factor, 0, 1, Rel.INC_BOTH, 'BNTrainingUpdate')
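The two `check_number_range` calls pin `epsilon` to the half-open interval (0, 1] (`INC_RIGHT`) and `factor` to the closed interval [0, 1] (`INC_BOTH`). A plain-Python sketch of such a range check (a hypothetical helper standing in for MindSpore's validator, not its actual signature):

```python
def check_number_range(name, value, lo, hi, inc_left, inc_right, prim):
    """Raise if value falls outside the interval; bounds are included per the flags."""
    ok_left = value >= lo if inc_left else value > lo
    ok_right = value <= hi if inc_right else value < hi
    if not (ok_left and ok_right):
        raise ValueError("For '{}', '{}'={} is out of range".format(prim, name, value))
    return value

epsilon = check_number_range('epsilon', 1e-5, 0, 1, False, True, 'BNTrainingUpdate')  # (0, 1]
factor = check_number_range('factor', 0.1, 0, 1, True, True, 'BNTrainingUpdate')      # [0, 1]
```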


