
!3856 fix MatrixDiag msg, FloorDiv grad, SoftmaxCrossEntropyLogit msg

Merge pull request !3856 from fangzehua/fix_matrix_msg
tags/v0.7.0-beta
mindspore-ci-bot · 5 years ago
parent commit a1bdecb9e6
4 changed files with 9 additions and 22 deletions

  1. +3 -3   mindspore/nn/layer/basic.py
  2. +2 -1   mindspore/nn/loss/loss.py
  3. +2 -12  mindspore/ops/_grad/grad_math_ops.py
  4. +2 -6   mindspore/ops/_grad/grad_nn_ops.py

+3 -3  mindspore/nn/layer/basic.py

@@ -591,7 +591,7 @@ class MatrixDiagPart(Cell):
         Tensor, same type as input `x`. The shape should be x.shape[:-2] + [min(x.shape[-2:])].

     Examples:
-        >>> x = Tensor([[[-1, 0], [0, 1]], [-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32)
+        >>> x = Tensor([[[-1, 0], [0, 1]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32)
         >>> matrix_diag_part = nn.MatrixDiagPart()
         >>> result = matrix_diag_part(x)
         [[-1., 1.], [-1., 1.], [-1., 1.]]
@@ -622,11 +622,11 @@ class MatrixSetDiag(Cell):
         Tensor, same type as input `x`. The shape same as `x`.

     Examples:
-        >>> x = Tensor([[[-1, 0], [0, 1]], [-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32)
+        >>> x = Tensor([[[-1, 0], [0, 1]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32)
         >>> diagonal = Tensor([[-1., 2.], [-1., 1.], [-1., 1.]], mindspore.float32)
         >>> matrix_set_diag = nn.MatrixSetDiag()
         >>> result = matrix_set_diag(x, diagonal)
-        [[[-1, 0], [0, 2]], [-1, 0], [0, 1]], [[-1, 0], [0, 1]]]
+        [[[-1, 0], [0, 2]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]]
     """
     def __init__(self):
         super(MatrixSetDiag, self).__init__()
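The change above only rebalances the brackets in the doctest input so it parses as a batch of three 2x2 matrices. As a sanity check of what the corrected example computes, here is a small NumPy sketch of the same diagonal extraction (an illustration, not the MindSpore kernel):

    import numpy as np

    # The corrected doctest input: a batch of three 2x2 matrices.
    x = np.array([[[-1, 0], [0, 1]],
                  [[-1, 0], [0, 1]],
                  [[-1, 0], [0, 1]]], dtype=np.float32)

    # MatrixDiagPart returns the main diagonal of each matrix in the batch;
    # np.diagonal over the last two axes mimics that behaviour.
    print(np.diagonal(x, axis1=-2, axis2=-1))
    # [[-1.  1.]
    #  [-1.  1.]
    #  [-1.  1.]]  -- matches the docstring output [[-1., 1.], [-1., 1.], [-1., 1.]]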


+2 -1  mindspore/nn/loss/loss.py

@@ -218,7 +218,8 @@ class SoftmaxCrossEntropyWithLogits(_Loss):
         sparse (bool): Specifies whether labels use sparse format or not. Default: False.
         reduction (Union[str, None]): Type of reduction to apply to loss. Support 'sum' or 'mean' If None,
             do not reduction. Default: None.
-        smooth_factor (float): Label smoothing factor. It is a optional input. Default: 0.
+        smooth_factor (float): Label smoothing factor. It is a optional input which should be in range [0, 1].
+            Default: 0.
         num_classes (int): The number of classes in the task. It is a optional input Default: 2.

     Inputs:
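The tightened wording pins smooth_factor to [0, 1]. For reference, a minimal sketch of the standard label-smoothing rule such a factor feeds into (a generic formulation; the helper name smooth_labels is ours, not the library's):

    import numpy as np

    def smooth_labels(onehot, smooth_factor, num_classes):
        # Standard label smoothing: pull the one-hot target toward a uniform prior.
        assert 0.0 <= smooth_factor <= 1.0  # the range the docstring now states
        return onehot * (1.0 - smooth_factor) + smooth_factor / num_classes

    labels = np.eye(2, dtype=np.float32)  # one-hot labels for 2 classes
    print(smooth_labels(labels, 0.1, 2))  # [[0.95 0.05]
                                          #  [0.05 0.95]]

Outside [0, 1] the smoothed targets would leave the probability simplex, which is why the range is worth documenting.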


+2 -12  mindspore/ops/_grad/grad_math_ops.py

@@ -284,14 +284,9 @@ def get_bprop_ceil(self):
 @bprop_getters.register(P.FloorDiv)
 def get_bprop_floordiv(self):
     """Grad definition for `FloorDiv` operation."""
-    div_op = P.FloorDiv()
-    neg = P.Neg()
-    mul_op = P.Mul()

     def bprop(x, y, out, dout):
-        bc_x = div_op(dout, y)
-        bc_y = neg(mul_op(bc_x, out))
-        return binop_grad_common(x, y, bc_x, bc_y)
+        return zeros_like(x), zeros_like(y)

     return bprop
@@ -311,14 +306,9 @@ def get_bprop_floormod(self):
 @bprop_getters.register(P.TruncateDiv)
 def get_bprop_truncate_div(self):
     """Grad definition for `TruncateDiv` operation."""
-    div_op = P.TruncateDiv()
-    neg = P.Neg()
-    mul_op = P.Mul()

     def bprop(x, y, out, dout):
-        bc_x = div_op(dout, y)
-        bc_y = neg(mul_op(bc_x, out))
-        return binop_grad_common(x, y, bc_x, bc_y)
+        return zeros_like(x), zeros_like(y)

     return bprop
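Both rewrites replace a division-style gradient with zeros, which matches the math: floor and truncated division are piecewise constant in both arguments, so their derivative is zero almost everywhere and undefined only at the jump points. A quick finite-difference check with NumPy (illustrative only):

    import numpy as np

    x, y, eps = 7.0, 2.0, 1e-4
    out = np.floor_divide(x, y)           # 3.0
    out_dx = np.floor_divide(x + eps, y)  # still 3.0: the output is a step function of x
    print((out_dx - out) / eps)           # 0.0 -- consistent with returning zeros_like(x)

The removed bprop applied the usual division gradient formula, which is incorrect for an operator whose output does not vary under small input perturbations.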




+2 -6  mindspore/ops/_grad/grad_nn_ops.py

@@ -14,7 +14,6 @@
 # ============================================================================

 """Define the grad rules of neural network related operations."""
-import math
 import numpy as np
 from mindspore.ops import _selected_grad_ops as SG
 from mindspore.ops.primitive import constexpr
@@ -632,11 +631,8 @@ def get_bprop_onehot(self):
 @constexpr
 def _range_op(start, limit, delta, dtype):
     """helper function for Grad TopK"""
-    range_op = inner.Range(float(start), float(limit), float(delta))
-    length_input = math.ceil((limit - start) / delta)
-    input_tensor = Tensor(list(range(length_input)), dtype)
-    range_out = range_op(input_tensor)
-    return range_out
+    output_tensor = Tensor(list(range(start, limit, delta)), dtype)
+    return output_tensor

 @constexpr
 def _get_1d_shape(in_shape):
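Dropping the inner.Range call also removes the file's only use of math, hence the deleted import. Since _range_op runs at compile time under @constexpr, Python's range can do the stepping directly; assuming the old Range kernel mapped index i to start + i * delta over integer arguments (which is what the TopK grad passes), the two paths agree. A plain-Python sketch of that equivalence (old_range/new_range are illustrative names):

    import math

    def old_range(start, limit, delta):
        # Old path: build indices 0..n-1, then let a Range kernel scale them.
        n = math.ceil((limit - start) / delta)
        return [start + i * delta for i in range(n)]

    def new_range(start, limit, delta):
        # New path: Python's range steps from start to limit itself.
        return list(range(start, limit, delta))

    assert old_range(0, 10, 3) == new_range(0, 10, 3) == [0, 3, 6, 9]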

