Browse Source

!12624 add rmse_loss and mae_loss

From: @lijiaqi0612
Reviewed-by: @kingxian,@zh_qh
Signed-off-by: @kingxian
tags/v1.2.0-rc1
mindspore-ci-bot Gitee 5 years ago
parent
commit
ed46bf8b18
3 changed files with 110 additions and 2 deletions
  1. +4
    -2
      mindspore/nn/loss/__init__.py
  2. +92
    -0
      mindspore/nn/loss/loss.py
  3. +14
    -0
      tests/ut/python/nn/test_loss.py

+ 4
- 2
mindspore/nn/loss/__init__.py View File

@@ -21,9 +21,11 @@ It shows how well the model works on a dataset and the optimization target which

from .loss import L1Loss, MSELoss, SmoothL1Loss, FocalLoss,\
SoftmaxCrossEntropyWithLogits, BCELoss, CosineEmbeddingLoss, \
SampledSoftmaxLoss, DiceLoss, BCEWithLogitsLoss, MultiClassDiceLoss
SampledSoftmaxLoss, DiceLoss, BCEWithLogitsLoss, MultiClassDiceLoss,\
RMSELoss, MAELoss


__all__ = ['L1Loss', 'MSELoss', 'SmoothL1Loss', 'FocalLoss',
'SoftmaxCrossEntropyWithLogits', 'BCELoss', 'BCEWithLogitsLoss',
'CosineEmbeddingLoss', 'SampledSoftmaxLoss', 'DiceLoss', 'MultiClassDiceLoss']
'CosineEmbeddingLoss', 'SampledSoftmaxLoss', 'DiceLoss', 'MultiClassDiceLoss',
'RMSELoss', 'MAELoss']

+ 92
- 0
mindspore/nn/loss/loss.py View File

@@ -173,6 +173,98 @@ class MSELoss(_Loss):
return self.get_loss(x)


class RMSELoss(_Loss):
    r"""
    RMSELoss creates a criterion to measure the root mean square error between :math:`x` and :math:`y`
    element-wise, where :math:`x` is the input and :math:`y` is the target.

    For simplicity, let :math:`x` and :math:`y` be 1-dimensional Tensor with length :math:`N`,
    the loss of :math:`x` and :math:`y` is given as:

    .. math::
        loss = \sqrt{\frac{1}{N}\sum_{n=1}^{N}{(x_n-y_n)^2}}

    Inputs:
        - **logits** (Tensor) - Tensor of shape :math:`(x_1, x_2, ..., x_R)`.
        - **label** (Tensor) - Tensor of shape :math:`(y_1, y_2, ..., y_S)`.

    Outputs:
        Tensor, weighted loss float tensor.

    Raises:
        ValueError: If the shapes of `logits` and `label` are different.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> loss = nn.RMSELoss()
        >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
        >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)
        >>> output = loss(input_data, target_data)
        >>> print(output)
        0.57735026
    """
    def __init__(self):
        """Initialize RMSELoss."""
        super(RMSELoss, self).__init__()
        # Reuse MSELoss with its default 'mean' reduction, so RMSE = sqrt(MSE).
        self.MSELoss = MSELoss()

    def construct(self, logits, label):
        # Validate that logits and label have matching shapes before computing.
        _check_shape(logits.shape, label.shape)
        rmse_loss = F.sqrt(self.MSELoss(logits, label))

        return rmse_loss


class MAELoss(_Loss):
    r"""
    MAELoss creates a criterion to measure the mean absolute error between :math:`x` and :math:`y`
    element-wise, where :math:`x` is the input and :math:`y` is the target.

    For simplicity, let :math:`x` and :math:`y` be 1-dimensional Tensor with length :math:`N`;
    with argument `reduction` set to 'mean', the loss of :math:`x` and :math:`y` is given as:

    .. math::
        \text{MAE} = \frac{1}{N}\sum_{n=1}^{N}\left| x_n - y_n \right|

    Args:
        reduction (str): Type of reduction to be applied to loss. The optional values are "mean", "sum", and "none".
            Default: "mean".

    Inputs:
        - **logits** (Tensor) - Tensor of shape :math:`(x_1, x_2, ..., x_R)`.
        - **label** (Tensor) - Tensor of shape :math:`(y_1, y_2, ..., y_S)`.

    Outputs:
        Tensor, weighted loss float tensor.

    Raises:
        ValueError: If `reduction` is not one of 'none', 'mean', 'sum'.
        ValueError: If the shapes of `logits` and `label` are different.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> loss = nn.MAELoss()
        >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
        >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)
        >>> output = loss(input_data, target_data)
        >>> print(output)
        0.33333334
    """

    def construct(self, logits, label):
        # Validate that logits and label have matching shapes before computing.
        _check_shape(logits.shape, label.shape)
        x = F.absolute(logits - label)
        # Apply the configured reduction ('mean'/'sum'/'none') inherited from _Loss.
        return self.get_loss(x)


class SmoothL1Loss(_Loss):
r"""
A loss class for learning region proposals.


+ 14
- 0
tests/ut/python/nn/test_loss.py View File

@@ -204,3 +204,17 @@ def test_multi_class_dice_loss_init_activation2():
y_pred = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]]), mstype.float32)
y = Tensor(np.array([[1, 0], [0, 1]]), mstype.float32)
loss(y_pred, y)


def test_rmse_loss():
    """Forward-run nn.RMSELoss on 2-D float32 inputs to check it builds and executes."""
    rmse = nn.RMSELoss()
    prediction = Tensor(np.array([[1, 2, 3], [2, 3, 2]]).astype(np.float32))
    target = Tensor(np.array([[0, 0, 5], [1, 2, 3]]).astype(np.float32))
    rmse(prediction, target)


def test_mae_loss():
    """Forward-run nn.MAELoss on 2-D float32 inputs to check it builds and executes."""
    mae = nn.MAELoss()
    prediction = Tensor(np.array([[1, 2, 3], [2, 3, 2]]).astype(np.float32))
    target = Tensor(np.array([[0, 0, 5], [1, 2, 3]]).astype(np.float32))
    mae(prediction, target)

Loading…
Cancel
Save