Browse Source

!23130 Improve operator error messages so that users can better understand the cause of operator errors: loss and others

Merge pull request !23130 from dinglinhe/dlh_code_ms_I43QY0_loss
tags/v1.5.0-rc1
i-robot Gitee 4 years ago
parent
commit
92fa257aeb
13 changed files with 279 additions and 229 deletions
  1. +12
    -7
      mindspore/_checkparam.py
  2. +0
    -1
      mindspore/nn/layer/basic.py
  3. +8
    -6
      mindspore/nn/layer/combined.py
  4. +44
    -26
      mindspore/nn/layer/container.py
  5. +23
    -24
      mindspore/nn/layer/conv.py
  6. +30
    -23
      mindspore/nn/layer/embedding.py
  7. +1
    -1
      mindspore/nn/layer/image.py
  8. +1
    -1
      mindspore/nn/layer/math.py
  9. +149
    -126
      mindspore/nn/loss/loss.py
  10. +1
    -1
      mindspore/ops/composite/multitype_ops/_constexpr_utils.py
  11. +4
    -6
      mindspore/ops/composite/random_ops.py
  12. +4
    -4
      mindspore/ops/operations/array_ops.py
  13. +2
    -3
      mindspore/ops/operations/comm_ops.py

+ 12
- 7
mindspore/_checkparam.py View File

@@ -386,7 +386,7 @@ class Validator:
return arg_value

@staticmethod
def check_bool(arg_value, arg_name=None):
def check_bool(arg_value, arg_name=None, prim_name=None):
"""
Check argument is instance of bool.

@@ -395,8 +395,13 @@ class Validator:
- has_bias = check_bool(has_bias, "has_bias")
"""
if not isinstance(arg_value, bool):
arg_name = arg_name if arg_name else "Parameter"
raise TypeError(f'`{arg_name}` should be isinstance of bool, but got `{arg_value}`.')
if prim_name and arg_name:
msg_prefix = f"For '{prim_name}', the '{arg_name}'"
elif prim_name and arg_name is None:
msg_prefix = f"For '{prim_name}', Parameter"
else:
msg_prefix = "Parameter"
raise TypeError(f"{msg_prefix} should be isinstance of bool, but got {arg_value}.")
return arg_value

@staticmethod
@@ -495,10 +500,10 @@ class Validator:
f' and it is a subset of the data types above.')

@staticmethod
def check_const_input(arg_name, arg_value, prim_name):
def check_valid_input(arg_name, arg_value, prim_name):
"""Checks valid value."""
if arg_value is None:
raise ValueError(f'For \'{prim_name}\', the `{arg_name}` must be a const input, but got {arg_value}.')
raise ValueError(f"For \'{prim_name}\', the '{arg_name}' can not be None, but got {arg_value}.")
return arg_value

@staticmethod
@@ -622,8 +627,8 @@ class Validator:
axis = axis if isinstance(axis, Iterable) else (axis,)
exp_shape = [ori_shape[i] for i in range(len(ori_shape)) if i not in axis]
if list(shape) != exp_shape:
raise ValueError(f'For {prim_name}, {ori_shape} reduce on {axis} should be '
f'{tuple(exp_shape)}, but got {shape}.')
raise ValueError(f"For '{prim_name}', the origin shape {ori_shape} reduce on {axis} should be "
f"{tuple(exp_shape)}, but got {shape}.")

@staticmethod
def check_astype_dtype(dtype):


+ 0
- 1
mindspore/nn/layer/basic.py View File

@@ -1222,7 +1222,6 @@ class MatrixDiag(Cell):
[[[ 1. 0. 0.]
[ 0. -1. 0.]
[ 0. 0. 1.]

[[ 1. 0. 0.]
[ 0. -1. 0.]
[ 0. 0. 1.]]]


+ 8
- 6
mindspore/nn/layer/combined.py View File

@@ -127,9 +127,9 @@ class Conv2dBnAct(Cell):
has_bias=has_bias,
weight_init=weight_init,
bias_init=bias_init)
self.has_bn = Validator.check_bool(has_bn, "has_bn")
self.has_bn = Validator.check_bool(has_bn, "has_bn", self.cls_name)
self.has_act = activation is not None
self.after_fake = Validator.check_bool(after_fake, "after_fake")
self.after_fake = Validator.check_bool(after_fake, "after_fake", self.cls_name)
if has_bn:
self.batchnorm = BatchNorm2d(out_channels, eps, momentum)
if activation == "leakyrelu":
@@ -137,7 +137,8 @@ class Conv2dBnAct(Cell):
else:
self.activation = get_activation(activation) if isinstance(activation, str) else activation
if activation is not None and not isinstance(self.activation, (Cell, Primitive)):
raise TypeError("The activation must be str or Cell or Primitive,"" but got {}.".format(activation))
raise TypeError(f"For '{self.cls_name}', the 'activation' must be str or Cell or Primitive, "
f"but got {type(activation).__name__}.")

def construct(self, x):
x = self.conv(x)
@@ -217,9 +218,9 @@ class DenseBnAct(Cell):
weight_init,
bias_init,
has_bias)
self.has_bn = Validator.check_bool(has_bn, "has_bn")
self.has_bn = Validator.check_bool(has_bn, "has_bn", self.cls_name)
self.has_act = activation is not None
self.after_fake = Validator.check_bool(after_fake, "after_fake")
self.after_fake = Validator.check_bool(after_fake, "after_fake", self.cls_name)
if has_bn:
self.batchnorm = BatchNorm1d(out_channels, eps, momentum)
if activation == "leakyrelu":
@@ -227,7 +228,8 @@ class DenseBnAct(Cell):
else:
self.activation = get_activation(activation) if isinstance(activation, str) else activation
if activation is not None and not isinstance(self.activation, (Cell, Primitive)):
raise TypeError("The activation must be str or Cell or Primitive,"" but got {}.".format(activation))
raise TypeError(f"For '{self.cls_name}', the 'activation' must be str or Cell or Primitive, "
f"but got {type(activation).__name__}.")

def construct(self, x):
x = self.dense(x)


+ 44
- 26
mindspore/nn/layer/container.py View File

@@ -20,23 +20,28 @@ from ..cell import Cell
__all__ = ['SequentialCell', 'CellList']


def _valid_index(cell_num, index):
def _valid_index(cell_num, index, op_name=None):
"""Internal function, used to detect the value and type of index."""
msg_prefix = f"For '{op_name}', the" if op_name else "The"
if not isinstance(index, int):
raise TypeError("Index {} is not int type")
raise TypeError(f"{msg_prefix} type of index should be int type, but got {type(index).__name__}.")
if not -cell_num <= index < cell_num:
raise IndexError("Index should be a number in range [{}, {}), but got {}"
.format(-cell_num, cell_num, index))
raise IndexError(f"{msg_prefix} value of index should be a number in range [{-cell_num}, {cell_num}), "
f"but got {index}.")
return index % cell_num


def _valid_cell(cell):
def _valid_cell(cell, op_name=None):
"""Internal function, used to check whether the input cell is a subclass of Cell."""
if issubclass(cell.__class__, Cell):
return True
raise TypeError('`{}` is not a subclass of Cell. Please check your code'.format(cell))
msg_prefix = f"For '{op_name}'," if op_name else ""
raise TypeError(f'{msg_prefix} each cell should be subclass of Cell. '
f'Please check your code')


def _get_prefix_and_index(cells):
"""get prefix and index of parameter name in sequential cell or cell list"""
"""get prefix and index of parameter name in sequential cell or cell list."""
prefix = ""
index = 0
if not cells:
@@ -150,7 +155,8 @@ class SequentialCell(Cell):
cell.update_parameters_name(name + ".")
self._is_dynamic_name.append(False)
else:
raise TypeError('Cells must be list or orderedDict')
raise TypeError(f"For '{self.__class__.__name__}', Cells must be list or orderedDict, "
f"but got {type(cells).__name__}")
else:
for index, cell in enumerate(args):
self.insert_child_to_cell(str(index), cell)
@@ -162,21 +168,23 @@ class SequentialCell(Cell):
if isinstance(index, slice):
return self.__class__(
OrderedDict(list(self._cells.items())[index]))
index = _valid_index(len(self), index)
index = _valid_index(len(self), index, self.__class__.__name__)
return list(self._cells.values())[index]

def __setitem__(self, index, cell):
if _valid_cell(cell):
cls_name = self.__class__.__name__
if _valid_cell(cell, cls_name):
prefix, _ = _get_prefix_and_index(self._cells)
index = _valid_index(len(self), index)
index = _valid_index(len(self), index, cls_name)
key = list(self._cells.keys())[index]
self._cells[key] = cell
cell.update_parameters_name(prefix + key + ".")
self.cell_list = list(self._cells.values())

def __delitem__(self, index):
cls_name = self.__class__.__name__
if isinstance(index, int):
index = _valid_index(len(self), index)
index = _valid_index(len(self), index, cls_name)
key = list(self._cells.keys())[index]
del self._cells[key]
del self._is_dynamic_name[index]
@@ -186,7 +194,8 @@ class SequentialCell(Cell):
del self._cells[key]
del self._is_dynamic_name[index]
else:
raise TypeError('Index {} is not int type or slice type'.format(index))
raise TypeError(f"For '{cls_name}', the type of index should be int type or slice type, "
f"but got {type(index).__name__}")
prefix, key_index = _get_prefix_and_index(self._cells)
temp_dict = OrderedDict()
for idx, key in enumerate(self._cells.keys()):
@@ -225,7 +234,7 @@ class SequentialCell(Cell):
[[26.999863 26.999863]
[26.999863 26.999863]]]]
"""
if _valid_cell(cell):
if _valid_cell(cell, self.__class__.__name__):
prefix, _ = _get_prefix_and_index(self._cells)
cell.update_parameters_name(prefix + str(len(self)) + ".")
self._is_dynamic_name.append(True)
@@ -280,32 +289,38 @@ class CellList(_CellListBase, Cell):
self.extend(args[0])

def __getitem__(self, index):
cls_name = self.__class__.__name__
if isinstance(index, slice):
return self.__class__(list(self._cells.values())[index])
if isinstance(index, int):
index = _valid_index(len(self), index)
index = _valid_index(len(self), index, cls_name)
return self._cells[str(index)]
raise TypeError('Index {} is not int type or slice type'.format(index))
raise TypeError(f"For '{cls_name}', the type of index should be int type or slice type, "
f"but got {type(index).__name__}")

def __setitem__(self, index, cell):
if not isinstance(index, int) and _valid_cell(cell):
raise TypeError('Index {} is not int type'.format(index))
index = _valid_index(len(self), index)
cls_name = self.__class__.__name__
if not isinstance(index, int) and _valid_cell(cell, cls_name):
raise TypeError(f"For '{cls_name}', the type of index should be int type, "
f"but got {type(index).__name__}")
index = _valid_index(len(self), index, cls_name)
if self._auto_prefix:
prefix, _ = _get_prefix_and_index(self._cells)
cell.update_parameters_name(prefix + str(index) + ".")
self._cells[str(index)] = cell

def __delitem__(self, index):
cls_name = self.__class__.__name__
if isinstance(index, int):
index = _valid_index(len(self), index)
index = _valid_index(len(self), index, cls_name)
del self._cells[str(index)]
elif isinstance(index, slice):
keys = list(self._cells.keys())[index]
for key in keys:
del self._cells[key]
else:
raise TypeError('Index {} is not int type or slice type'.format(index))
raise TypeError(f"For '{cls_name}', the type of index should be int type or slice type, "
f"but got {type(index).__name__}")
# adjust orderedDict
prefix, key_index = _get_prefix_and_index(self._cells)
temp_dict = OrderedDict()
@@ -328,8 +343,9 @@ class CellList(_CellListBase, Cell):

def insert(self, index, cell):
"""Inserts a given cell before a given index in the list."""
idx = _valid_index(len(self), index)
_valid_cell(cell)
cls_name = self.__class__.__name__
idx = _valid_index(len(self), index, cls_name)
_valid_cell(cell, cls_name)
length = len(self)
prefix, key_index = _get_prefix_and_index(self._cells)
while length > idx:
@@ -350,11 +366,13 @@ class CellList(_CellListBase, Cell):
Raises:
TypeError: If the cells are not a list of subcells.
"""
cls_name = self.__class__.__name__
if not isinstance(cells, list):
raise TypeError('Cells {} should be list of subcells'.format(cells))
raise TypeError(f"For '{cls_name}', the new cells wanted to append "
f"should be list of subcells.")
prefix, _ = _get_prefix_and_index(self._cells)
for cell in cells:
if _valid_cell(cell):
if _valid_cell(cell, cls_name):
if self._auto_prefix:
cell.update_parameters_name(prefix + str(len(self)) + ".")
self._cells[str(len(self))] = cell
@@ -362,7 +380,7 @@ class CellList(_CellListBase, Cell):

def append(self, cell):
"""Appends a given cell to the end of the list."""
if _valid_cell(cell):
if _valid_cell(cell, self.__class__.__name__):
if self._auto_prefix:
prefix, _ = _get_prefix_and_index(self._cells)
cell.update_parameters_name(prefix + str(len(self)) + ".")


+ 23
- 24
mindspore/nn/layer/conv.py View File

@@ -58,9 +58,13 @@ class _Conv(Cell):
self.bias_init = bias_init
self.format = Validator.check_string(data_format, ['NCHW', 'NHWC', 'NCDHW'], 'format', self.cls_name)
if context.get_context("device_target") != "GPU" and self.format == "NHWC":
raise ValueError("NHWC format only support in GPU target.")
raise ValueError(f"For '{self.cls_name}', the \"NHWC\" format only support in GPU target, "
f"but got the format is {self.format} and "
f"the platform is {context.get_context('device_target')}.")
if context.get_context("device_target") == "CPU" and self.format == "NCDHW":
raise ValueError("NCDHW format only support in Ascend and GPU targets.")
raise ValueError(f"For '{self.cls_name}', the \"NCDHW\" format only support in Ascend and GPU target, "
f"but got the format is {self.format} and "
f"the platform is {context.get_context('device_target')}.")
if isinstance(padding, int):
Validator.check_non_negative_int(padding, 'padding', self.cls_name)
self.padding = padding
@@ -69,7 +73,8 @@ class _Conv(Cell):
Validator.check_non_negative_int(pad, 'padding item', self.cls_name)
self.padding = padding
else:
raise TypeError("padding type must be int/tuple(int) cannot be {}!".format(type(padding)))
raise TypeError(f"For '{self.cls_name}', the type of 'padding' must be int or tuple(int), "
f"but got {type(padding).__name__}.")

self.dilation = dilation
self.group = Validator.check_positive_int(group)
@@ -81,11 +86,11 @@ class _Conv(Cell):
for dilation_elem in dilation:
Validator.check_positive_int(dilation_elem, 'dilation item', self.cls_name)
if in_channels % group != 0:
raise ValueError(f"Attr 'in_channels' of {self.cls_name} Op must be divisible by "
f"attr 'group' of {self.cls_name} Op.")
raise ValueError(f"For '{self.cls_name}', the attr 'in_channels' must be divisible by attr 'group', "
f"but got 'in_channels': {in_channels} and 'group': {group}.")
if out_channels % group != 0:
raise ValueError(f"Attr 'out_channels' {self.cls_name} Op must be divisible by "
f"attr 'group' of {self.cls_name} Op.")
raise ValueError(f"For '{self.cls_name}', the 'out_channels' must be divisible by attr 'group', "
f"but got 'out_channels': {out_channels} and 'group': {group}.")
if transposed:
shape = [in_channels, out_channels // group, *kernel_size]
else:
@@ -93,7 +98,7 @@ class _Conv(Cell):
[out_channels, in_channels // group, *kernel_size]
self.weight = Parameter(initializer(self.weight_init, shape), name='weight')

if Validator.check_bool(has_bias):
if Validator.check_bool(has_bias, "has_bias", self.cls_name):
self.bias = Parameter(initializer(self.bias_init, [out_channels]), name='bias')
else:
if self.bias_init != 'zeros':
@@ -284,9 +289,9 @@ class Conv2d(_Conv):


@constexpr
def _check_input_3d(input_shape):
def _check_input_3d(input_shape, op_name):
if len(input_shape) != 3:
raise ValueError(f"Input should be 3d, but got shape {input_shape}")
raise ValueError(f"For '{op_name}', the shape of input should be 3d, but got shape {input_shape}")


class Conv1d(_Conv):
@@ -437,16 +442,14 @@ class Conv1d(_Conv):
dilation=self.dilation,
group=self.group)
self.bias_add = P.BiasAdd()
if pad_mode not in ('valid', 'same', 'pad'):
raise ValueError('Attr \'pad_mode\' of \'Conv1d\' Op passed '
+ str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.')
Validator.check_string(pad_mode, ['valid', 'same', 'pad'], 'pad_mode', self.cls_name)
self.expand_dims = P.ExpandDims()
self.squeeze = P.Squeeze(2)
self.shape = P.Shape()

def construct(self, x):
x_shape = self.shape(x)
_check_input_3d(x_shape)
_check_input_3d(x_shape, self.cls_name)
x = self.expand_dims(x, 2)
output = self.conv2d(x, self.weight)
if self.has_bias:
@@ -477,7 +480,7 @@ class Conv1d(_Conv):
@constexpr
def _check_input_5dims(input_shape, op_name):
if len(input_shape) != 5:
raise ValueError(f"For {op_name}, input should be 5 dims, but got shape {input_shape}.")
raise ValueError(f"For '{op_name}', the input shape should be 5 dimensions, but got shape {input_shape}.")


class Conv3d(_Conv):
@@ -978,13 +981,11 @@ class Conv2dTranspose(_Conv):
self.in_channels = in_channels
self.out_channels = out_channels
self.shape = P.Shape()
if pad_mode not in ('valid', 'same', 'pad'):
raise ValueError('Attr \'pad_mode\' of \'Conv2dTranspose\' Op passed '
+ str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.')
Validator.check_string(pad_mode, ['valid', 'same', 'pad'], 'pad_mode', self.cls_name)
self.is_valid = self.pad_mode == 'valid'
self.is_same = self.pad_mode == 'same'
self.is_pad = self.pad_mode == 'pad'
if Validator.check_bool(has_bias):
if Validator.check_bool(has_bias, "has_bias", self.cls_name):
self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')

# cause Conv2DTranspose's out_channel refers to Conv2D's out_channel.
@@ -1162,13 +1163,11 @@ class Conv1dTranspose(_Conv):
self.in_channels = in_channels
self.out_channels = out_channels
self.shape = P.Shape()
if pad_mode not in ('valid', 'same', 'pad'):
raise ValueError('Attr \'pad_mode\' of \'Conv1dTranspose\' Op passed '
+ str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.')
Validator.check_string(pad_mode, ['valid', 'same', 'pad'], 'pad_mode', self.cls_name)
self.is_valid = self.pad_mode == 'valid'
self.is_same = self.pad_mode == 'same'
self.is_pad = self.pad_mode == 'pad'
if Validator.check_bool(has_bias):
if Validator.check_bool(has_bias, "has_bias", self.cls_name):
self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')

# cause Conv2DBackpropInput's out_channel refers to Conv2D's out_channel.
@@ -1190,7 +1189,7 @@ class Conv1dTranspose(_Conv):

def construct(self, x):
x_shape = self.shape(x)
_check_input_3d(x_shape)
_check_input_3d(x_shape, self.cls_name)
x = self.expand_dims(x, 2)

n, _, h, w = self.shape(x)


+ 30
- 23
mindspore/nn/layer/embedding.py View File

@@ -39,7 +39,7 @@ __all__ = ['Embedding', 'EmbeddingLookup', 'MultiFieldEmbeddingLookup']
@constexpr
def _check_input_2d(input_shape, param_name, func_name):
if len(input_shape) != 2:
raise ValueError(f"{func_name} {param_name} should be 2d, but got shape {input_shape}")
raise ValueError(f"For '{func_name}', the '{param_name}' should be 2d, but got shape {input_shape}")
return True


@@ -232,11 +232,10 @@ class EmbeddingLookup(Cell):
self.sparse = sparse
self.cache_enable = self.vocab_cache_size > 0
self.forward_unique = False
if target not in ('CPU', 'DEVICE'):
raise ValueError('Attr \'target\' of \'EmbeddingLookup\' Op passed '
+ str(target) + ', should be one of values in \'CPU\', \'DEVICE\'.')
validator.check_string(target, ['CPU', 'DEVICE'], 'target', self.cls_name)
if not sparse and target == 'CPU':
raise ValueError('When target is CPU, embedding_lookup must be sparse.')
raise ValueError(f"For '{self.cls_name}', 'sparse' must be True when 'target' is \"CPU\", "
f"but got 'sparse': {sparse} and 'target': {target}")
if sparse:
self.gatherv2 = P.SparseGatherV2()
else:
@@ -264,9 +263,11 @@ class EmbeddingLookup(Cell):
indices_shape_size = 2
if slice_mode == "field_slice" and is_auto_parallel:
if not manual_shapes:
raise ValueError("in slice field mode, the manual_shapes should not be none")
raise ValueError(f"For '{self.cls_name}', the 'manual_shapes' should not be none "
f"when the 'slice_mode' is \"filed_slice\", but got {manual_shapes}.")
if not isinstance(manual_shapes, tuple):
raise TypeError("manual_shapes type must be tuple(int) cannot be {}!".format(type(manual_shapes)))
raise TypeError(f"For '{self.cls_name}', the type of 'manual_shapes' must be tuple(int), "
f"but got {type(manual_shapes).__name__}!")
for dim in manual_shapes:
validator.check_positive_int(dim, 'manual shape dim', self.cls_name)
self.gatherv2.add_prim_attr("manual_split", manual_shapes)
@@ -298,11 +299,11 @@ class EmbeddingLookup(Cell):
self.embeddinglookup.shard(((1, 1), indices_strategy))
else:
if is_auto_parallel:
raise ValueError("slice_mode should support mode in nn.EmbeddingLookup, but get "
+ str(slice_mode))
Support_mode = ["field_slice", "table_row_slice", "table_column_slice", "batch_slice"]
validator.check_string(slice_mode, Support_mode, "slice_mode", self.cls_name)
if self.cache_enable and not enable_ps:
if parallel_mode != ParallelMode.STAND_ALONE:
raise ValueError("parallel mode haven't supported cache enable yet.")
raise ValueError(f"For '{self.cls_name}', parallel mode haven't supported cache enable yet.")
self._set_cache_enable()
self.embedding_table.unique = self.forward_unique
self.max_norm = max_norm
@@ -313,11 +314,14 @@ class EmbeddingLookup(Cell):
def _set_cache_enable(self):
"""EmbeddingLookup cache check for not ps env, which is only support 'ascend'."""
if self.target != 'DEVICE':
raise ValueError("The configuration of 'vocab_cache_size' is valid only in 'DEVICE' target.")
raise ValueError(f"For '{self.cls_name}', the configuration of 'vocab_cache_size' is valid only "
f"when 'target' is 'DEVICE', but got 'target': {self.target}")
if not self.sparse:
raise ValueError("The configuration of 'vocab_cache_size' is valid only 'sparse' is true.")
raise ValueError(f"For '{self.cls_name}', the configuration of 'vocab_cache_size' is valid only "
f"when 'sparse' is true, but got 'sparse': {self.sparse}.")
if context.get_context("device_target") != 'Ascend':
raise ValueError("The configuration of 'vocab_cache_size' is valid only in 'ascend'.")
raise ValueError(f"For '{self.cls_name}', the configuration of 'vocab_cache_size' is valid only "
f"when device target is 'Ascend', but got {context.get_context('device_target')}.")

logger.info("EmbeddingLookup cache enable takes effect.")
self.forward_unique = True
@@ -347,17 +351,20 @@ class EmbeddingLookup(Cell):
rank_id = get_rank()
full_batch = _get_full_batch()
if rank_size > 1 and not (full_batch and slice_mode == "table_row_slice"):
raise ValueError("The embeddingLookup cache of parameter server parallel only be used "
"in 'full_batch' and 'table_row_slice' parallel strategy.")
raise ValueError(f"For '{self.cls_name}', the cache of parameter server parallel only be used "
f"in \"full_batch\" and \"table_row_slice\" parallel strategy, but got "
f"full_batch: {full_batch} and 'slice_mode': {slice_mode}.")
self.vocab_cache_size = self.vocab_cache_size * rank_size
_set_rank_id(rank_id)
self.cache_enable = True
if _is_role_worker():
self.vocab_size = self.vocab_cache_size
if context.get_context("enable_sparse") != self.sparse:
raise ValueError("The value of parameter 'sparse' must be same for all EmbeddingLookup "
"kernels and equal the value of 'enable_sparse' in context setting in "
"parameter server cache mode")
raise ValueError(f"For '{self.cls_name}', the value of parameter 'sparse' must be same for all "
f"kernels and equal the value of 'enable_sparse' in context setting in "
f"parameter server cache mode, but got value of parameter 'sparse': {self.sparse}"
f" and the 'enable_sparse' in context setting: "
f"{context.get_context('enable_sparse')}.")

def _set_voacb_cache_enable_for_ps(self, vocab_cache_size, embedding_size, vocab_size):
"""PS embeddingLookup cache enable set."""
@@ -487,14 +494,14 @@ class MultiFieldEmbeddingLookup(EmbeddingLookup):
self.max_mask_mul = P.Mul()
self.max_no_equal = P.NotEqual()

validator.check_string(operator, ['SUM', 'MAX', 'MEAN'], 'operator', self.cls_name)
if operator == MultiFieldEmbeddingLookup.OPERATOR_SUM:
self.merge_op = P.UnsortedSegmentSum()
elif operator == MultiFieldEmbeddingLookup.OPERATOR_MAX:
self.merge_op = P.UnsortedSegmentMax()
elif operator == MultiFieldEmbeddingLookup.OPERATOR_MEAN:
self.merge_op = P.UnsortedSegmentSum()
else:
raise ValueError("The operator supports ['SUM', 'MAX', 'MEAN'], but found: "+str(operator))
self.merge_op = P.UnsortedSegmentSum()


parallel_mode = _get_parallel_mode()
is_auto_parallel = parallel_mode in (ParallelMode.SEMI_AUTO_PARALLEL, ParallelMode.AUTO_PARALLEL)
@@ -532,8 +539,8 @@ class MultiFieldEmbeddingLookup(EmbeddingLookup):
self.inf_add.shard(((1, 1, get_group_size()), (1, 1, 1)))
else:
if is_auto_parallel:
raise ValueError("slice_mode should be ['table_row_slice', 'batch_slice' and \
'table_column_slice'], but get " + str(slice_mode))
raise ValueError("For '{}', the 'slice_mode' should be in ['table_row_slice', 'batch_slice' and \
'table_column_slice'], but got {}".format(self.cls_name, str(slice_mode)))

# Min value for fp32
self.negative_inf_value = -3.402823466E+38


+ 1
- 1
mindspore/nn/layer/image.py View File

@@ -117,7 +117,7 @@ def _get_dtype_max(dtype):
@constexpr
def _check_input_4d(input_shape, param_name, func_name):
if len(input_shape) != 4:
raise ValueError(f"{func_name} {param_name} should be 4d, but got shape {input_shape}")
raise ValueError(f"For '{func_name}', the '{param_name}' should be 4d, but got shape {input_shape}.")
return True




+ 1
- 1
mindspore/nn/layer/math.py View File

@@ -1030,7 +1030,7 @@ class MatInverse(Cell):
>>> print(output)
[[49.36112 -13.555558 2.1111116]
[-13.555558 3.7777784 -0.5555557]
[2.1111116 -0.5555557 0.11111111]]
[2.1111116 -0.5555557 0.11111113]]
"""
def __init__(self):
"""Initialize MatInverse."""


+ 149
- 126
mindspore/nn/loss/loss.py View File

@@ -50,7 +50,8 @@ class LossBase(Cell):
super(LossBase, self).__init__()

if reduction not in ('mean', 'sum', 'none'):
raise ValueError(f"The reduction method for {reduction} is not supported")
raise ValueError(f"For '{self.cls_name}', the 'reduction' should be in ['mean', 'sum', 'none'], "
f"but got {reduction}.")

self.average = True
self.reduce = True
@@ -98,7 +99,7 @@ class LossBase(Cell):
x = self.cast(x, input_dtype)
return x

def construct(self, base, target):
def construct(self, logits, labels):
raise NotImplementedError


@@ -112,12 +113,13 @@ class _Loss(LossBase):
"will be removed in a future version, use 'LossBase' instead.")
super(_Loss, self).__init__(reduction)

def construct(self, base, target):
def construct(self, logits, labels):
raise NotImplementedError


@constexpr
def _check_is_tensor(param_name, input_data, cls_name):
"""Internal function, used to check whether the input data is Tensor."""
if input_data is not None and not isinstance(F.typeof(input_data), mstype.tensor_type):
raise TypeError(f"For '{cls_name}', the '{param_name}' should be '{mstype.tensor_type}', "
f"but got '{F.typeof(input_data)}'")
@@ -125,7 +127,7 @@ def _check_is_tensor(param_name, input_data, cls_name):
class L1Loss(LossBase):
r"""
L1Loss creates a criterion to measure the mean absolute error (MAE) between :math:`x` and :math:`y` element-wise,
where :math:`x` is the input Tensor and :math:`y` is the target Tensor.
where :math:`x` is the input Tensor and :math:`y` is the labels Tensor.

For simplicity, let :math:`x` and :math:`y` be 1-dimensional Tensor with length :math:`N`,
the unreduced loss (i.e. with argument reduction set to 'none') of :math:`x` and :math:`y` is given as:
@@ -185,17 +187,17 @@ class L1Loss(LossBase):
super(L1Loss, self).__init__(reduction)
self.abs = P.Abs()

def construct(self, base, target):
_check_is_tensor('logits', base, self.cls_name)
_check_is_tensor('labels', target, self.cls_name)
x = self.abs(base - target)
def construct(self, logits, labels):
_check_is_tensor('logits', logits, self.cls_name)
_check_is_tensor('labels', labels, self.cls_name)
x = self.abs(logits - labels)
return self.get_loss(x)


class MSELoss(LossBase):
r"""
MSELoss creates a criterion to measure the mean squared error (squared L2-norm) between :math:`x` and :math:`y`
element-wise, where :math:`x` is the input and :math:`y` is the target.
element-wise, where :math:`x` is the input and :math:`y` is the labels.

For simplicity, let :math:`x` and :math:`y` be 1-dimensional Tensor with length :math:`N`,
the unreduced loss (i.e. with argument reduction set to 'none') of :math:`x` and :math:`y` is given as:
@@ -250,17 +252,17 @@ class MSELoss(LossBase):
[[0. 1. 4.]
[0. 0. 1.]]
"""
def construct(self, base, target):
_check_is_tensor('logits', base, self.cls_name)
_check_is_tensor('labels', target, self.cls_name)
x = F.square(base - target)
def construct(self, logits, labels):
_check_is_tensor('logits', logits, self.cls_name)
_check_is_tensor('labels', labels, self.cls_name)
x = F.square(logits - labels)
return self.get_loss(x)


class RMSELoss(LossBase):
r"""
RMSELoss creates a criterion to measure the root mean square error between :math:`x` and :math:`y`
element-wise, where :math:`x` is the input and :math:`y` is the target.
element-wise, where :math:`x` is the input and :math:`y` is the labels.

For simplicity, let :math:`x` and :math:`y` be 1-dimensional Tensor with length :math:`N`,
the loss of :math:`x` and :math:`y` is given as:
@@ -268,7 +270,6 @@ class RMSELoss(LossBase):
.. math::
loss = \sqrt{\frac{1}{N}\sum_{i=1}^{N}{(x_i-y_i)^2}}


Inputs:
- **logits** (Tensor) - Tensor of shape :math:`(N, *)` where :math:`*` means, any number of
additional dimensions.
@@ -312,7 +313,7 @@ class RMSELoss(LossBase):
class MAELoss(LossBase):
r"""
MAELoss creates a criterion to measure the average absolute error between :math:`x` and :math:`y`
element-wise, where :math:`x` is the input and :math:`y` is the target.
element-wise, where :math:`x` is the input and :math:`y` is the labels.

For simplicity, let :math:`x` and :math:`y` be 1-dimensional Tensor with length :math:`N`,
the unreduced loss (i.e. with argument reduction set to 'none') of :math:`x` and :math:`y` is given as:
@@ -438,10 +439,10 @@ class SmoothL1Loss(LossBase):
self.beta = beta
self.smooth_l1_loss = P.SmoothL1Loss(self.beta)

def construct(self, base, target):
_check_is_tensor('logits', base, self.cls_name)
_check_is_tensor('labels', target, self.cls_name)
return self.smooth_l1_loss(base, target)
def construct(self, logits, labels):
_check_is_tensor('logits', logits, self.cls_name)
_check_is_tensor('labels', labels, self.cls_name)
return self.smooth_l1_loss(logits, labels)


class SoftMarginLoss(LossBase):
@@ -449,7 +450,7 @@ class SoftMarginLoss(LossBase):
A loss class for two-class classification problems.

SoftMarginLoss creates a criterion that optimizes a two-class classification
logistic loss between input tensor :math:`x` and target tensor :math:`y`
logistic loss between input tensor :math:`x` and labels tensor :math:`y`
(containing 1 or -1).

.. math::
@@ -487,8 +488,8 @@ class SoftMarginLoss(LossBase):
super(SoftMarginLoss, self).__init__()
self.soft_margin_loss = P.SoftMarginLoss(reduction)

def construct(self, base, target):
return self.soft_margin_loss(base, target)
def construct(self, logits, labels):
return self.soft_margin_loss(logits, labels)


class SoftmaxCrossEntropyWithLogits(LossBase):
@@ -496,7 +497,7 @@ class SoftmaxCrossEntropyWithLogits(LossBase):
Computes softmax cross entropy between logits and labels.

Measures the distribution error between the probabilities of the input (computed with softmax function) and the
target where the classes are mutually exclusive (only one class is positive) using cross entropy loss.
labels where the classes are mutually exclusive (only one class is positive) using cross entropy loss.

Typical input into this function is unnormalized scores denoted as x whose shape is (N, C),
and the corresponding targets.
@@ -510,7 +511,7 @@ class SoftmaxCrossEntropyWithLogits(LossBase):
where :math:`x_i` is a 1D score Tensor, :math:`c` is the index of 1 in one-hot.

Note:
While the target classes are mutually exclusive, i.e., only one class is positive in the target, the predicted
While the labels classes are mutually exclusive, i.e., only one class is positive in the labels, the predicted
probabilities does not need to be exclusive. It is only required that the predicted probability distribution
of entry is a valid one.

@@ -581,6 +582,7 @@ class SoftmaxCrossEntropyWithLogits(LossBase):

@constexpr
def _check_label_dtype(labels_dtype, cls_name):
"""Internal function, used to check whether the data type of labels meets the requirements."""
validator.check_type_name("labels", labels_dtype, [mstype.int32, mstype.int64], cls_name)


@@ -631,7 +633,7 @@ class DiceLoss(LossBase):
def construct(self, logits, label):
_check_is_tensor('logits', logits, self.cls_name)
_check_is_tensor('labels', label, self.cls_name)
_check_shape(logits.shape, label.shape)
_check_shape(logits.shape, label.shape, self.cls_name)
intersection = self.reduce_sum(self.mul(logits.view(-1), label.view(-1)))
unionset = self.reduce_sum(self.mul(logits.view(-1), logits.view(-1))) + \
self.reduce_sum(self.mul(label.view(-1), label.view(-1)))
@@ -643,22 +645,28 @@ class DiceLoss(LossBase):


@constexpr
def _check_shape(logits_shape, label_shape):
validator.check('logits_shape', logits_shape, 'label_shape', label_shape)
def _check_shape(logits_shape, label_shape, prim_name=None):
"""Internal function, used to check whether the shape of logits and labels meets the requirements."""
validator.check('logits_shape', logits_shape, 'label_shape', label_shape, prim_name=prim_name)


@constexpr
def _check_ndim_multi(logits_dim, label_dim):
def _check_ndim_multi(logits_dim, label_dim, prim_name=None):
"""Internal function, used to check whether the dimension of logits and label meets the requirements."""
msg_prefix = f'For \'{prim_name}\', the' if prim_name else "The"
if logits_dim < 2:
raise ValueError("Logits dimension should be greater than 1, but got {}".format(logits_dim))
raise ValueError(f"{msg_prefix} Logits dimension should be greater than 1, but got {logits_dim}.")
if label_dim < 2:
raise ValueError("Label dimension should be greater than 1, but got {}".format(label_dim))
raise ValueError(f"{msg_prefix} Label dimension should be greater than 1, but got {label_dim}.")


@constexpr
def _check_weights(weight_shape, label_shape):
def _check_weights(weight_shape, label_shape, prim_name=None):
"""Internal function, used to check whether the reduced shape meets the requirements."""
msg_prefix = f'For \'{prim_name}\', the' if prim_name else "The"
if weight_shape != label_shape:
raise ValueError("The weight shape[0] should be equal to label.shape[1].")
raise ValueError(f"{msg_prefix} weight shape[0] should be equal to label.shape[1], "
f"but got weight_shape: {weight_shape} and label_shape: {label_shape}.")


class MultiClassDiceLoss(LossBase):
@@ -709,22 +717,25 @@ class MultiClassDiceLoss(LossBase):
self.binarydiceloss = DiceLoss(smooth=1e-5)
self.weights = weights if weights is None else validator.check_value_type("weights", weights, [Tensor])
if isinstance(self.weights, Tensor) and self.weights.ndim != 2:
raise ValueError("The weight dimension should be 2, but got {}.".format(self.weights.ndim))
raise ValueError(f"For '{self.cls_name}', the dimension of 'weights' should be 2, "
f"but got {self.weights.ndim}.")
self.ignore_indiex = ignore_indiex if ignore_indiex is None else \
validator.check_value_type("ignore_indiex", ignore_indiex, [int])
if isinstance(activation, str) and activation not in activation_list:
raise ValueError("The activation must be in {}, but got {}.".format(activation_list, activation))
raise ValueError(f"For '{self.cls_name}', the 'activation' must be in {activation_list}, "
f"but got {activation}.")

self.activation = get_activation(activation) if isinstance(activation, str) else activation
if self.activation is not None and not isinstance(self.activation, Cell):
raise TypeError("The activation must be str or Cell, but got {}.".format(type(self.activation)))
raise TypeError(f"For '{self.cls_name}', the 'activation' must be str or Cell, "
f"but got {type(self.activation)}.")
self.reshape = P.Reshape()

def construct(self, logits, label):
_check_is_tensor('logits', logits, self.cls_name)
_check_is_tensor('labels', label, self.cls_name)
_check_shape(logits.shape, label.shape)
_check_ndim_multi(logits.ndim, label.ndim)
_check_shape(logits.shape, label.shape, self.cls_name)
_check_ndim_multi(logits.ndim, label.ndim, self.cls_name)
total_loss = 0

if self.activation is not None:
@@ -734,7 +745,7 @@ class MultiClassDiceLoss(LossBase):
if i != self.ignore_indiex:
dice_loss = self.binarydiceloss(logits[:, i], label[:, i])
if self.weights is not None:
_check_weights(self.weights.shape[0], label.shape[1])
_check_weights(self.weights.shape[0], label.shape[1], self.cls_name)
dice_loss *= self.weights[i]
total_loss += dice_loss

@@ -749,12 +760,12 @@ class SampledSoftmaxLoss(LossBase):
Args:
num_sampled (int): The number of classes to randomly sample per batch.
num_classes (int): The number of possible classes.
num_true (int): The number of target classes per training example. Default: 1.
num_true (int): The number of label classes per training example. Default: 1.
sampled_values (Union[list, tuple]): List or tuple of (`sampled_candidates`, `true_expected_count`,
`sampled_expected_count`) returned by a `*CandidateSampler` function.
Default to None, `UniformCandidateSampler` is applied.
remove_accidental_hits (bool): Whether to remove "accidental hits"
where a sampled class equals to one of the target classes. Default: True.
where a sampled class equals one of the label classes. Default: True.
seed (int): Random seed for candidate sampling. Default: 0
reduction (str): Type of reduction to be applied to loss. The optional values are "mean", "sum", and "none".
If "none", do not perform reduction. Default: "none".
@@ -762,7 +773,7 @@ class SampledSoftmaxLoss(LossBase):
Inputs:
- **weights** (Tensor) - Tensor of shape :math:`(C, dim)`.
- **bias** (Tensor) - Tensor of shape :math:`(C,)`. The class biases.
- **labels** (Tensor) - Tensor of shape :math:`(N, num\_true)`, type `int64, int32`. The target classes.
- **labels** (Tensor) - Tensor of shape :math:`(N, num\_true)`, type `int64, int32`. The label classes.
- **logits** (Tensor) - Tensor of shape :math:`(N, dim)`. The forward activations of the input network.

Outputs:
@@ -798,18 +809,24 @@ class SampledSoftmaxLoss(LossBase):
super(SampledSoftmaxLoss, self).__init__(reduction)

if num_true < 1:
raise ValueError(f"The num_true {num_true} is less than 1.")
raise ValueError(f"For '{self.cls_name}', the 'num_true' must be greater than or equal to 1, "
f"but got {num_true}.")
if seed < 0:
raise ValueError(f"The seed {seed} is less than 0.")
raise ValueError(f"For '{self.cls_name}', the 'seed' must be greater than or equal to 0, but got {seed}.")
if num_sampled > num_classes:
raise ValueError(f"The num_sampled {num_sampled} is greater than num_classes {num_classes}.")
raise ValueError(f"For '{self.cls_name}', the 'num_sampled' must be smaller than or "
f"equal to 'num_classes', but got 'num_sampled': {num_sampled} "
f"and 'num_classes': {num_classes}.")
if num_true > num_classes:
raise ValueError(f"The num_true {num_true} is greater than num_classes {num_classes}.")
raise ValueError(f"For '{self.cls_name}', the 'num_true' must be smaller than or equal to 'num_classes', "
f"but got 'num_true': {num_true} and 'num_classes': {num_classes}.")
if sampled_values is not None:
if not isinstance(sampled_values, (list, tuple)):
raise TypeError(f"The sampled_values {sampled_values} is not a list or tuple.")
raise TypeError(f"For '{self.cls_name}', the type of 'sampled_values' must be a list or tuple, "
f"but got {type(sampled_values).__name__}.")
if len(sampled_values) != 3:
raise ValueError(f"The sampled_values size {len(sampled_values)} is not 3.")
raise ValueError(f"For '{self.cls_name}', the length of 'sampled_values' must be equal to 3, "
f"but got {len(sampled_values)}.")

self.num_sampled = num_sampled
self.num_classes = num_classes
@@ -843,18 +860,18 @@ class SampledSoftmaxLoss(LossBase):
self.expand_dims = P.ExpandDims()
self.dtype = P.DType()

def construct(self, weights, biases, labels, inputs):
def construct(self, weights, biases, labels, logits):
_check_is_tensor('weights', weights, self.cls_name)
_check_is_tensor('biases', biases, self.cls_name)
_check_is_tensor('labels', labels, self.cls_name)
_check_is_tensor('inputs', inputs, self.cls_name)
_check_is_tensor('logits', logits, self.cls_name)
_check_label_dtype(self.dtype(labels), self.cls_name)

logits, labels = self._compute_sampled_logits(
weights=weights,
biases=biases,
labels=labels,
inputs=inputs,
logits=logits,
num_true=self.num_true,
sampled_values=self.sampled_values,
subtract_log_q=True)
@@ -870,7 +887,7 @@ class SampledSoftmaxLoss(LossBase):
def _compute_sampled_logits(self, weights,
biases,
labels,
inputs,
logits,
num_true=1,
sampled_values=None,
subtract_log_q=True):
@@ -878,17 +895,17 @@ class SampledSoftmaxLoss(LossBase):

Computes sampled output training logits and labels suitable

Note: In the case where num_true > 1, we assign to each target class
with the target probability (1/num_true) so that the target probabilities
Note: In the case where num_true > 1, we assign to each label class
with the label probability (1/num_true) so that the label probabilities
sum to 1 per-example.

Args:
weights (Tensor): Tensor of shape `[num_classes, dim]`.
biases (Tensor): Tensor of shape `[num_classes]`.
labels (Tensor): Tensor of shape `[batch_size, num_true]`. The target classes.
inputs (Tensor): Tensor of shape `[batch_size, dim]`. The forward
labels (Tensor): Tensor of shape `[batch_size, num_true]`. The label classes.
logits (Tensor): Tensor of shape `[batch_size, dim]`. The forward
activations of the input network.
num_true (int): The number of target classes per training example.
num_true (int): The number of label classes per training example.
sampled_values: A tuple of (`sampled_candidates`, `true_expected_count`,
`sampled_expected_count`) returned by a `UniformCandidateSampler` function.
subtract_log_q: A `bool`. whether to subtract the log expected count of
@@ -924,14 +941,14 @@ class SampledSoftmaxLoss(LossBase):

true_w = self.slice_op(all_w, [0, 0], [n_true, n_dim])
sampled_w = self.slice_op(all_w, [n_true, 0], [n_sampled, n_dim])
sampled_logits = self.matmul(inputs, sampled_w)
sampled_logits = self.matmul(logits, sampled_w)

all_b = self.gather_v2(biases, all_ids, 0)
true_b = self.slice_op(all_b, [0], [n_true])
sampled_b = self.slice_op(all_b, [n_true], [n_sampled])

new_true_w_shape = (-1, num_true, n_dim)
row_wise_dots = self.mul(self.expand_dims(inputs, 1),
row_wise_dots = self.mul(self.expand_dims(logits, 1),
self.reshape(true_w, new_true_w_shape))

# We want the row-wise dot plus biases which yields a
@@ -1027,19 +1044,20 @@ class BCELoss(LossBase):
else:
self.ones = P.OnesLike()

def construct(self, inputs, labels):
_check_is_tensor('logits', inputs, self.cls_name)
def construct(self, logits, labels):
_check_is_tensor('logits', logits, self.cls_name)
_check_is_tensor('labels', labels, self.cls_name)
if self.weight_one:
weight = self.ones(inputs)
weight = self.ones(logits)
else:
weight = self.weight
loss = self.binary_cross_entropy(inputs, labels, weight)
loss = self.binary_cross_entropy(logits, labels, weight)
return loss


@constexpr
def _check_reduced_shape_valid(ori_shape, reduced_shape, axis, cls_name):
"""Internal function, used to check whether the reduced shape meets the requirements."""
validator.check_reduce_shape(ori_shape, reduced_shape, axis, cls_name)


@@ -1096,25 +1114,25 @@ class CosineEmbeddingLoss(LossBase):
validator.check_value_type("margin", margin, [float], self.cls_name)
self.margin = validator.check_float_range(margin, -1.0, 1.0, Rel.INC_BOTH, "margin", self.cls_name)

def construct(self, x1, x2, y):
_check_is_tensor('logits_x1', x1, self.cls_name)
_check_is_tensor('logits_x2', x2, self.cls_name)
_check_is_tensor('labels', y, self.cls_name)
F.same_type_shape(x1, x2)
_check_reduced_shape_valid(F.shape(x1), F.shape(y), (1,), self.cls_name)
# if target > 0, 1-cosine(x1, x2)
# else, max(0, cosine(x1, x2)-margin)
prod_sum = self.reduce_sum(x1 * x2, (1,))
square1 = self.reduce_sum(F.square(x1), (1,))
square2 = self.reduce_sum(F.square(x2), (1,))
def construct(self, logits_x1, logits_x2, labels):
_check_is_tensor('logits_x1', logits_x1, self.cls_name)
_check_is_tensor('logits_x2', logits_x2, self.cls_name)
_check_is_tensor('labels', labels, self.cls_name)
F.same_type_shape(logits_x1, logits_x2)
_check_reduced_shape_valid(F.shape(logits_x1), F.shape(labels), (1,), self.cls_name)
# if labels > 0, 1-cosine(logits_x1, logits_x2)
# else, max(0, cosine(logits_x1, logits_x2)-margin)
prod_sum = self.reduce_sum(logits_x1 * logits_x2, (1,))
square1 = self.reduce_sum(F.square(logits_x1), (1,))
square2 = self.reduce_sum(F.square(logits_x2), (1,))
denom = F.sqrt(square1) * F.sqrt(square2)
cosine = prod_sum / denom

pos_value = 1.0 - cosine
neg_value = self.maximum(cosine - self.margin, 0.0)
zeros = F.zeros_like(cosine)
pos_part = F.select(y == 1, pos_value, zeros)
neg_part = F.select(y == -1, neg_value, zeros)
pos_part = F.select(labels == 1, pos_value, zeros)
neg_part = F.select(labels == -1, neg_value, zeros)
output_unreduced = pos_part + neg_part

return self.get_loss(output_unreduced)
@@ -1185,17 +1203,17 @@ class BCEWithLogitsLoss(LossBase):
super(BCEWithLogitsLoss, self).__init__()
self.bce_with_logits_loss = P.BCEWithLogitsLoss(reduction=reduction)
if isinstance(weight, Parameter):
raise TypeError(f"For {self.cls_name}, the weight can not be a parameter.")
raise TypeError(f"For '{self.cls_name}', the 'weight' can not be a parameter.")
if isinstance(pos_weight, Parameter):
raise TypeError(f"For {self.cls_name}, the pos_weight can not be a parameter.")
raise TypeError(f"For '{self.cls_name}', the 'pos_weight' can not be a parameter.")
self.weight = weight
self.pos_weight = pos_weight
self.ones = P.OnesLike()

def construct(self, predict, target):
_check_is_tensor('logits', predict, self.cls_name)
_check_is_tensor('labels', target, self.cls_name)
ones_input = self.ones(predict)
def construct(self, logits, labels):
_check_is_tensor('logits', logits, self.cls_name)
_check_is_tensor('labels', labels, self.cls_name)
ones_input = self.ones(logits)
if self.weight is not None:
weight = self.weight
else:
@@ -1204,37 +1222,42 @@ class BCEWithLogitsLoss(LossBase):
pos_weight = self.pos_weight
else:
pos_weight = ones_input
loss = self.bce_with_logits_loss(predict, target, weight, pos_weight)
loss = self.bce_with_logits_loss(logits, labels, weight, pos_weight)
return loss


@constexpr
def _check_ndim(predict_nidm, target_ndim):
if predict_nidm < 2 or predict_nidm > 4:
raise ValueError("The dimensions of predict should be between 2 and 4, but got"
"predict dim {}.".format(predict_nidm))
if target_ndim < 2 or target_ndim > 4:
raise ValueError("The dimensions of target should be between 2 and 4, but got"
"target dim {}.".format(target_ndim))
if predict_nidm != target_ndim:
raise ValueError("The dimensions of predict and target must be equal, but got"
"predict dim {} and target dim {}.".format(predict_nidm, target_ndim))
def _check_ndim(logits_nidm, labels_ndim, prime_name=None):
"""Internal function, used to check whether the dimension of logits and labels meets the requirements."""
msg_prefix = f'For \'{prime_name}\', the' if prime_name else "The"
if logits_nidm < 2 or logits_nidm > 4:
raise ValueError(f"{msg_prefix} dimensions of 'logits' should be in [2, 4], but got "
f"dimension of 'logits' {logits_nidm}.")
if labels_ndim < 2 or labels_ndim > 4:
raise ValueError(f"{msg_prefix} dimensions of 'labels' should be in [2, 4], but got "
f"dimension of 'labels' {labels_ndim}.")
if logits_nidm != labels_ndim:
raise ValueError(f"{msg_prefix} dimensions of 'logits' and 'labels' must be equal, but got "
f"dimension of 'logits' {logits_nidm} and dimension of 'labels' {labels_ndim}.")


@constexpr
def _check_channel_and_shape(predict, target):
if predict == 1:
raise ValueError("Single channel prediction is not supported.")
if target not in (1, predict):
raise ValueError("The target must have a channel or the same shape as predict."
"If it has a channel, it should be the range [0, C-1], where C is the number of classes "
f"inferred from 'predict': C={predict}.")
def _check_channel_and_shape(logits, labels, prime_name=None):
'''Internal function, used to check whether the channels or shape of logits and labels meets the requirements.'''
msg_prefix = f'For \'{prime_name}\', the' if prime_name else "The"
if logits == 1:
raise ValueError(f"{msg_prefix} single channel prediction is not supported, but got {logits}.")
if labels not in (1, logits):
raise ValueError(f"{msg_prefix} 'labels' must have a channel or the same shape as 'logits'. "
f"If it has a channel, it should be the range [0, C-1], where C is the number of classes "
f"inferred from 'logits': C={logits}, but got 'labels': {labels}.")


@constexpr
def _check_input_dtype(targets_dtype, cls_name):
validator.check_type_name("targets", targets_dtype, [mstype.int32, mstype.int64, mstype.float16,
mstype.float32], cls_name)
def _check_input_dtype(labels_dtype, cls_name):
"""Internal function, used to check whether the data type of labels meets the requirements."""
validator.check_type_name("labels", labels_dtype,
[mstype.int32, mstype.int64, mstype.float16, mstype.float32], cls_name)


class FocalLoss(LossBase):
@@ -1293,9 +1316,9 @@ class FocalLoss(LossBase):

self.gamma = validator.check_value_type("gamma", gamma, [float])
if weight is not None and not isinstance(weight, Tensor):
raise TypeError("The type of weight should be a tensor, but got {}.".format(type(weight)))
raise TypeError(f"For '{self.cls_name}', the type of 'weight' should be a Tensor.")
if isinstance(weight, Tensor) and weight.ndim != 1:
raise ValueError("The dimension of weight should be 1, but got {}.".format(weight.ndim))
raise ValueError(f"For '{self.cls_name}', the dimension of 'weight' should be 1, but got {weight.ndim}.")
self.weight = weight
self.expand_dims = P.ExpandDims()
self.gather_d = P.GatherD()
@@ -1305,41 +1328,41 @@ class FocalLoss(LossBase):
self.dtype = P.DType()
self.logsoftmax = nn.LogSoftmax(1)

def construct(self, predict, target):
_check_is_tensor('logits', predict, self.cls_name)
_check_is_tensor('labels', target, self.cls_name)
targets = target
_check_ndim(predict.ndim, targets.ndim)
_check_channel_and_shape(predict.shape[1], targets.shape[1])
_check_input_dtype(self.dtype(targets), self.cls_name)
if predict.ndim > 2:
predict = predict.view(predict.shape[0], predict.shape[1], -1)
targets = targets.view(targets.shape[0], targets.shape[1], -1)
def construct(self, logits, labels):
_check_is_tensor('logits', logits, self.cls_name)
_check_is_tensor('labels', labels, self.cls_name)
labelss = labels
_check_ndim(logits.ndim, labelss.ndim)
_check_channel_and_shape(logits.shape[1], labelss.shape[1])
_check_input_dtype(self.dtype(labelss), self.cls_name)
if logits.ndim > 2:
logits = logits.view(logits.shape[0], logits.shape[1], -1)
labelss = labelss.view(labelss.shape[0], labelss.shape[1], -1)
else:
predict = self.expand_dims(predict, 2)
targets = self.expand_dims(targets, 2)
logits = self.expand_dims(logits, 2)
labelss = self.expand_dims(labelss, 2)

log_probability = self.logsoftmax(predict)
log_probability = self.logsoftmax(logits)

if target.shape[1] == 1:
log_probability = self.gather_d(log_probability, 1, self.cast(targets, mindspore.int32))
if labels.shape[1] == 1:
log_probability = self.gather_d(log_probability, 1, self.cast(labelss, mindspore.int32))
log_probability = self.squeeze(log_probability)

probability = F.exp(log_probability)

if self.weight is not None:
convert_weight = self.weight[None, :, None]
convert_weight = self.tile(convert_weight, (targets.shape[0], 1, targets.shape[2]))
if target.shape[1] == 1:
convert_weight = self.gather_d(convert_weight, 1, self.cast(targets, mindspore.int32))
convert_weight = self.tile(convert_weight, (labelss.shape[0], 1, labelss.shape[2]))
if labels.shape[1] == 1:
convert_weight = self.gather_d(convert_weight, 1, self.cast(labelss, mindspore.int32))
convert_weight = self.squeeze(convert_weight)
log_probability = log_probability * convert_weight

weight = F.pows(-1 * probability + 1.0, self.gamma)
if target.shape[1] == 1:
if labels.shape[1] == 1:
loss = (-1 * weight * log_probability).mean(axis=1)
else:
loss = (-1 * weight * targets * log_probability).mean(axis=-1)
loss = (-1 * weight * labelss * log_probability).mean(axis=-1)

return self.get_loss(loss)

+ 1
- 1
mindspore/ops/composite/multitype_ops/_constexpr_utils.py View File

@@ -252,7 +252,7 @@ def is_same_type(inst, type_):
def check_valid_dim(dim, name):
"""Checks whether the dim is valid."""
if dim not in (1, 2):
raise ValueError(f"For {name}, inputs dim must be 1d or 2d")
raise ValueError(f"For '{name}', inputs dim must be 1d or 2d, but got {dim}.")


@constexpr


+ 4
- 6
mindspore/ops/composite/random_ops.py View File

@@ -13,7 +13,6 @@
# limitations under the License.
# ============================================================================
"""Operations for random number generators."""

from mindspore.ops.primitive import constexpr
from .. import operations as P
from .. import functional as F
@@ -253,10 +252,8 @@ def gamma(shape, alpha, beta, seed=None):
>>> print(output)
[[[ 2.2132034 5.8855834]]
[ 3.3981476 7.5805717]

[[ 3.3981476 7.5805717]]
[ 3.7190282 19.941492]

[[ 2.9512358 2.5969937]]
[ 3.786061 5.160872 ]]]
>>> # case 4: beta_shape is (2, 1), the output is different.
@@ -268,10 +265,8 @@ def gamma(shape, alpha, beta, seed=None):
>>> print(output)
[[[ 5.6085486 7.8280783]]
[ 15.97684 16.116285]

[[ 1.8347423 1.713663]]
[ 3.2434065 15.667398]

[[ 4.2922077 7.3365674]]
[ 5.3876944 13.159832 ]]]
"""
@@ -388,7 +383,10 @@ def multinomial(inputs, num_sample, replacement=True, seed=None):
seed1, seed2 = _get_seed(seed, "multinomial")
if not replacement:
if shape(inputs)[-1] < num_sample:
const_utils.raise_value_error("num_sample must be less than shape(input)[-1] without replacement")
const_utils.raise_value_error("For 'multinomial', the 'num_sample' must be less than "
"the last dimension of input without 'replacement', "
"but got 'num_sample': {} and "
"'replacement': {}".format(num_sample, replacement))
n_dist = 1
if len(shape(inputs)) > 1:
n_dist = shape(inputs)[-2]


+ 4
- 4
mindspore/ops/operations/array_ops.py View File

@@ -2710,8 +2710,8 @@ class Slice(PrimitiveWithInfer):
def __infer__(self, x, begin, size):
x_shape = x['shape']
x_shp_len = len(x_shape)
validator.check_const_input('begin', begin['value'], self.name)
validator.check_const_input('size', size['value'], self.name)
validator.check_valid_input('begin', begin['value'], self.name)
validator.check_valid_input('size', size['value'], self.name)
begin_v, size_v = begin['value'], size['value']
if begin_v is None or size_v is None:
return {'shape': None,
@@ -5564,8 +5564,8 @@ class EditDistance(PrimitiveWithInfer):
self.set_const_input_indexes([2, 5])

def __infer__(self, h_indices, h_values, h_shape, truth_indices, truth_values, truth_shape):
validator.check_const_input('hypothesis_shape', h_shape['value'], self.name)
validator.check_const_input('truth_shape', truth_shape['value'], self.name)
validator.check_valid_input('hypothesis_shape', h_shape['value'], self.name)
validator.check_valid_input('truth_shape', truth_shape['value'], self.name)
args_int = {"hypothesis_indices": h_indices['dtype'], "hypothesis_shape": h_shape['dtype'],
"truth_indices": truth_indices['dtype'], "truth_shape": truth_shape['dtype']}
validator.check_tensors_dtypes_same_and_valid(args_int, [mstype.int64], self.name)


+ 2
- 3
mindspore/ops/operations/comm_ops.py View File

@@ -51,10 +51,9 @@ class ReduceOp:

Examples:
>>> from mindspore.communication import init
>>> from mindspore import Tensor
>>> from mindspore.ops.operations.comm_ops import ReduceOp
>>> from mindspore import Tensor, ops
>>> from mindspore.ops import ReduceOp
>>> import mindspore.nn as nn
>>> import mindspore.ops.operations as ops
>>>
>>> init()
>>> class Net(nn.Cell):


Loading…
Cancel
Save