You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

array_ops.py 5.8 kB

5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170
  1. # Copyright 2020-2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """array Operations."""
  16. from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils
  17. from mindspore.common import dtype as mstype
  18. from mindspore._checkparam import Validator as validator
  19. from mindspore._checkparam import Rel
  20. from mindspore.ops.primitive import constexpr
  21. from mindspore.ops import functional as F
  22. from .. import operations as P
  23. @constexpr
  24. def _check_is_int(arg_value, arg_name, op_name):
  25. arg_value = validator.check_is_int(arg_value, arg_name, op_name)
  26. return arg_value
  27. @constexpr
  28. def _check_positive_int(arg_value, arg_name, op_name):
  29. arg_value = validator.check_positive_int(arg_value, arg_name, op_name)
  30. return arg_value
  31. @constexpr
  32. def _check_axis_range(arg_value, limit, arg_name, op_name):
  33. arg_value = validator.check_int_range(arg_value, -limit, limit, Rel.INC_LEFT, arg_name, op_name)
  34. return arg_value
  35. @constexpr
  36. def _cal_repeat_dims(x_rank, rep, expand_axis):
  37. rep_dims = [1] * (x_rank + 1)
  38. rep_dims[expand_axis] = rep
  39. return tuple(rep_dims)
  40. @constexpr
  41. def _cal_reshape(x_shape, rep, axis):
  42. x_reshape = list(x_shape)
  43. x_reshape[axis] *= rep
  44. return tuple(x_reshape)
  45. def repeat_elements(x, rep, axis=0):
  46. """
  47. Repeat elements of a tensor along an axis, like np.repeat.
  48. Args:
  49. x (Tensor): The tensor to repeat values for. Must be of type: float16,
  50. float32, int8, uint8, int16, int32, or int64.
  51. rep (int): The number of times to repeat, must be positive, required.
  52. axis (int): The axis along which to repeat, default 0.
  53. Outputs:
  54. One tensor with values repeated along the specified axis. If x has shape
  55. (s1, s2, ..., sn) and axis is i, the output will have shape (s1, s2, ...,
  56. si * rep, ..., sn). The output type will be the same as the type of `x`.
  57. Supported Platforms:
  58. ``Ascend`` ``GPU`` ``CPU``
  59. Examples:
  60. >>> x = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
  61. >>> output = C.repeat_elements(x, rep = 2, axis = 0)
  62. >>> print(output)
  63. [[0 1 2]
  64. [0 1 2]
  65. [3 4 5]
  66. [3 4 5]]
  67. """
  68. const_utils.check_type_valid(F.dtype(x), mstype.number_type, 'input x')
  69. rep = _check_positive_int(rep, "rep", "repeat_elements")
  70. axis = _check_is_int(axis, "axis", "repeat_elements")
  71. shape_op = P.Shape()
  72. rank_op = P.Rank()
  73. tile_op = P.Tile()
  74. expand_dims_op = P.ExpandDims()
  75. reshape_op = P.Reshape()
  76. x_rank = rank_op(x)
  77. axis = _check_axis_range(axis, x_rank, "axis", "repeat_elements")
  78. expand_axis = axis + 1
  79. x_expand = expand_dims_op(x, expand_axis)
  80. rep_dims = _cal_repeat_dims(x_rank, rep, expand_axis)
  81. x_expand = tile_op(x_expand, rep_dims)
  82. x_shape = shape_op(x)
  83. x_reshape = _cal_reshape(x_shape, rep, axis)
  84. x_rep = reshape_op(x_expand, x_reshape)
  85. return x_rep
  86. @constexpr
  87. def _check_sequence_mask_input_len(input_shape):
  88. if not input_shape:
  89. raise ValueError(f"sequence_mask input lengths_shape should be > 0. "
  90. f"current lengths_shape is {input_shape}.")
  91. def sequence_mask(lengths, maxlen=None):
  92. """
  93. Returns a mask tensor representing the first N positions of each cell.
  94. If lengths has shape [d_1, d_2, ..., d_n], then the resulting tensor mask has type dtype and shape
  95. [d_1, d_2, ..., d_n, maxlen], with mask[i_1, i_2, ..., i_n, j] = (j < lengths[i_1, i_2, ..., i_n])
  96. Inputs:
  97. - **lengths** (Tensor) - Tensor to calculate the mask for. All values in this tensor should be
  98. less than or equal to `maxlen`. Values greater than `maxlen` will be treated as `maxlen`.
  99. Must be type int32 or int64.
  100. - **maxlen** (int) - size of the last dimension of returned tensor. Must be positive and same
  101. type as elements in `lengths`.
  102. Outputs:
  103. One mask tensor of shape lengths.shape + (maxlen,).
  104. Supported Platforms:
  105. ``GPU``
  106. Examples:
  107. >>> x = Tensor(np.array([[1, 3], [2, 0]]))
  108. >>> output = C.sequence_mask(x, 3)
  109. >>> print(output)
  110. [[[True, False, False],
  111. [True, True, True]],
  112. [[True, True, False],
  113. [False, False, False]]]
  114. """
  115. argmax_op = P.ArgMaxWithValue()
  116. reshape_op = P.Reshape()
  117. range_op = P.Range()
  118. expand_op = P.ExpandDims()
  119. cast_op = P.Cast()
  120. shape_op = P.Shape()
  121. to_tensor_op = P.ScalarToArray()
  122. const_utils.check_type_valid(F.dtype(lengths), [mstype.int64, mstype.int32], 'lengths')
  123. _check_sequence_mask_input_len(shape_op(lengths))
  124. if maxlen is None:
  125. flatten_data = reshape_op(lengths, (-1,))
  126. flatten_data = cast_op(flatten_data, mstype.float32)
  127. _, value = argmax_op(flatten_data)
  128. maxlen = cast_op(value, mstype.int32)
  129. else:
  130. maxlen = _check_positive_int(maxlen, "maxlen", "sequence_mask")
  131. maxlen = to_tensor_op(maxlen)
  132. range_vector = range_op(to_tensor_op(0), maxlen
  133. , to_tensor_op(1))
  134. mask = expand_op(lengths, -1)
  135. result = range_vector < mask
  136. return result