You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

_operators.py 8.5 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262
  1. # Copyright 2020-2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """Packaged operations based on MindSpore."""
  16. __all__ = [
  17. 'absolute',
  18. 'arange',
  19. 'argmax',
  20. 'argmin',
  21. 'argsort',
  22. 'assign',
  23. 'intersection',
  24. 'matmul',
  25. 'maximum',
  26. 'minimum',
  27. 'mean',
  28. 'mul',
  29. 'sort',
  30. 'sqrt',
  31. 'squeeze',
  32. 'tile',
  33. 'reshape',
  34. 'zeros',
  35. 'zeros_like',
  36. 'softmax',
  37. 'Tensor',
  38. 'summation'
  39. ]
  40. from typing import List, Tuple, Union, Callable
  41. import numpy as np
  42. import mindspore
  43. from mindspore import nn
  44. import mindspore.ops.operations as op
  45. _Axis = Union[int, Tuple[int, ...], List[int]]
  46. _Idx = Union[int, mindspore.Tensor, Tuple[int, ...], Tuple[mindspore.Tensor, ...]]
  47. _Number = Union[int, float, np.int, np.float]
  48. _Shape = Union[int, Tuple[int, ...]]
  49. Tensor = mindspore.Tensor
  50. def absolute(inputs: Tensor) -> Tensor:
  51. """Get the absolute value of a tensor value."""
  52. abs_op = op.Abs()
  53. outputs = abs_op(inputs)
  54. return outputs
  55. def arange(
  56. start: _Number,
  57. end: _Number,
  58. step: _Number = 1,
  59. dtype: mindspore.dtype = None) -> Tensor:
  60. """Get the arange value of tensor."""
  61. nums = np.arange(start=start, stop=end, step=step, dtype=np.int32)
  62. nums = mindspore.Tensor(nums, dtype=dtype)
  63. return nums
  64. def argmax(inputs: Tensor, axis: int = -1, keep_dims: bool = False) -> Tensor:
  65. """Returns the indices of the maximum values along an axis."""
  66. inputs_np = inputs.asnumpy()
  67. outputs = np.argmax(inputs_np, axis=axis)
  68. if keep_dims:
  69. outputs = np.expand_dims(outputs, axis=axis)
  70. return mindspore.Tensor(outputs, mindspore.int32)
  71. def argmin(inputs: Tensor, axis: int = -1, keep_dims: bool = False) -> Tensor:
  72. """Returns the indices of the minimum values along an axis."""
  73. inputs_np = inputs.asnumpy()
  74. outputs = np.argmin(inputs_np, axis=axis)
  75. if keep_dims:
  76. outputs = np.expand_dims(outputs, axis=axis)
  77. return mindspore.Tensor(outputs, mindspore.int32)
  78. def argsort(inputs: Tensor, axis: int = -1, descending: bool = False) -> Tensor:
  79. """Returns the indices that would sort an array."""
  80. inputs_np = inputs.asnumpy()
  81. factor = -1 if descending else 1
  82. indices_np = np.argsort(factor * inputs_np, axis=axis)
  83. indices = mindspore.Tensor(indices_np, dtype=mindspore.int32)
  84. return indices
  85. def assign(inputs: Tensor, idx: _Idx, value: Tensor) -> Tensor:
  86. """Assign a tensor value to the given tensor and index."""
  87. inputs_np = inputs.asnumpy()
  88. if isinstance(idx, Tensor):
  89. idx = idx.asnumpy()
  90. value_np = value.asnumpy()
  91. inputs_np[idx] = value_np
  92. outputs = mindspore.Tensor(inputs_np)
  93. return outputs
  94. def intersection(*inputs: Tensor) -> Tensor:
  95. """Get the intersection value by the given tensor list."""
  96. outputs_np = np.ones_like(inputs[0])
  97. for inp in inputs:
  98. outputs_np &= inp.asnumpy()
  99. outputs = mindspore.Tensor(outputs_np)
  100. return outputs
  101. def matmul(inputs_x: Tensor, inputs_y: Tensor) -> Tensor:
  102. """Multiplies matrix `inputs_x` and matrix `inputs_y`."""
  103. matmul_op = op.MatMul()
  104. outputs = matmul_op(inputs_x, inputs_y)
  105. return outputs
  106. def maximum(inputs: Tensor, axis: _Axis = (), keep_dims: bool = False) -> Tensor:
  107. """Reduces a dimension of a tensor by the maximum value in this dimension."""
  108. max_op = op.ReduceMax(keep_dims)
  109. outputs = max_op(inputs, axis)
  110. return outputs
  111. def minimum(inputs: Tensor, axis: _Axis = (), keep_dims: bool = False) -> Tensor:
  112. """Reduces a dimension of a tensor by the minimum value in the dimension."""
  113. max_op = op.ReduceMin(keep_dims)
  114. outputs = max_op(inputs, axis)
  115. return outputs
  116. def mean(inputs: Tensor, axis: _Axis = (), keep_dims: bool = False) -> Tensor:
  117. """Reduces a dimension of a tensor by averaging all elements in the dimension."""
  118. mean_op = op.ReduceMean(keep_dims)
  119. outputs = mean_op(inputs, axis)
  120. return outputs
  121. def mul(inputs_x: Tensor, inputs_y: Tensor) -> Tensor:
  122. """
  123. Multiplies two tensors element-wise.
  124. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  125. The inputs must be two tensors or one tensor and one scalar.
  126. When the inputs are two tensors,
  127. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  128. When the inputs are one tensor and one scalar,
  129. the scalar could only be a constant.
  130. Inputs:
  131. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  132. a bool or a tensor whose data type is number or bool.
  133. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  134. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  135. Outputs:
  136. Tensor, the shape is the same as the one after broadcasting,
  137. and the data type is the one with higher precision or higher digits among the two inputs.
  138. """
  139. mul_op = op.Mul()
  140. outputs = mul_op(inputs_x, inputs_y)
  141. return outputs
  142. def sort(inputs: Tensor, axis: _Axis = -1, descending: bool = False) -> Tensor:
  143. """Return a sorted copy of an array."""
  144. inputs_np = inputs.asnumpy()
  145. outputs_np = np.sort(inputs_np, axis=axis)
  146. if descending:
  147. outputs_np = np.flip(outputs_np, axis=axis)
  148. outputs = mindspore.Tensor(outputs_np)
  149. return outputs
  150. def squeeze(inputs: Tensor, axis: _Axis = ()):
  151. """Returns a tensor with the same type but dimensions of 1 are removed based on `axis`."""
  152. squeeze_op = op.Squeeze(axis)
  153. outputs = squeeze_op(inputs)
  154. return outputs
  155. def tile(inputs: Tensor, shape: Tuple[int, ...]) -> Tensor:
  156. """Replicates a tensor with given multiples times."""
  157. tile_op = op.Tile()
  158. outputs = tile_op(inputs, shape)
  159. return outputs
  160. def reshape(inputs: Tensor, shape: _Shape) -> Tensor:
  161. """Reshapes input tensor with the same values based on a given shape tuple."""
  162. if isinstance(shape, int):
  163. shape = (shape,)
  164. return op.Reshape()(inputs, shape)
  165. def zeros(shape: _Shape, dtype: mindspore.dtype = None) -> Tensor:
  166. """Return a new array of given shape and type, filled with zeros."""
  167. outputs = np.zeros(shape)
  168. return mindspore.Tensor(outputs, dtype=dtype)
  169. def zeros_like(inputs: Tensor, dtype: mindspore.dtype = None) -> Tensor:
  170. """Return an array of zeros with the same shape and type as a given array."""
  171. inputs_np = inputs.asnumpy()
  172. outputs_np = np.zeros_like(inputs_np)
  173. outputs = mindspore.Tensor(outputs_np, dtype)
  174. return outputs
  175. def random(shape: _Shape, dtype: mindspore.dtype = None) -> Tensor:
  176. """Return random floats in the half-open interval [0.0, 1.0)."""
  177. outputs_np = np.random.random(shape)
  178. outputs = mindspore.Tensor(outputs_np, dtype)
  179. return outputs
  180. def randint(low: int, high: int, shape: _Shape, dtype: mindspore.dtype = mindspore.int8) -> Tensor:
  181. """Return random integers from `low` (inclusive) to `high` (exclusive)."""
  182. outputs_np = np.random.randint(low, high, size=shape)
  183. outputs = mindspore.Tensor(outputs_np, dtype=dtype)
  184. return outputs
  185. def softmax(axis: int = -1) -> Callable:
  186. """Softmax activation function."""
  187. func = nn.Softmax(axis=axis)
  188. return func
  189. def summation(inputs: Tensor, axis: _Axis = (), keep_dims: bool = False) -> Tensor:
  190. """Reduces a dimension of a tensor by summing all elements in the dimension."""
  191. sum_op = op.ReduceSum(keep_dims)
  192. outputs = sum_op(inputs, axis)
  193. return outputs
  194. def stack(inputs: List[Tensor], axis: int) -> Tensor:
  195. """Stacks a list of tensors in specified axis."""
  196. stack_op = op.Stack(axis)
  197. outputs = stack_op(inputs)
  198. return outputs
  199. def sqrt(inputs: Tensor) -> Tensor:
  200. """Returns square root of a tensor element-wise."""
  201. sqrt_op = op.Sqrt()
  202. return sqrt_op(inputs)