
math_ops.py 26 kB

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Math operations."""
from itertools import zip_longest
from collections import deque
import numpy as np
from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils
from mindspore.common import dtype as mstype
from mindspore._checkparam import Validator as validator
from mindspore.ops.primitive import constexpr
from mindspore.ops import functional as F
from .. import operations as P


# count_nonzero
@constexpr
def _check_validate_axis(axis, name):
    if isinstance(axis, (tuple, list)):
        for idx, item in enumerate(axis):
            validator.check_value_type("axis[%d]" % idx, item, [int], name)
    axis = validator.check_value_type('axis', axis, [int, tuple, list], name)
    return axis


@constexpr
def _check_validate_keepdims(keep_dims, name):
    keep_dims = validator.check_value_type('keep_dims', keep_dims, [bool], name)
    return keep_dims

def count_nonzero(x, axis=(), keep_dims=False, dtype=mstype.int32):
    r"""
    Count the number of nonzero elements across the given axis of the input tensor.

    Args:
        x (Tensor): Input data is used to count non-zero numbers.
        axis (Union[int, tuple(int), list(int)]): The dimensions to reduce. Only constant value is allowed.
            Default: (), reduce all dimensions.
        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
            If false, don't keep these dimensions. Default: False.
        dtype (Union[Number, mstype.bool\_]): The data type of the output tensor. Only constant value is allowed.
            Default: mstype.int32

    Returns:
        Tensor, number of nonzero elements. The data type is dtype.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> input_x = Tensor(np.array([[0, 1, 0], [1, 1, 0]]).astype(np.float32))
        >>> nonzero_num = count_nonzero(x=input_x, axis=[0, 1], keep_dims=True, dtype=mstype.int32)
        >>> print(nonzero_num)
        [[3]]
    """
    const_utils.check_type_valid(F.dtype(x), mstype.number_type, 'input x')
    axis = _check_validate_axis(axis, "count_nonzero")
    keep_dims = _check_validate_keepdims(keep_dims, "count_nonzero")
    const_utils.check_type_valid(dtype, mstype.number_type + (mstype.bool_,), 'dtype')

    not_equal = P.NotEqual()
    cast = P.Cast()
    reduce_sum = P.ReduceSum(keep_dims)
    nonzero_bool = not_equal(x, 0)
    # ReduceSum only supports float16 and float32 tensors.
    nonzero_val = cast(nonzero_bool, mstype.float32)
    nonzero_num = cast(reduce_sum(nonzero_val, axis), dtype)
    return nonzero_num

# tensor dot
@constexpr
def _int_to_tuple_conv(axes):
    """
    Converts ints to tuples in input axes, expected by most validation checks.
    """
    for x in [0, 1]:
        if isinstance(axes[x], int):
            axes[x] = (axes[x],)
    return axes

@constexpr
def _check_axes(axes):
    """
    Check for validity and type of axes passed to function.
    """
    validator.check_value_type('axes', axes, [int, tuple, list], "tensor dot")
    if not isinstance(axes, int):
        axes = list(axes)  # to avoid immutability issues
        if len(axes) != 2:
            raise ValueError("Require two axes inputs, given less")
        axes = _int_to_tuple_conv(axes)  # convert before length checks
        if len(axes[0]) != len(axes[1]):
            raise ValueError("Axes have to be the same size/length")
        if len(axes[0]) != len(set(axes[0])) or len(axes[1]) != len(set(axes[1])):
            raise ValueError("Axes cannot have duplicate values")
    return axes

@constexpr
def _typecheck_input(x1_type, x2_type):
    """
    Check input tensor types to be valid and confirm they are the same type.
    """
    const_utils.check_type_valid(x1_type, [mstype.float32, mstype.float16], 'x1')
    const_utils.check_type_valid(x2_type, [mstype.float32, mstype.float16], 'x2')
    if x1_type != x2_type:
        raise TypeError(f"Both inputs must be the same type. x1 is '{x1_type}' and x2 is '{x2_type}'.")

@constexpr
def _axes_int_check(x1_shape, x2_shape, axes):
    """
    Convert from single int axes to 2d tuple if required.
    """
    if isinstance(axes, int):
        if axes < 0:
            raise ValueError(f"axes must be at least 0 for tensor dot, got {axes}")
        if axes == 0:
            # outer product, no input validation required
            return ([], [])
        if axes > len(x1_shape) or axes > len(x2_shape):
            raise ValueError("Axes value too high for given input arrays dimensions.")
        x1_ind = tuple(range(len(x1_shape))[-1 * axes:])
        x2_ind = tuple(range(len(x2_shape))[:axes])
        axes = tuple((x1_ind, x2_ind))
        axes = _int_to_tuple_conv(axes)
    return axes
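
# A worked sketch (values assumed for illustration, not part of the library):
# with x1_shape=(2, 3, 4), x2_shape=(3, 4, 5) and axes=2, _axes_int_check
# pairs the last two dims of x1 with the first two dims of x2 and returns
# ((1, 2), (0, 1)).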

@constexpr
def _validate_axes(x1_shape, x2_shape, axes):
    """
    Checks that the axes have the correct length for each input, that every
    axis value is in range for the given shape, and that the axes select
    compatible (equal-sized) dimensions in the two inputs.
    """
    shapes = [x1_shape, x2_shape]

    # axis length check
    for ix_input, x_axes in enumerate(axes):
        axes_len = len(x_axes)
        shape_dim_len = len(shapes[ix_input])
        if axes_len > shape_dim_len:
            raise ValueError(f"axes for input: {ix_input + 1} are of length: {axes_len} "
                             f"can only be max: {shape_dim_len} due to input shape.")

    # axis values range check
    for ix_input, x_axes in enumerate(axes):
        comp_shape = shapes[ix_input]
        max_val = len(comp_shape) - 1
        min_val = -1 * len(comp_shape)
        for _, x_value in enumerate(x_axes):
            if not min_val <= x_value <= max_val:
                raise ValueError(f"axes for input: {ix_input + 1} contains index: "
                                 f"{x_value}, but range is: [{min_val}, {max_val}]")

    # check axis value with input shape - both ways for axis valid
    invalid_a = False
    invalid_b = False
    for i in range(len(axes[0])):  # sizes already validated
        if x1_shape[axes[0][i]] != x2_shape[axes[1][i]]:
            invalid_a = True
        if x1_shape[axes[0][i]] != x2_shape[axes[1][len(axes[0]) - 1 - i]]:
            invalid_b = True
    if invalid_a and invalid_b:
        raise ValueError("Given Axes are incompatible with given input arrays")

@constexpr
def _calc_new_shape(shape, axes, position=0):
    """
    Calculate transpose and reshape parameters for input transformations;
    'position' refers to whether the tensor is first or second in the op.
    """
    contraction_axes = tuple(i if i >= 0 else i + len(shape) for i in axes[position])
    prod_contraction = int(np.prod([shape[i] for i in contraction_axes]))
    free_axes = tuple(i for i in range(len(shape)) if i not in contraction_axes)
    free_dims = tuple(shape[i] for i in free_axes)
    prod_free = int(np.prod(free_dims))

    transpose_perm = contraction_axes + free_axes if position else free_axes + contraction_axes
    new_shape = (prod_contraction, prod_free) if position else (prod_free, prod_contraction)
    return new_shape, transpose_perm, free_dims
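
# A worked sketch (values assumed for illustration): for shape=(2, 3, 4),
# axes=((1, 2), (0, 1)) and position=0, the contraction axes are (1, 2), so
# _calc_new_shape returns new_shape=(2, 12), transpose_perm=(0, 1, 2) and
# free_dims=(2,): the free axis stays in front and the contracted axes
# collapse into one dimension of size 3 * 4 = 12, ready for MatMul.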

def tensor_dot(x1, x2, axes):
    """
    Computation of tensor contraction on arbitrary axes between tensors `x1` and `x2`.

    Contraction allows for the summation of products of elements of `x1` and `x2` on specified axes.
    The same number of axes must be specified for both `x1` and `x2`, and values must be within range
    of number of dims of both `x1` and `x2`.

    Selected dims in both inputs must also match.

    axes = 0 leads to outer product.
    axes = 1 leads to normal matrix multiplication when both inputs are 2D.
    axes = 1 is the same as axes = ((1,), (0,)) where both `x1` and `x2` are 2D.
    axes = 2 is the same as axes = ((1, 2), (0, 1)) where both `x1` and `x2` are 3D.

    Inputs:
        - **x1** (Tensor) - First tensor in tensor_dot with datatype float16 or float32
        - **x2** (Tensor) - Second tensor in tensor_dot with datatype float16 or float32
        - **axes** (Union[int, tuple(int), tuple(tuple(int)), list(list(int))]) - Single value or
          tuple/list of length 2 with dimensions specified for `x1` and `x2` each. If a single value `N` is
          passed, automatically picks up the last N dims from the `x1` input shape and the first N dims from
          the `x2` input shape in order as axes for each respectively.

    Outputs:
        Tensor, the shape of the output tensor is :math:`(N + M)`, where :math:`N` and :math:`M` are the free
        axes not contracted in both inputs.

    Raises:
        TypeError: If `x1` or `x2` is not a Tensor.
        TypeError: If `axes` is not one of the following: int, tuple, list.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> input_x1 = Tensor(np.ones(shape=[1, 2, 3]), mindspore.float32)
        >>> input_x2 = Tensor(np.ones(shape=[3, 1, 2]), mindspore.float32)
        >>> output = C.tensor_dot(input_x1, input_x2, ((0, 1), (1, 2)))
        >>> print(output)
        [[2. 2. 2.]
         [2. 2. 2.]
         [2. 2. 2.]]
    """
    shape_op = P.Shape()
    reshape_op = P.Reshape()
    transpose_op = P.Transpose()
    matmul_op = P.MatMul(False, False)

    # input validity checks
    x1_shape = shape_op(x1)
    x2_shape = shape_op(x2)
    x1_type = F.dtype(x1)
    x2_type = F.dtype(x2)
    axes = _check_axes(axes)
    _typecheck_input(x1_type, x2_type)

    # input compatibility check & axes format update
    axes = _axes_int_check(x1_shape, x2_shape, axes)
    _validate_axes(x1_shape, x2_shape, axes)

    x1_reshape_fwd, x1_transpose_fwd, x1_ret = _calc_new_shape(x1_shape, axes, 0)
    x2_reshape_fwd, x2_transpose_fwd, x2_ret = _calc_new_shape(x2_shape, axes, 1)
    output_shape = x1_ret + x2_ret  # combine free axes from both inputs

    # run tensor_dot op
    x1_transposed = transpose_op(x1, x1_transpose_fwd)
    x2_transposed = transpose_op(x2, x2_transpose_fwd)
    x1_reshaped = reshape_op(x1_transposed, x1_reshape_fwd)
    x2_reshaped = reshape_op(x2_transposed, x2_reshape_fwd)
    mul_result = matmul_op(x1_reshaped, x2_reshaped)
    final_result = reshape_op(mul_result, output_shape)
    return final_result

@constexpr
def _check_invalid_input(x1_shape, x2_shape):
    if len(x1_shape) < 2 or len(x2_shape) < 2:
        raise ValueError('C.dot inputs x1, x2 should have dimension >= 2, '
                         + f'while x1 is ({len(x1_shape)}) and x2 is ({len(x2_shape)}).')

@constexpr
def _typecheck_input_dot(x1_type, x2_type):
    """
    Check input tensor types to be valid and confirm they are the same type for dot and batch dot ops.
    """
    const_utils.check_type_valid(x1_type, [mstype.float16, mstype.float32], 'x1')
    const_utils.check_type_valid(x2_type, [mstype.float16, mstype.float32], 'x2')
    if x1_type != x2_type:
        raise TypeError(f"Both inputs must be the same type. x1 is '{x1_type}' and x2 is '{x2_type}'.")

@constexpr
def _get_transpose_shape(x2_shape):
    x2_shape_range = tuple(range(len(x2_shape)))
    x2_shape_transpose = x2_shape_range[-2:-1] + x2_shape_range[:-2] + x2_shape_range[-1:]
    return x2_shape_transpose
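
# A worked sketch (assumed rank-3 input): for x2_shape=(1, 3, 2) the index
# range is (0, 1, 2) and _get_transpose_shape returns (1, 0, 2), moving the
# contraction dim (-2) to the front while keeping the last dim in place.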

def dot(x1, x2):
    """
    Computes the dot product between samples in two tensors.

    Inputs:
        - **x1** (Tensor) - First tensor in Dot op with datatype float16 or float32
        - **x2** (Tensor) - Second tensor in Dot op with datatype float16 or float32

    Outputs:
        Tensor, dot product of x1 and x2.

    Raises:
        TypeError: If types of x1 and x2 are not the same.
        TypeError: If dtype of x1 or x2 is not float16 or float32.
        ValueError: If rank of x1 or x2 is less than 2.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> input_x1 = Tensor(np.ones(shape=[2, 3]), mindspore.float32)
        >>> input_x2 = Tensor(np.ones(shape=[1, 3, 2]), mindspore.float32)
        >>> output = C.dot(input_x1, input_x2)
        >>> print(output)
        [[[3. 3.]]
         [[3. 3.]]]
    """
    shape_op = P.Shape()
    reshape_op = P.Reshape()
    transpose_op = P.Transpose()
    matmul_op = P.MatMul(False, False)
    x1_shape = shape_op(x1)
    x2_shape = shape_op(x2)
    x1_type = F.dtype(x1)
    x2_type = F.dtype(x2)
    _typecheck_input_dot(x1_type, x2_type)
    _check_invalid_input(x1_shape, x2_shape)

    if len(x1_shape) > 2 or len(x2_shape) > 2:
        x2_shape_transpose = _get_transpose_shape(x2_shape)
        x2_transpose = transpose_op(x2, x2_shape_transpose)
        x1_reshape = reshape_op(x1, (-1, x1_shape[-1]))
        x2_reshape = reshape_op(x2_transpose, (x2_shape[-2], -1))
        mul_result = matmul_op(x1_reshape, x2_reshape)
        return reshape_op(mul_result, x1_shape[:-1] + x2_shape[:-2] + x2_shape[-1:])
    return matmul_op(x1, x2)

@constexpr
def _get_batch_size(x1_shape, x2_shape):
    """
    Get batch sizes from two inputs.
    """
    if len(x1_shape) < 2 or len(x2_shape) < 2:
        raise ValueError("Require both inputs with rank >= 2.")
    return x1_shape[0], x2_shape[0]

@constexpr
def _typecheck_input_batch_dot(x1_type, x2_type):
    """
    Check input tensor types to be valid and confirm they are the same type for batch dot ops.
    """
    const_utils.check_type_valid(x1_type, [mstype.float32], 'x1')
    const_utils.check_type_valid(x2_type, [mstype.float32], 'x2')
    if x1_type != x2_type:
        raise TypeError(f"Both inputs must be the same type. x1 is '{x1_type}' and x2 is '{x2_type}'.")

@constexpr
def _check_axes_for_batch_dot(x1_shape, x2_shape, axes):
    """
    Check whether axes are valid and cast axes from tuple to list.
    """
    if axes is None:
        if len(x2_shape) == 2:
            axes = [len(x1_shape) - 1, len(x2_shape) - 1]
        else:
            axes = [len(x1_shape) - 1, len(x2_shape) - 2]

    if isinstance(axes, (list, tuple)):
        if 0 in axes:
            raise ValueError("Batch dim cannot be used in axes.")
        if len(axes) != 2:
            raise ValueError("Require two axes inputs, given less")
        if isinstance(axes, tuple):
            axes = list(axes)
        validator.check_value_type('axes[0]', axes[0], [int], 'batch_dot')
        validator.check_value_type('axes[1]', axes[1], [int], 'batch_dot')
        # normalize negative axes per input
        if axes[0] < 0:
            axes[0] += len(x1_shape)
        if axes[1] < 0:
            axes[1] += len(x2_shape)
        validator.check_non_negative_int(axes[0], 'reversed axes[0]', 'batch_dot')
        validator.check_non_negative_int(axes[1], 'reversed axes[1]', 'batch_dot')
        if axes[0] > len(x1_shape) or axes[1] > len(x2_shape):
            raise ValueError("Axes value too high for given input arrays dimensions.")
    elif isinstance(axes, int):
        if axes == 0:
            raise ValueError("Batch dim cannot be used in axes.")
        if axes < 0:
            axes = [axes + len(x1_shape), axes + len(x2_shape)]
            validator.check_non_negative_int(axes[0], 'reversed axes', 'batch_dot')
        elif axes > len(x1_shape) or axes > len(x2_shape):
            raise ValueError("Axes value too high for given input arrays dimensions.")
        else:
            axes = [axes, axes]
    else:
        raise ValueError("Axes type must be one of those: int, tuple(int), list(int).")
    return axes
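
# A worked sketch (values assumed for illustration): with x1_shape=(2, 2, 3),
# x2_shape=(2, 3, 2) and axes=-1, the negative int is normalized per input to
# [-1 + 3, -1 + 3] = [2, 2]; axes=None would instead default to [2, 1]
# (last dim of x1, second-to-last dim of x2).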

@constexpr
def _calc_new_shape_batchdot(shape, axes, position=0):
    """
    Calculate transpose and reshape parameters for input transformations;
    'position' refers to whether the tensor is first or second in the op.
    """
    axis = axes[position]
    contraction_axes = tuple([axis])
    prod_contraction = int(np.prod([shape[i] for i in contraction_axes]))
    free_axes = tuple(i for i in range(1, len(shape)) if i not in contraction_axes)
    free_dims = tuple(shape[i] for i in free_axes)
    prod_free = int(np.prod(free_dims))

    transpose_perm = contraction_axes + free_axes if position else free_axes + contraction_axes
    transpose_perm = tuple([0]) + transpose_perm
    new_shape = (prod_contraction, prod_free) if position else (prod_free, prod_contraction)
    new_shape = tuple([shape[0]]) + new_shape
    return new_shape, transpose_perm, free_dims
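
# A worked sketch (values assumed for illustration): for shape=(2, 3, 4),
# axes=[1, 1] and position=0, the contraction axis is 1, giving
# new_shape=(2, 4, 3), transpose_perm=(0, 2, 1) and free_dims=(4,); batch
# dim 0 is always kept in front and never contracted.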

@constexpr
def _check_batch_size(x1_batch_size, x2_batch_size):
    """
    Check whether batch sizes of the two inputs are the same.
    """
    if x1_batch_size != x2_batch_size:
        raise ValueError("Require both inputs with the same batch sizes.")


@constexpr
def _get_output_shape(batch_size, x1_ret, x2_ret):
    """
    Compute output shape for batch dot.
    """
    output_shape = tuple([batch_size]) + x1_ret + x2_ret
    return output_shape

def batch_dot(x1, x2, axes=None):
    """
    Computation of batch dot product between samples in two tensors containing batch dims.

    .. math::

        output = x1[batch, :] * x2[batch, :]

    Inputs:
        - **x1** (Tensor) - First tensor in Batch Dot op with datatype float32
        - **x2** (Tensor) - Second tensor in Batch Dot op with datatype float32. x2's datatype should
          be same as x1's.
        - **axes** (Union[int, tuple(int), list(int)]) - Single value or tuple/list of length 2 with dimensions
          specified for `x1` and `x2` each. If a single value `N` is passed, automatically picks up the last N
          dims from the `x1` input shape and the last N dims from the `x2` input shape in order as axes for each
          respectively.

    Outputs:
        Tensor, batch dot product of x1 and x2. The shape of the output for input shapes (batch, d1, axes, d2)
        and (batch, d3, axes, d4) is (batch, d1, d2, d3, d4).

    Raises:
        TypeError: If types of x1 and x2 are not the same.
        TypeError: If dtype of x1 or x2 is not float32.
        ValueError: If rank of x1 or x2 is less than 2.
        ValueError: If batch dim is used in axes.
        ValueError: If len(axes) is less than 2.
        ValueError: If axes is not one of those: None, int, (int, int).
        ValueError: If axes reversed from negative int is too low for dimensions of input arrays.
        ValueError: If axes value is too high for dimensions of input arrays.
        ValueError: If batch sizes of x1 and x2 are not the same.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> input_x1 = Tensor(np.ones(shape=[2, 2, 3]), mindspore.float32)
        >>> input_x2 = Tensor(np.ones(shape=[2, 3, 2]), mindspore.float32)
        >>> axes = (-1, -2)
        >>> output = C.batch_dot(input_x1, input_x2, axes)
        >>> print(output)
        [[[3. 3.]
          [3. 3.]]
         [[3. 3.]
          [3. 3.]]]
    """
    transpose_op = P.Transpose()
    batch_matmul_op = P.BatchMatMul()
    squeeze_one_op = P.Squeeze(1)
    squeeze_minus_one_op = P.Squeeze(-1)

    # input validity checks
    x1_shape = F.shape(x1)
    x2_shape = F.shape(x2)
    x1_dim_num = len(x1_shape)
    x2_dim_num = len(x2_shape)
    x1_type = F.dtype(x1)
    x2_type = F.dtype(x2)
    x1_batch_size, x2_batch_size = _get_batch_size(x1_shape, x2_shape)

    _typecheck_input_batch_dot(x1_type, x2_type)
    _check_batch_size(x1_batch_size, x2_batch_size)
    axes = _check_axes_for_batch_dot(x1_shape, x2_shape, axes)

    if x1_dim_num == 2:
        x1 = F.expand_dims(x1, 1)
        axes[0] += 1
    if x2_dim_num == 2:
        x2 = F.expand_dims(x2, 2)

    x1_shape = F.shape(x1)
    x2_shape = F.shape(x2)

    x1_reshape_fwd, x1_transpose_fwd, x1_ret = _calc_new_shape_batchdot(x1_shape, axes, 0)
    x2_reshape_fwd, x2_transpose_fwd, x2_ret = _calc_new_shape_batchdot(x2_shape, axes, 1)
    output_shape = _get_output_shape(x1_batch_size, x1_ret, x2_ret)

    x1_transposed = transpose_op(x1, x1_transpose_fwd)
    x2_transposed = transpose_op(x2, x2_transpose_fwd)
    x1_reshaped = F.reshape(x1_transposed, x1_reshape_fwd)
    x2_reshaped = F.reshape(x2_transposed, x2_reshape_fwd)

    # batch matmul op part
    mul_result = batch_matmul_op(x1_reshaped, x2_reshaped)
    final_result = F.reshape(mul_result, output_shape)

    # if the original dims were expanded, restore them from 3 back to 2
    if x1_dim_num == 2:
        final_result = squeeze_one_op(final_result)
    elif x2_dim_num == 2:
        final_result = squeeze_minus_one_op(final_result)
    return final_result

@constexpr
def _check_same_type(dtype1, dtype2):
    return dtype1 == dtype2


@constexpr
def _max(*args):
    """Returns the maximum value."""
    return max(*args)


@constexpr
def _min(*args):
    """Returns the minimum value."""
    return min(*args)


@constexpr
def _infer_shape_rem(shape1, shape2, ndim1, ndim2, transpose_b):
    """Infers the shape of the last two dimensions after performing matmul."""
    shape_rem = []
    if ndim1 >= 2:
        shape_rem.append(shape1[-2])
    if transpose_b:
        if ndim2 >= 2:
            shape_rem.append(shape2[-2])
    else:
        if ndim1 >= 1:
            shape_rem.append(shape2[-1])
    return tuple(shape_rem)

@constexpr
def _check_matmul_shapes(shape1, shape2):
    """Checks shape1 and shape2 are valid to perform matmul, and returns output shape after broadcasting."""
    ndim1, ndim2 = len(shape1), len(shape2)
    if ndim1 < 1 or ndim2 < 1:
        raise ValueError('input operands must have at least 1 dimension')
    if ndim2 >= 2 and shape1[-1] != shape2[-2]:
        raise ValueError(f'mismatch in core dimension of input operands (size '
                         f'{shape1[-1]} is different from {shape2[-2]})')
    shape_out = deque()
    for items in zip_longest(reversed(shape1[:-2]), reversed(shape2[:-2]), fillvalue=1):
        max_size = max(items)
        if any(item not in (1, max_size) for item in items):
            raise ValueError(f'operands could not be broadcast together with shapes {shape1} {shape2}')
        shape_out.appendleft(max_size)
    return tuple(shape_out)
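
# A worked sketch (values assumed for illustration): shape1=(2, 1, 3, 4) and
# shape2=(1, 5, 4, 6) pass the core-dimension check (4 == 4), and the batch
# prefixes (2, 1) and (1, 5) broadcast to (2, 5), which is the returned
# backbone shape.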

@constexpr
def _tile_size(shape, out_shape, ndim):
    """Returns tile_size such that shape * tile_size == out_shape."""
    size = [1] * ndim
    for idx, (i, j) in enumerate(zip(shape, out_shape)):
        if i != j:
            size[idx] = j
    return tuple(size)
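
# A worked sketch (values assumed for illustration): _tile_size((1, 5), (4, 5), 2)
# returns (4, 1), i.e. tiling the size-1 axis four times reproduces the
# broadcast output shape.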

@constexpr
def _check_need_broadcast(shape1, shape2):
    """Returns True if broadcast is necessary for batchmatmul."""
    return shape1[:-2] != shape2[:-2]


def _expand(x, ndim):
    """Expand x to ndim by prepending size-1 axes."""
    while F.rank(x) < ndim:
        x = F.expand_dims(x, 0)
    return x


def _broadcast_to(x, shape_cur, shape_to, ndim_to):
    """Broadcasts x from shape_cur to shape_to."""
    size = _tile_size(shape_cur, shape_to, ndim_to)
    return F.tile(x, size)

def matmul(x1, x2, dtype=None):
    """
    Returns the matrix product of two arrays.

    Note:
        Numpy arguments `out`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On GPU, the supported dtypes are np.float16 and np.float32.
        On CPU, the supported dtypes are np.float16 and np.float32.

    Args:
        x1 (Tensor): Input tensor, scalar not allowed.
        x2 (Tensor): Input tensor, scalar not allowed.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the matrix product of the inputs. This is a scalar only
        when both `x1`, `x2` are 1-d vectors.

    Raises:
        ValueError: If the last dimension of `x1` is not the same size as the
            second-to-last dimension of `x2`, or if a scalar value is passed in.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x1 = Tensor(np.arange(2*3*4).reshape(2, 3, 4), mindspore.float32)
        >>> x2 = Tensor(np.arange(4*5).reshape(4, 5), mindspore.float32)
        >>> output = ops.matmul(x1, x2)
        >>> print(output)
        [[[  70.   76.   82.   88.   94.]
          [ 190.  212.  234.  256.  278.]
          [ 310.  348.  386.  424.  462.]]
         [[ 430.  484.  538.  592.  646.]
          [ 550.  620.  690.  760.  830.]
          [ 670.  756.  842.  928. 1014.]]]
    """
    # performs type promotion
    dtype1 = F.dtype(x1)
    dtype2 = F.dtype(x2)
    if not _check_same_type(dtype1, dtype2):
        x1 = x1.astype(mstype.float32)
        x2 = x2.astype(mstype.float32)

    ndim1_orig, ndim2_orig = F.rank(x1), F.rank(x2)
    shape1_orig, shape2_orig = F.shape(x1), F.shape(x2)
    transpose_b = ndim2_orig == 1
    shape_backbone = _check_matmul_shapes(shape1_orig, shape2_orig)
    # infers the shape of the output
    shape_out = shape_backbone + _infer_shape_rem(shape1_orig, shape2_orig,
                                                  ndim1_orig, ndim2_orig, transpose_b)

    x1 = _expand(x1, 2)
    x2 = _expand(x2, 2)
    if F.rank(x2) == 2:
        if F.rank(x1) > 2:
            x1 = F.reshape(x1, (-1, shape1_orig[-1]))
        res = P.MatMul(False, transpose_b)(x1, x2)
    else:
        # broadcasts x1.shape[:-2] with x2.shape[:-2]
        ndim_aligned = _max(ndim1_orig, ndim2_orig)
        x1 = _expand(x1, ndim_aligned)
        x2 = _expand(x2, ndim_aligned)
        shape1_aligned, shape2_aligned = F.shape(x1), F.shape(x2)
        x1 = _broadcast_to(x1, shape1_aligned[:-2], shape_backbone, ndim_aligned)
        x2 = _broadcast_to(x2, shape2_aligned[:-2], shape_backbone, ndim_aligned)
        res = P.BatchMatMul(False, transpose_b)(x1, x2)

    if dtype is not None:
        res = res.astype(dtype)
    return F.reshape(res, shape_out)