You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

test_nn_ops_check.py 23 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """ test ops """
  16. import functools
  17. import numpy as np
  18. from mindspore import ops
  19. from mindspore.ops import functional as F
  20. from mindspore.ops import operations as P
  21. from mindspore.ops import composite as C
  22. from mindspore.ops.operations import _grad_ops as G
  23. import mindspore.nn as nn
  24. from mindspore import Tensor
  25. from mindspore.common import dtype as mstype
  26. from mindspore.common.parameter import Parameter
  27. from ..ut_filter import non_graph_engine
  28. from mindspore.common.api import _executor
  29. from ....mindspore_test_framework.mindspore_test import mindspore_test
  30. from ....mindspore_test_framework.pipeline.forward.compile_forward\
  31. import (pipeline_for_compile_forward_ge_graph_for_case_by_case_config,
  32. pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception)
  33. from ....mindspore_test_framework.pipeline.gradient.compile_gradient\
  34. import pipeline_for_compile_grad_ge_graph_for_case_by_case_config
  35. class Conv2DBackpropInputNet(nn.Cell):
  36. def __init__(self, net, x_shape):
  37. super(Conv2DBackpropInputNet, self).__init__()
  38. self.net = net
  39. self.x_shape = x_shape
  40. def construct(self, dout, w):
  41. return self.net(dout, w, self.x_shape)
  42. class TopKNet(nn.Cell):
  43. def __init__(self, net, k):
  44. super(TopKNet, self).__init__()
  45. self.net = net
  46. self.k = k
  47. def construct(self, x):
  48. return self.net(x, self.k)
# Table of negative test cases consumed by test_check_exception below.
# Each entry is (case_name, config) where:
#   'block'       -> (op-or-wrapper, {'exception': expected exception type,
#                     'error_keywords': substrings expected in the message})
#   'desc_inputs' -> inputs deliberately chosen to trigger the exception
#   'skip'        -> pipeline stages to skip ('backward' everywhere, since the
#                    forward compile itself is expected to raise)
raise_set = [
    # input is scalar
    ('Flatten0', {
        'block': (P.Flatten(), {'exception': TypeError, 'error_keywords': ['Flatten']}),
        'desc_inputs': [5.0],
        'skip': ['backward']}),
    # dim of input is zero
    ('Flatten1', {
        'block': (P.Flatten(), {'exception': ValueError, 'error_keywords': ['Flatten']}),
        'desc_inputs': [F.scalar_to_tensor(5.0)],
        'skip': ['backward']}),
    # input is scalar
    ('Softmax0', {
        'block': (P.Softmax(), {'exception': TypeError, 'error_keywords': ['Softmax']}),
        'desc_inputs': [5.0],
        'skip': ['backward']}),
    # axis is empty tuple
    ('Softmax1', {
        'block': (P.Softmax(axis=()), {'exception': ValueError, 'error_keywords': ['Softmax']}),
        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32))],
        'skip': ['backward']}),
    # axis value is not in range
    ('Softmax2', {
        'block': (P.Softmax(axis=2), {'exception': ValueError, 'error_keywords': ['Softmax']}),
        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32))],
        'skip': ['backward']}),
    # input is scalar
    ('LogSoftmax0', {
        'block': (P.LogSoftmax(), {'exception': TypeError, 'error_keywords': ['LogSoftmax']}),
        'desc_inputs': [5.0],
        'skip': ['backward']}),
    # axis value is not in range
    ('LogSoftmax1', {
        'block': (P.LogSoftmax(axis=2), {'exception': ValueError, 'error_keywords': ['LogSoftmax']}),
        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32))],
        'skip': ['backward']}),
    # input is scalar
    ('ReLU0', {
        'block': (P.ReLU(), {'exception': TypeError, 'error_keywords': ['ReLU']}),
        'desc_inputs': [5.0],
        'skip': ['backward']}),
    # input is Tensor(Bool)
    ('ReLU1', {
        'block': (P.ReLU(), {'exception': TypeError, 'error_keywords': ['ReLU']}),
        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.bool_))],
        'skip': ['backward']}),
    # input is scalar
    ('ReLU60', {
        'block': (P.ReLU6(), {'exception': TypeError, 'error_keywords': ['ReLU6']}),
        'desc_inputs': [5.0],
        'skip': ['backward']}),
    # input is Tensor(int32) -- ReLU6 requires a float tensor
    ('ReLU61', {
        'block': (P.ReLU6(), {'exception': TypeError, 'error_keywords': ['ReLU6']}),
        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32))],
        'skip': ['backward']}),
    # input is scalar
    ('Elu0', {
        'block': (P.Elu(), {'exception': TypeError, 'error_keywords': ['Elu']}),
        'desc_inputs': [5.0],
        'skip': ['backward']}),
    # input is Tensor(int32) -- Elu requires a float tensor
    ('Elu1', {
        'block': (P.Elu(alpha=0.9), {'exception': TypeError, 'error_keywords': ['Elu']}),
        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32))],
        'skip': ['backward']}),
    # input is scalar
    ('Sigmoid0', {
        'block': (P.Sigmoid(), {'exception': TypeError, 'error_keywords': ['Sigmoid']}),
        'desc_inputs': [5.0],
        'skip': ['backward']}),
    # input is Tensor(int32) -- Sigmoid requires a float tensor
    ('Sigmoid1', {
        'block': (P.Sigmoid(), {'exception': TypeError, 'error_keywords': ['Sigmoid']}),
        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32))],
        'skip': ['backward']}),
    # input is scalar
    ('Tanh0', {
        'block': (P.Tanh(), {'exception': TypeError, 'error_keywords': ['Tanh']}),
        'desc_inputs': [5.0],
        'skip': ['backward']}),
    # input is scalar -- BatchNorm takes (x, scale, bias, mean, variance)
    ('BatchNorm0', {
        'block': (P.BatchNorm(is_training=False), {'exception': TypeError, 'error_keywords': ['BatchNorm']}),
        'desc_inputs': [5.0, 5.0, 5.0, 5.0, 5.0],
        'skip': ['backward']}),
    # is_training=False and mean=None
    ('BatchNorm1', {
        'block': (P.BatchNorm(is_training=False), {'exception': TypeError, 'error_keywords': ['BatchNorm']}),
        'desc_inputs': [Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([5, 3]).astype(np.float32)),
                        Tensor(np.ones([5, 3]).astype(np.float32)), None, None],
        'skip': ['backward']}),
    # is_training=True and mean dtype (float16) mismatches the others
    ('BatchNorm2', {
        'block': (P.BatchNorm(is_training=True), {'exception': TypeError, 'error_keywords': ['BatchNorm']}),
        'desc_inputs': [Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32)),
                        Tensor(np.ones([3]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float16)),
                        Tensor(np.ones([3]).astype(np.float32))],
        'skip': ['backward']}),
    # scale and bias rank > 1
    ('BatchNorm3', {
        'block': (P.BatchNorm(is_training=True), {'exception': ValueError, 'error_keywords': ['BatchNorm']}),
        'desc_inputs': [Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([5, 3]).astype(np.float32)),
                        Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32)),
                        Tensor(np.ones([3]).astype(np.float32))],
        'skip': ['backward']}),
    # scale and bias shape not match
    ('BatchNorm4', {
        'block': (P.BatchNorm(is_training=True), {'exception': ValueError, 'error_keywords': ['BatchNorm']}),
        'desc_inputs': [Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32)),
                        Tensor(np.ones([7]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32)),
                        Tensor(np.ones([3]).astype(np.float32))],
        'skip': ['backward']}),
    # is_training=False, mean and variance shape not match
    ('BatchNorm5', {
        'block': (P.BatchNorm(is_training=False), {'exception': ValueError, 'error_keywords': ['BatchNorm']}),
        'desc_inputs': [Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32)),
                        Tensor(np.ones([3]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32)),
                        Tensor(np.ones([5]).astype(np.float32))],
        'skip': ['backward']}),
    # is_training=False, mean and scale shape not match
    ('BatchNorm6', {
        'block': (P.BatchNorm(is_training=False), {'exception': ValueError, 'error_keywords': ['BatchNorm']}),
        'desc_inputs': [Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32)),
                        Tensor(np.ones([3]).astype(np.float32)), Tensor(np.ones([5]).astype(np.float32)),
                        Tensor(np.ones([5]).astype(np.float32))],
        'skip': ['backward']}),
    # input is scalar
    ('Conv2D0', {
        'block': (P.Conv2D(2, (5, 5)), {'exception': TypeError, 'error_keywords': ['Conv2D']}),
        'desc_inputs': [5.0, 5.0],
        'skip': ['backward']}),
    # input is Tensor(bool)
    ('Conv2D1', {
        'block': (P.Conv2D(2, (5, 5)), {'exception': TypeError, 'error_keywords': ['Conv2D']}),
        'desc_inputs': [Tensor(np.ones([5]).astype(np.bool_)), Tensor(np.ones([5]).astype(np.bool_))],
        'skip': ['backward']}),
    # input x and w type mismatch
    ('Conv2D2', {
        'block': (P.Conv2D(2, (5, 5)), {'exception': TypeError, 'error_keywords': ['Conv2D']}),
        'desc_inputs': [Tensor(np.ones([5]).astype(np.float32)), Tensor(np.ones([5]).astype(np.float16))],
        'skip': ['backward']}),
    # rank of x is not 4
    ('Conv2D3', {
        'block': (P.Conv2D(2, (5, 5)), {'exception': ValueError, 'error_keywords': ['Conv2D']}),
        'desc_inputs': [Tensor(np.ones([1, 1]).astype(np.float32)), Tensor(np.ones([1,1,9,9]).astype(np.float32))],
        'skip': ['backward']}),
    # rank of w is not 4
    ('Conv2D4', {
        'block': (P.Conv2D(2, (5, 5)), {'exception': ValueError, 'error_keywords': ['Conv2D']}),
        'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([1,1,9]).astype(np.float32))],
        'skip': ['backward']}),
    # x_shape[1] / group != w_shape[1]
    ('Conv2D5', {
        'block': (P.Conv2D(2, (5, 5)), {'exception': ValueError, 'error_keywords': ['Conv2D']}),
        'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([1,2,9,9]).astype(np.float32))],
        'skip': ['backward']}),
    # out_channel != w_shape[0]
    ('Conv2D6', {
        'block': (P.Conv2D(2, (5, 5)), {'exception': ValueError, 'error_keywords': ['Conv2D']}),
        'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([1,1,9,9]).astype(np.float32))],
        'skip': ['backward']}),
    # kernel_size != w_shape[2:4]
    ('Conv2D7', {
        'block': (P.Conv2D(2, (5, 5)), {'exception': ValueError, 'error_keywords': ['Conv2D']}),
        'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([2,1,5,6]).astype(np.float32))],
        'skip': ['backward']}),
    # input is scalar
    ('DepthwiseConv2dNative0', {
        'block': (P.DepthwiseConv2dNative(2, (5, 5)),
                  {'exception': TypeError, 'error_keywords': ['DepthwiseConv2dNative']}),
        'desc_inputs': [5.0, 5.0],
        'skip': ['backward']}),
    # input is Tensor(bool)
    ('DepthwiseConv2dNative1', {
        'block': (P.DepthwiseConv2dNative(2, (5, 5)),
                  {'exception': TypeError, 'error_keywords': ['DepthwiseConv2dNative']}),
        'desc_inputs': [Tensor(np.ones([5]).astype(np.bool_)), Tensor(np.ones([5]).astype(np.bool_))],
        'skip': ['backward']}),
    # input x and w type mismatch
    ('DepthwiseConv2dNative2', {
        'block': (P.DepthwiseConv2dNative(2, (5, 5)),
                  {'exception': TypeError, 'error_keywords': ['DepthwiseConv2dNative']}),
        'desc_inputs': [Tensor(np.ones([5]).astype(np.float32)), Tensor(np.ones([5]).astype(np.float16))],
        'skip': ['backward']}),
    # rank of x is not 4
    ('DepthwiseConv2dNative3', {
        'block': (P.DepthwiseConv2dNative(2, (5, 5)),
                  {'exception': ValueError, 'error_keywords': ['DepthwiseConv2dNative']}),
        'desc_inputs': [Tensor(np.ones([1, 1]).astype(np.float32)), Tensor(np.ones([1,1,9,9]).astype(np.float32))],
        'skip': ['backward']}),
    # rank of w is not 4
    ('DepthwiseConv2dNative4', {
        'block': (P.DepthwiseConv2dNative(2, (5, 5)),
                  {'exception': ValueError, 'error_keywords': ['DepthwiseConv2dNative']}),
        'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([1,1,9]).astype(np.float32))],
        'skip': ['backward']}),
    # x_shape[1] != w_shape[1]
    ('DepthwiseConv2dNative5', {
        'block': (P.DepthwiseConv2dNative(2, (5, 5)),
                  {'exception': ValueError, 'error_keywords': ['DepthwiseConv2dNative']}),
        'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([1,2,9,9]).astype(np.float32))],
        'skip': ['backward']}),
    # kernel_size != w_shape[2:4]
    ('DepthwiseConv2dNative6', {
        'block': (P.DepthwiseConv2dNative(2, (5, 5)),
                  {'exception': ValueError, 'error_keywords': ['DepthwiseConv2dNative']}),
        'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([2,1,5,6]).astype(np.float32))],
        'skip': ['backward']}),
    # input is scalar
    ('MaxPoolWithArgmax0', {
        'block': (P.MaxPoolWithArgmax(), {'exception': TypeError, 'error_keywords': ['MaxPoolWithArgmax']}),
        'desc_inputs': [5.0],
        'skip': ['backward']}),
    # input is Tensor(bool)
    ('MaxPoolWithArgmax1', {
        'block': (P.MaxPoolWithArgmax(), {'exception': TypeError, 'error_keywords': ['MaxPoolWithArgmax']}),
        'desc_inputs': [Tensor(np.ones([5]).astype(np.bool_))],
        'skip': ['backward']}),
    # rank of x is not 4
    ('MaxPoolWithArgmax2', {
        'block': (P.MaxPoolWithArgmax(), {'exception': ValueError, 'error_keywords': ['MaxPoolWithArgmax']}),
        'desc_inputs': [Tensor(np.ones([1,1,32]).astype(np.float32))],
        'skip': ['backward']}),
    # kernel size is invalid (larger than the 32x32 input)
    ('MaxPoolWithArgmax3', {
        'block': (P.MaxPoolWithArgmax(ksize=50), {'exception': ValueError, 'error_keywords': ['MaxPoolWithArgmax']}),
        'desc_inputs': [Tensor(np.ones([1,1,32,32]).astype(np.float32))],
        'skip': ['backward']}),
    # input is scalar
    ('MaxPool0', {
        'block': (P.MaxPool(), {'exception': TypeError, 'error_keywords': ['MaxPool']}),
        'desc_inputs': [5.0],
        'skip': ['backward']}),
    # rank of x is not 4
    ('MaxPool1', {
        'block': (P.MaxPool(), {'exception': ValueError, 'error_keywords': ['MaxPool']}),
        'desc_inputs': [Tensor(np.ones([1,1,32]).astype(np.float32))],
        'skip': ['backward']}),
    # kernel size is invalid (larger than the 32x32 input)
    ('MaxPool2', {
        'block': (P.MaxPool(ksize=50, strides=1), {'exception': ValueError, 'error_keywords': ['MaxPool']}),
        'desc_inputs': [Tensor(np.ones([1,1,32,32]).astype(np.float32))],
        'skip': ['backward']}),
    # input is scalar
    ('AvgPool0', {
        'block': (P.AvgPool(), {'exception': TypeError, 'error_keywords': ['AvgPool']}),
        'desc_inputs': [5.0],
        'skip': ['backward']}),
    # rank of x is not 4
    ('AvgPool1', {
        'block': (P.AvgPool(), {'exception': ValueError, 'error_keywords': ['AvgPool']}),
        'desc_inputs': [Tensor(np.ones([1,1,32]).astype(np.float32))],
        'skip': ['backward']}),
    # kernel size is invalid (larger than the 32x32 input)
    ('AvgPool2', {
        'block': (P.AvgPool(ksize=50, strides=1), {'exception': ValueError, 'error_keywords': ['AvgPool']}),
        'desc_inputs': [Tensor(np.ones([1,1,32,32]).astype(np.float32))],
        'skip': ['backward']}),
    # input is scalar
    ('Conv2DBackpropInput0', {
        'block': (Conv2DBackpropInputNet(P.Conv2DBackpropInput(2, (5, 5)), (2,3)),
                  {'exception': TypeError, 'error_keywords': ['Conv2DBackpropInput']}),
        'desc_inputs': [5.0, 5.0],
        'skip': ['backward']}),
    # input is Tensor(bool)
    ('Conv2DBackpropInput1', {
        'block': (Conv2DBackpropInputNet(P.Conv2DBackpropInput(2, (5, 5)), (2,3)),
                  {'exception': TypeError, 'error_keywords': ['Conv2DBackpropInput']}),
        'desc_inputs': [Tensor(np.ones([5]).astype(np.bool_)), Tensor(np.ones([5]).astype(np.bool_))],
        'skip': ['backward']}),
    # types of doutput and w mismatch
    ('Conv2DBackpropInput2', {
        'block': (Conv2DBackpropInputNet(P.Conv2DBackpropInput(2, (5, 5)), (2,3)),
                  {'exception': TypeError, 'error_keywords': ['Conv2DBackpropInput']}),
        'desc_inputs': [Tensor(np.ones([5]).astype(np.int32)), Tensor(np.ones([5]).astype(np.float32))],
        'skip': ['backward']}),
    # x_size is not a tuple
    ('Conv2DBackpropInput3', {
        'block': (Conv2DBackpropInputNet(P.Conv2DBackpropInput(2, (5, 5)), 2),
                  {'exception': TypeError, 'error_keywords': ['Conv2DBackpropInput']}),
        'desc_inputs': [Tensor(np.ones([5]).astype(np.int32)), Tensor(np.ones([5]).astype(np.float32))],
        'skip': ['backward']}),
    # x_size is not a tuple of ints (contains a float)
    ('Conv2DBackpropInput4', {
        'block': (Conv2DBackpropInputNet(P.Conv2DBackpropInput(2, (5, 5)), (2, 3.0)),
                  {'exception': TypeError, 'error_keywords': ['Conv2DBackpropInput']}),
        'desc_inputs': [Tensor(np.ones([5]).astype(np.int32)), Tensor(np.ones([5]).astype(np.float32))],
        'skip': ['backward']}),
    # input is scalar
    ('BiasAdd0', {
        'block': (P.BiasAdd(), {'exception': TypeError, 'error_keywords': ['BiasAdd']}),
        'desc_inputs': [5.0, 5.0],
        'skip': ['backward']}),
    # input is Tensor(bool)
    ('BiasAdd1', {
        'block': (P.BiasAdd(), {'exception': TypeError, 'error_keywords': ['BiasAdd']}),
        'desc_inputs': [Tensor(np.ones([5]).astype(np.bool_)), Tensor(np.ones([5]).astype(np.bool_))],
        'skip': ['backward']}),
    # types of x and bias mismatch
    ('BiasAdd2', {
        'block': (P.BiasAdd(), {'exception': TypeError, 'error_keywords': ['BiasAdd']}),
        'desc_inputs': [Tensor(np.ones([5]).astype(np.int32)), Tensor(np.ones([5]).astype(np.float32))],
        'skip': ['backward']}),
    # rank of x less than 2
    ('BiasAdd3', {
        'block': (P.BiasAdd(), {'exception': ValueError, 'error_keywords': ['BiasAdd']}),
        'desc_inputs': [Tensor(np.ones([5]).astype(np.float32)), Tensor(np.ones([5]).astype(np.float32))],
        'skip': ['backward']}),
    # rank of bias is not equal to 1
    ('BiasAdd4', {
        'block': (P.BiasAdd(), {'exception': ValueError, 'error_keywords': ['BiasAdd']}),
        'desc_inputs': [Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([5, 3]).astype(np.float32))],
        'skip': ['backward']}),
    # b_shape[0] != x_shape[1]
    ('BiasAdd5', {
        'block': (P.BiasAdd(), {'exception': ValueError, 'error_keywords': ['BiasAdd']}),
        'desc_inputs': [Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([5]).astype(np.float32))],
        'skip': ['backward']}),
    # input x is scalar
    ('TopK0', {
        'block': (TopKNet(P.TopK(), 5), {'exception': TypeError, 'error_keywords': ['TopK']}),
        'desc_inputs': [5.0],
        'skip': ['backward']}),
    # input x is Tensor(bool)
    ('TopK1', {
        'block': (TopKNet(P.TopK(), 5), {'exception': TypeError, 'error_keywords': ['TopK']}),
        'desc_inputs': [Tensor(np.ones([10]).astype(np.bool_))],
        'skip': ['backward']}),
    # k is not an integer
    ('TopK2', {
        'block': (TopKNet(P.TopK(), 5.0), {'exception': TypeError, 'error_keywords': ['TopK']}),
        'desc_inputs': [Tensor(np.ones([10]).astype(np.float32))],
        'skip': ['backward']}),
    # input is scalar
    ('SoftmaxCrossEntropyWithLogits0', {
        'block': (P.SoftmaxCrossEntropyWithLogits(),
                  {'exception': TypeError, 'error_keywords': ['SoftmaxCrossEntropyWithLogits']}),
        'desc_inputs': [5.0, 5.0],
        'skip': ['backward']}),
    # input is Tensor(bool)
    ('SoftmaxCrossEntropyWithLogits1', {
        'block': (P.SoftmaxCrossEntropyWithLogits(),
                  {'exception': TypeError, 'error_keywords': ['SoftmaxCrossEntropyWithLogits']}),
        'desc_inputs': [Tensor(np.ones([5]).astype(np.bool_)), Tensor(np.ones([5]).astype(np.bool_))],
        'skip': ['backward']}),
    # types of logits and labels mismatch
    ('SoftmaxCrossEntropyWithLogits2', {
        'block': (P.SoftmaxCrossEntropyWithLogits(),
                  {'exception': TypeError, 'error_keywords': ['SoftmaxCrossEntropyWithLogits']}),
        'desc_inputs': [Tensor(np.ones([5]).astype(np.float16)), Tensor(np.ones([5]).astype(np.float32))],
        'skip': ['backward']}),
    # shapes of logits and labels mismatch
    ('SoftmaxCrossEntropyWithLogits3', {
        'block': (P.SoftmaxCrossEntropyWithLogits(),
                  {'exception': ValueError, 'error_keywords': ['SoftmaxCrossEntropyWithLogits']}),
        'desc_inputs': [Tensor(np.ones([5]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32))],
        'skip': ['backward']}),
    # input is scalar
    ('SparseSoftmaxCrossEntropyWithLogits0', {
        'block': (P.SparseSoftmaxCrossEntropyWithLogits(),
                  {'exception': TypeError, 'error_keywords': ['SparseSoftmaxCrossEntropyWithLogits']}),
        'desc_inputs': [5.0, 5.0],
        'skip': ['backward']}),
    # logits is Tensor(bool)
    ('SparseSoftmaxCrossEntropyWithLogits1', {
        'block': (P.SparseSoftmaxCrossEntropyWithLogits(),
                  {'exception': TypeError, 'error_keywords': ['SparseSoftmaxCrossEntropyWithLogits']}),
        'desc_inputs': [Tensor(np.ones([5]).astype(np.bool_)), Tensor(np.ones([5]).astype(np.bool_))],
        'skip': ['backward']}),
    # labels is Tensor(bool)
    ('SparseSoftmaxCrossEntropyWithLogits2', {
        'block': (P.SparseSoftmaxCrossEntropyWithLogits(),
                  {'exception': TypeError, 'error_keywords': ['SparseSoftmaxCrossEntropyWithLogits']}),
        'desc_inputs': [Tensor(np.ones([5]).astype(np.float32)), Tensor(np.ones([5]).astype(np.bool_))],
        'skip': ['backward']}),
    # logits_shape[0] != labels_shape[0]
    ('SparseSoftmaxCrossEntropyWithLogits3', {
        'block': (P.SparseSoftmaxCrossEntropyWithLogits(),
                  {'exception': ValueError, 'error_keywords': ['SparseSoftmaxCrossEntropyWithLogits']}),
        'desc_inputs': [Tensor(np.ones([5]).astype(np.float32)), Tensor(np.ones([3]).astype(np.int32))],
        'skip': ['backward']}),
]
@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception)
def test_check_exception():
    """Feed raise_set to the exception-checking forward-compile pipeline.

    The decorator compiles each case's block with its desc_inputs and asserts
    that the configured exception type is raised with the configured keywords.
    """
    return raise_set