
test_functional.py

# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import itertools

import numpy as np
import pytest
from utils import opr_test

import megengine.core.ops.builtin as builtin
import megengine.core.tensor.dtype as dtype
import megengine.functional as F
from megengine import Parameter, Tensor, is_cuda_available, tensor
from megengine.core._trace_option import use_tensor_shape
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.utils import make_shape_tuple


def test_where():
    maskv0 = np.array([[1, 0], [0, 1]], dtype=np.bool_)
    xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
    yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)

    maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.bool_)
    xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
    yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)

    cases = [
        {"input": [maskv0, xv0, yv0]},
        {"input": [maskv1, xv1, yv1]},
    ]
    opr_test(cases, F.where, ref_fn=np.where)

    maskv2 = np.array([1, 1, 1], dtype=np.bool_)
    xv2 = np.array([1, 3, 2], dtype=np.float32)
    yv2 = np.array([5, 6, 9], dtype=np.float32)

    maskv3 = np.array([0, 0, 0], dtype=np.bool_)
    xv3 = np.array([1, 3, 2], dtype=np.float32)
    yv3 = np.array([5, 6, 9], dtype=np.float32)

    cases = [
        {"input": [maskv2, xv2, yv2]},
        {"input": [maskv3, xv3, yv3]},
    ]
    opr_test(cases, F.where, ref_fn=np.where)


def test_dropout():
    data = tensor(np.ones(10, dtype=np.float32))
    out = F.dropout(data, 1.0 / 3.0, training=False)
    assert out.numpy().sum() >= 0.0


def test_matmul():
    shape1 = 3
    shape2 = 3
    shape3 = (3, 5)
    shape4 = (5, 6)
    data1 = np.random.random(shape1).astype("float32")
    data2 = np.random.random(shape2).astype("float32")
    data3 = np.random.random(shape3).astype("float32")
    data4 = np.random.random(shape4).astype("float32")

    cases = [
        {"input": [data1, data2]},
        {"input": [data2, data3]},
        {"input": [data3, data4]},
    ]
    opr_test(cases, F.matmul, ref_fn=np.matmul)

    batch_size = 10
    shape1 = (batch_size, 2, 3)
    shape2 = (batch_size, 3, 4)
    shape3 = (batch_size, 10, 4, 5)
    data1 = np.random.random(shape1).astype("float32")
    data2 = np.random.random(shape2).astype("float32")
    data3 = np.random.random(shape3).astype("float32")

    cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
    for i in range(0, batch_size):

        def compare_fn(x, y):
            # assert (not just compute) closeness for batch i of the output
            np.testing.assert_allclose(x.numpy()[i, ...], y)

        opr_test(
            cases,
            F.matmul,
            compare_fn=compare_fn,
            ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
        )


def test_interpolate():
    def linear_interpolate():
        inp = tensor(np.arange(1, 3, dtype=np.float32).reshape(1, 1, 2))

        out = F.interpolate(inp, scale_factor=2.0, mode="LINEAR")
        out2 = F.interpolate(inp, 4, mode="LINEAR")

        np.testing.assert_allclose(
            out.numpy(), np.array([[[1.0, 1.25, 1.75, 2.0]]], dtype=np.float32)
        )
        np.testing.assert_allclose(
            out2.numpy(), np.array([[[1.0, 1.25, 1.75, 2.0]]], dtype=np.float32)
        )

    def many_batch_interpolate():
        inp = tensor(np.arange(1, 9, dtype=np.float32).reshape(2, 1, 2, 2))

        out = F.interpolate(inp, [4, 4])
        out2 = F.interpolate(inp, scale_factor=2.0)

        np.testing.assert_allclose(out.numpy(), out2.numpy())

    def assign_corner_interpolate():
        inp = tensor(np.arange(1, 5, dtype=np.float32).reshape(1, 1, 2, 2))

        out = F.interpolate(inp, [4, 4], align_corners=True)
        out2 = F.interpolate(inp, scale_factor=2.0, align_corners=True)

        np.testing.assert_allclose(out.numpy(), out2.numpy())

    def error_shape_linear_interpolate():
        inp = tensor(np.arange(1, 5, dtype=np.float32).reshape(1, 1, 2, 2))

        with pytest.raises(ValueError):
            F.interpolate(inp, scale_factor=2.0, mode="LINEAR")

    def inappropriate_scale_linear_interpolate():
        inp = tensor(np.arange(1, 3, dtype=np.float32).reshape(1, 1, 2))

        with pytest.raises(ValueError):
            F.interpolate(inp, scale_factor=[2.0, 3.0], mode="LINEAR")

    linear_interpolate()
    many_batch_interpolate()
    assign_corner_interpolate()
    error_shape_linear_interpolate()
    inappropriate_scale_linear_interpolate()


def _save_to(self, name="grad"):
    # Returns a callback that stores the incoming gradient on `self`
    # under the given attribute name.
    def callback(tensor, grad):
        setattr(self, name, grad)

    return callback


def _gen_roi_inp():
    inp_feat = np.random.randn(2, 32, 256, 256)
    rois = np.zeros((4, 5))
    rois[:, 0] = [0, 0, 1, 1]
    rois[:, 1:3] = np.random.rand(4, 2) * 100
    rois[:, 3:] = np.random.rand(4, 2) * 100 + 150

    inp_feat = tensor(inp_feat)
    rois = tensor(rois)
    return inp_feat, rois


def test_roi_align():
    inp_feat, rois = _gen_roi_inp()
    grad = Grad().wrt(inp_feat, callback=_save_to(inp_feat))

    output_shape = (7, 7)
    out_feat = F.roi_align(
        inp_feat,
        rois,
        output_shape=output_shape,
        mode="average",
        spatial_scale=1.0 / 4,
        sample_points=2,
        aligned=True,
    )
    assert make_shape_tuple(out_feat.shape) == (
        rois.shape[0],
        inp_feat.shape[1],
        *output_shape,
    )

    grad(out_feat, tensor(F.ones_like(out_feat)))
    assert make_shape_tuple(inp_feat.grad.shape) == make_shape_tuple(inp_feat.shape)


def test_roi_pooling():
    inp_feat, rois = _gen_roi_inp()
    grad = Grad().wrt(inp_feat, callback=_save_to(inp_feat))

    output_shape = (7, 7)
    out_feat = F.roi_pooling(
        inp_feat, rois, output_shape=output_shape, mode="max", scale=1.0 / 4,
    )
    assert make_shape_tuple(out_feat.shape) == (
        rois.shape[0],
        inp_feat.shape[1],
        *output_shape,
    )

    grad(out_feat, tensor(F.ones_like(out_feat)))
    assert make_shape_tuple(inp_feat.grad.shape) == make_shape_tuple(inp_feat.shape)


def test_one_hot():
    def onehot_low_dimension():
        inp = tensor(np.arange(1, 4, dtype=np.int32))
        out = F.one_hot(inp, num_classes=4)

        np.testing.assert_allclose(
            out.numpy(), np.eye(4, dtype=np.int32)[np.arange(1, 4, dtype=np.int32)]
        )

    def onehot_high_dimension():
        arr = np.array(
            [[3, 2, 4, 4, 2, 4, 0, 4, 4, 1], [4, 1, 1, 3, 2, 2, 4, 2, 4, 3]],
            dtype=np.int32,
        )

        inp = tensor(arr)
        out = F.one_hot(inp, 10)

        np.testing.assert_allclose(out.numpy(), np.eye(10, dtype=np.int32)[arr])

    onehot_low_dimension()
    onehot_high_dimension()


def test_add_update():
    shape = (2, 3)
    v = np.random.random(shape).astype(np.float32)
    b = Tensor(v)

    u = F.add_update(b, 1)
    np.testing.assert_allclose(u.numpy(), v + 1, atol=1e-6)

    u = F.add_update(b, 1)
    np.testing.assert_allclose(u.numpy(), v + 2, atol=1e-6)

    x = np.ones((2, 2), dtype=np.float32)
    y = x * 0.5
    dest = tensor(x)
    delta = tensor(y)
    r = F.add_update(dest, delta, alpha=0.9, beta=0.1, bias=0.1)
    np.testing.assert_allclose(r.numpy(), x * 0.9 + y * 0.1 + 0.1, atol=1e-6)


def test_add_update_params():
    b = np.random.random((2, 3)).astype(np.float32)
    y = Tensor(b)

    # @jit.trace
    def f(x):
        return F.add_update(y, x)

    f(np.zeros((2, 3)).astype(np.float32))

    z = Tensor(np.zeros((2, 3)).astype(np.float32))
    F.add_update(y, z, beta=0.1)

    res = f(np.ones((2, 3)).astype(np.float32))
    np.testing.assert_allclose(res.numpy(), b + 1)


def test_binary_cross_entropy():
    data1_shape = (2, 2)
    label1_shape = (2, 2)
    data2_shape = (2, 3)
    label2_shape = (2, 3)

    def sigmoid(x):
        return 1 / (1 + np.exp(-x))

    def compare_fn(x, y):
        np.testing.assert_allclose(x.numpy(), y, atol=5e-4)

    np.random.seed(123)
    data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
    label1 = np.random.uniform(size=label1_shape).astype(np.float32)
    expect1 = np.array([0.6361], dtype=np.float32)

    np.random.seed(123)
    data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
    label2 = np.random.uniform(size=label2_shape).astype(np.float32)
    expect2 = np.array([0.6750], dtype=np.float32)

    cases = [
        {"input": [data1, label1], "output": expect1,},
        {"input": [data2, label2], "output": expect2,},
    ]
    opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)


def test_hinge_loss():
    np.random.seed(123)
    # case with L1 norm
    cases = []
    for shape in [(2, 2), (2, 3)]:
        data = np.random.uniform(size=shape).astype(np.float32)
        # random labels in {-1, 1}
        label = 2 * np.random.randint(0, 2, size=shape).astype(np.float32) - 1
        expect = np.clip(1 - data * label, 0, np.inf).sum(axis=1).mean()
        cases.append({"input": [data, label], "output": expect})
    opr_test(cases, F.hinge_loss)

    # cases with L2 norm
    cases = []
    for shape in [(2, 2), (2, 3)]:
        data = np.random.uniform(size=shape).astype(np.float32)
        label = 2 * np.random.randint(0, 2, size=shape).astype(np.float32) - 1
        expect = ((np.clip(1 - data * label, 0, np.inf) ** 2).sum(axis=1)).mean()
        cases.append({"input": [data, label], "output": expect})

    def hinge_loss_with_l2_norm(pred, label):
        return F.hinge_loss(pred, label, "L2")

    opr_test(cases, hinge_loss_with_l2_norm)


def test_nms():
    x = np.array(
        [
            [0, 0, 100, 100],
            [10, 10, 100, 100],
            [50, 50, 100, 100],
            [100, 100, 150, 150],
        ],
        dtype=np.float32,
    )
    inp = tensor(x)
    scores = tensor([0.5, 0.8, 0.9, 0.6], dtype=np.float32)
    result = F.nms(inp, scores=scores, iou_thresh=0.5)
    np.testing.assert_equal(result.numpy(), np.array([2, 1, 3], dtype=np.int32))


def test_batched_nms():
    x = np.array(
        [
            [0, 0, 100, 100],
            [0.5, 0.5, 1.5, 1.5],
            [20, 20, 100, 100],
            [0.5, 0.5, 1.0, 1.0],
            [10, 10, 100, 100],
            [0.5, 0.5, 1.0, 1.0],
        ],
        dtype=np.float32,
    )
    inp = tensor(x)
    scores = tensor([0.6, 0.9, 0.5, 0.6, 0.8, 0.7], dtype=np.float32)
    idxs = tensor([0, 1, 0, 1, 0, 1], dtype=np.int32)
    results = F.batched_nms(inp, scores=scores, idxs=idxs, iou_thresh=0.5)
    np.testing.assert_equal(results.numpy(), np.array([1, 4, 5], dtype=np.int32))


@pytest.mark.skip(reason="cuda does not support nchw int8")
def test_conv_bias():
    inp_scale = 1.5
    w_scale = 2.5
    outp_scale = 1.5
    inp_dtype = dtype.qint8(inp_scale)
    w_dtype = dtype.qint8(w_scale)
    b_dtype = dtype.qint32(inp_scale * w_scale)
    out_dtype = dtype.qint8(outp_scale)

    def run(
        N,
        IC,
        OC,
        IH,
        IW,
        KH,
        KW,
        PH,
        PW,
        SH,
        SW,
        has_bias=True,
        nonlinear_mode="IDENTITY",
    ):
        # conv weight layout is (OC, IC, KH, KW)
        inp_v = np.random.normal(size=(N, IC, IH, IW))
        w_v = np.random.normal(size=(OC, IC, KH, KW))
        b_v = np.random.normal(size=(1, OC, 1, 1))
        inp_scale = dtype.get_scale(inp_dtype)
        w_scale = dtype.get_scale(w_dtype)
        b_scale = dtype.get_scale(b_dtype)

        inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
        wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
        bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)

        inp_int8 = tensor(inpv, dtype=inp_dtype)
        w_int8 = Parameter(wv, dtype=w_dtype)
        b_int32 = Parameter(bv, dtype=b_dtype)

        inp_fp32 = inp_int8.astype("float32")
        w_fp32 = w_int8.astype("float32")
        b_fp32 = b_int32.astype("float32")

        def convert_to_nchw4(var):
            var = F.reshape(
                var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
            )
            var = F.transpose(var, (0, 1, 3, 4, 2))
            return var

        def run_conv2d(inp, w, b):
            O = F.conv2d(
                inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
            )
            if nonlinear_mode == "RELU":
                return F.relu(O)
            else:
                return O

        def run_conv_bias(inp, w, b, format="NCHW"):
            b = b if has_bias else Parameter(np.zeros_like(b.numpy()))
            if format == "NCHW4":
                inp = convert_to_nchw4(inp)
                w = convert_to_nchw4(w)
                b = convert_to_nchw4(b)
            return F.conv_bias_activation(
                inp,
                w,
                b,
                stride=(SH, SW),
                padding=(PH, PW),
                format=format,
                dtype=out_dtype,
                nonlinear_mode=nonlinear_mode,
            )

        format = "NCHW4" if is_cuda_available() else "NCHW"

        # float32 conv2d serves as the reference for the quantized fused op
        expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
        expected = expected.astype(out_dtype).astype("float32")
        result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
            "float32"
        )
        if format == "NCHW4":
            result = F.transpose(result, (0, 1, 4, 2, 3))
        expected = F.flatten(expected)
        result = F.flatten(result)
        np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)

    run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
    run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
    run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)

    run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
    run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
    run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)

    run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "RELU")
    run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "RELU")


def test_zero_stride_numpy_array():
    inp = np.random.randn(3, 224, 224).astype(np.float32)
    inp = inp[np.newaxis, :]

    inp = tensor(inp, dtype=np.float32)
    weight = tensor(np.random.randn(16, 3, 3, 3), dtype=np.float32)
    out = F.conv2d(inp, weight, None, (2, 2), (3, 3), (1, 1), 1)


def test_condtake():
    x = np.array([[1, 2, 3], [4, 5, 6]])
    y = np.array([[True, False, True], [False, True, True]])
    xx = tensor(x)
    yy = tensor(y)
    val, idx = F.cond_take(yy, xx)
    np.testing.assert_equal(val.numpy(), x[y])
    np.testing.assert_equal(idx.numpy(), np.where(y.reshape(-1))[0])


def test_condtake_is_same():
    op1 = builtin.CondTake()
    op2 = builtin.CondTake()
    assert op1 == op2


def test_nms_is_same():
    op1 = builtin.NMSKeep(0.7, 100)
    op2 = builtin.NMSKeep(0.7, 100)
    op3 = builtin.NMSKeep(0.8, 100)
    op4 = builtin.NMSKeep(0.7, 200)
    assert op1 == op2
    assert op1 != op3
    assert op1 != op4
    assert op3 != op4

The MegEngine package bundles the CUDA environment needed to run code on a GPU, so there is no separate CPU and GPU build. To run GPU programs, make sure the machine has a GPU installed along with a working driver. If you would like to try deep-learning development on a cloud GPU platform, visit MegStudio.
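Because the same package serves both CPU-only and GPU machines, code can probe at runtime whether CUDA is usable before picking a device. A minimal sketch using `is_cuda_available`, the same helper imported by the test file above; the print messages are illustrative only:

import megengine

# The same wheel runs on either backend: kernels go to the GPU when a
# CUDA device and driver are present, and fall back to the CPU otherwise.
if megengine.is_cuda_available():
    print("CUDA available: ops will run on the GPU")
else:
    print("CUDA not available: running on the CPU")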