
test_functional.py 10 kB

# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
from helpers import opr_test

import megengine.functional as F
from megengine import Buffer, jit, tensor
from megengine.test import assertTensorClose
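
# Each test below builds NumPy inputs, packs them into a list of `cases`, and
# calls opr_test, which runs the MegEngine op on every case and checks the
# result against an expected output or a NumPy reference function (ref_fn).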
def test_flatten():
    data0_shape = (2, 3, 4, 5)
    data1_shape = (4, 5, 6, 7)
    data0 = np.random.random(data0_shape).astype(np.float32)
    data1 = np.random.random(data1_shape).astype(np.float32)

    def compare_fn(x, y):
        assert x.numpy().shape == y

    output0 = (2 * 3 * 4 * 5,)
    output1 = (4 * 5 * 6 * 7,)
    cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
    opr_test(cases, F.flatten, compare_fn=compare_fn)

    output0 = (2, 3 * 4 * 5)
    output1 = (4, 5 * 6 * 7)
    cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
    opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)

    output0 = (2, 3, 4 * 5)
    output1 = (4, 5, 6 * 7)
    cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
    opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)

    output0 = (2, 3 * 4, 5)
    output1 = (4, 5 * 6, 7)
    cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
    opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
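
# F.where(mask, x, y) picks elements from x where mask is nonzero and from y
# elsewhere; np.where serves as the reference implementation.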
def test_where():
    maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
    xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
    yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)

    maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
    xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
    yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)

    cases = [
        {"input": [maskv0, xv0, yv0]},
        {"input": [maskv1, xv1, yv1]},
    ]
    opr_test(cases, F.where, ref_fn=np.where)

    maskv2 = np.array([1, 1, 1], dtype=np.int32)
    xv2 = np.array([1, 3, 2], dtype=np.float32)
    yv2 = np.array([5, 6, 9], dtype=np.float32)

    maskv3 = np.array([0, 0, 0], dtype=np.int32)
    xv3 = np.array([1, 3, 2], dtype=np.float32)
    yv3 = np.array([5, 6, 9], dtype=np.float32)

    cases = [
        {"input": [maskv2, xv2, yv2]},
        {"input": [maskv3, xv3, yv3]},
    ]
    opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
    dtype = np.float32
    cases = [{"input": [10, 20]}, {"input": [20, 30]}]
    opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
    def get_data_shape(length: int):
        return (length, 2, 3)

    data1 = np.random.random(get_data_shape(5)).astype("float32")
    data2 = np.random.random(get_data_shape(6)).astype("float32")
    data3 = np.random.random(get_data_shape(7)).astype("float32")

    def run(data1, data2):
        return F.concat([data1, data2])

    cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
    opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
    shape1 = (2, 3)
    shape2 = (3, 4)
    shape3 = (4, 5)
    data1 = np.random.random(shape1).astype("float32")
    data2 = np.random.random(shape2).astype("float32")
    data3 = np.random.random(shape3).astype("float32")

    cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
    opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
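
# Batched matmul: each batch slice of the output should equal np.matmul of the
# corresponding input slices, so every batch index i is checked separately.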
def test_batched_matrix_mul():
    batch_size = 10
    shape1 = (batch_size, 2, 3)
    shape2 = (batch_size, 3, 4)
    shape3 = (batch_size, 4, 5)
    data1 = np.random.random(shape1).astype("float32")
    data2 = np.random.random(shape2).astype("float32")
    data3 = np.random.random(shape3).astype("float32")

    cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
    for i in range(0, batch_size):

        def compare_fn(x, y):
            # assert that the i-th batch slice matches the NumPy reference
            assertTensorClose(x.numpy()[i, ...], y)

        opr_test(
            cases,
            F.batched_matrix_mul,
            compare_fn=compare_fn,
            ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
        )
def test_sort():
    data1_shape = (10, 3)
    data2_shape = (12, 2)
    data1 = np.random.random(data1_shape).astype(np.float32)
    data2 = np.random.random(data2_shape).astype(np.float32)
    output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
    output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]

    cases = [
        {"input": data1, "output": output0},
        {"input": data2, "output": output1},
    ]
    opr_test(cases, F.sort)
def test_round():
    data1_shape = (15,)
    data2_shape = (25,)
    data1 = np.random.random(data1_shape).astype(np.float32)
    data2 = np.random.random(data2_shape).astype(np.float32)

    cases = [{"input": data1}, {"input": data2}]
    opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
    input1_shape = (20, 30)
    output1_shape = (30, 20, 30)
    data1 = np.random.random(input1_shape).astype(np.float32)

    input2_shape = (10, 20)
    output2_shape = (20, 10, 20)
    data2 = np.random.random(input2_shape).astype(np.float32)

    def compare_fn(x, y):
        assert x.numpy().shape == y

    cases = [
        {"input": [data1, output1_shape], "output": output1_shape},
        {"input": [data2, output2_shape], "output": output2_shape},
    ]
    opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
    cases = [
        {"input": [1, 9, 9]},
        {"input": [3, 10, 8]},
    ]
    opr_test(
        cases,
        F.linspace,
        ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
    )

    cases = [
        {"input": [9, 1, 9]},
        {"input": [10, 3, 8]},
    ]
    opr_test(
        cases,
        F.linspace,
        ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
    )
def test_arange():
    cases = [
        {"input": [1, 9, 1]},
        {"input": [2, 10, 2]},
    ]
    opr_test(
        cases,
        F.arange,
        ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
    )

    cases = [
        {"input": [9, 1, -1]},
        {"input": [10, 2, -2]},
    ]
    opr_test(
        cases,
        F.arange,
        ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
    )

    cases = [
        {"input": [9.3, 1.2, -0.5]},
        {"input": [10.3, 2.1, -1.7]},
    ]
    opr_test(
        cases,
        F.arange,
        ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
    )
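
# add_update modifies the destination buffer in place; the assertions below
# expect dest * alpha + delta * beta + bias after each call.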
def test_add_update():
    shape = (2, 3)
    v = np.random.random(shape).astype(np.float32)
    b = Buffer(v)

    u = F.add_update(b, 1)
    assertTensorClose(u.numpy(), v + 1)
    u = F.add_update(b, 1)
    assertTensorClose(u.numpy(), v + 2)

    x = np.ones((2, 2), dtype=np.float32)
    y = x * 0.5
    dest = tensor(x)
    delta = tensor(y)
    r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
    assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
    b = np.random.random((2, 3)).astype(np.float32)
    y = Buffer(b)

    @jit.trace
    def f(x):
        return F.add_update(y, x)

    f(np.zeros((2, 3)).astype(np.float32))

    z = Buffer(np.zeros((2, 3)).astype(np.float32))
    F.add_update(y, z, beta=0.1)

    res = f(np.ones((2, 3)).astype(np.float32))
    assertTensorClose(res, b + 1)
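
# The fused cross_entropy_with_softmax should match applying F.softmax followed
# by F.cross_entropy, which is how the expected values are computed here.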
def test_cross_entropy_with_softmax():
    data1_shape = (1, 2)
    label1_shape = (1,)
    data2_shape = (1, 3)
    label2_shape = (1,)

    data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
    label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
    expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()

    data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
    label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
    expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()

    cases = [
        {"input": [data1, label1], "output": expect1,},
        {"input": [data2, label2], "output": expect2,},
    ]
    opr_test(cases, F.cross_entropy_with_softmax)
def test_cross_entropy():
    data1_shape = (1, 2)
    label1_shape = (1,)
    data2_shape = (1, 3)
    label2_shape = (1,)

    data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
    label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
    expect1 = np.array([-np.log(0.5)], dtype=np.float32)

    data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
    label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
    expect2 = np.array([-np.log(0.4)], dtype=np.float32)

    cases = [
        {"input": [data1, label1], "output": expect1,},
        {"input": [data2, label2], "output": expect2,},
    ]
    opr_test(cases, F.cross_entropy)
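
# Inputs are seeded so the expected losses are fixed scalars; compare_fn allows
# a small numerical tolerance (max_err=5e-4).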
def test_binary_cross_entropy():
    data1_shape = (2, 2)
    label1_shape = (2, 2)
    data2_shape = (2, 3)
    label2_shape = (2, 3)

    def sigmoid(x):
        return 1 / (1 + np.exp(-x))

    def compare_fn(x, y):
        assertTensorClose(x.numpy(), y, max_err=5e-4)

    np.random.seed(123)
    data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
    label1 = np.random.uniform(size=label1_shape).astype(np.float32)
    expect1 = np.array([0.6361], dtype=np.float32)

    np.random.seed(123)
    data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
    label2 = np.random.uniform(size=label2_shape).astype(np.float32)
    expect2 = np.array([0.6750], dtype=np.float32)

    cases = [
        {"input": [data1, label1], "output": expect1,},
        {"input": [data2, label2], "output": expect2,},
    ]
    opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
