You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

meta_tensor_test.cc 12 kB

  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include <iostream>
  17. #include <memory>
  18. #include <vector>
  19. #include "common/common_test.h"
  20. #include "common/py_func_graph_fetcher.h"
  21. #include "securec/include/securec.h"
  22. #include "ir/tensor.h"
  23. #include "pybind_api/ir/tensor_py.h"
  24. using mindspore::tensor::TensorPy;
  25. namespace mindspore {
  26. namespace tensor {
  27. class TestMetaTensor : public UT::Common {
  28. public:
  29. TestMetaTensor() {}
  30. virtual void SetUp() {
  31. std::vector<int64_t> dimensions({2, 3});
  32. meta_tensor_ = MetaTensor(TypeId::kNumberTypeFloat64, dimensions);
  33. }
  34. protected:
  35. MetaTensor meta_tensor_;
  36. };
  37. TEST_F(TestMetaTensor, InitTest) {
  38. std::vector<int64_t> dimensions({2, 3});
  39. MetaTensor meta_tensor(TypeId::kNumberTypeFloat64, dimensions);
  40. // Test type
  41. ASSERT_EQ(TypeId::kNumberTypeFloat64, meta_tensor.data_type());
  42. // Test dimensions
  43. ASSERT_EQ(2, meta_tensor.DimensionSize(0));
  44. ASSERT_EQ(3, meta_tensor.DimensionSize(1));
  45. ASSERT_EQ(-1, meta_tensor.DimensionSize(2));
  46. // Test number of elements
  47. ASSERT_EQ(6, meta_tensor.ElementsNum());
  48. }
  49. // Test type
  50. TEST_F(TestMetaTensor, TypeTest) {
  51. meta_tensor_.set_data_type(TypeId::kNumberTypeInt32);
  52. ASSERT_EQ(TypeId::kNumberTypeInt32, meta_tensor_.data_type());
  53. }
  54. // Test shape
  55. TEST_F(TestMetaTensor, ShapeTest) {
  56. std::vector<int64_t> dimensions({5, 6, 7});
  57. meta_tensor_.set_shape(dimensions);
  58. ASSERT_EQ(5, meta_tensor_.DimensionSize(0));
  59. ASSERT_EQ(6, meta_tensor_.DimensionSize(1));
  60. ASSERT_EQ(7, meta_tensor_.DimensionSize(2));
  61. // Test number of elements
  62. ASSERT_EQ(210, meta_tensor_.ElementsNum());
  63. }
  64. TEST_F(TestMetaTensor, EqualTest) {
  65. std::vector<int64_t> dimensions({2, 3});
  66. MetaTensor meta_tensor_x(TypeId::kNumberTypeFloat64, dimensions);
  67. MetaTensor meta_tensor_y(meta_tensor_x);
  68. ASSERT_TRUE(meta_tensor_x == meta_tensor_y);
  69. MetaTensor meta_tensor_z(TypeId::kNumberTypeFloat32, dimensions);
  70. ASSERT_FALSE(meta_tensor_x == meta_tensor_z);
  71. meta_tensor_z = meta_tensor_x;
  72. ASSERT_TRUE(meta_tensor_x == meta_tensor_z);
  73. }
  74. class TestTensor : public UT::Common {
  75. public:
  76. TestTensor() {}
  77. virtual void SetUp() { UT::InitPythonPath(); }
  78. };
  79. py::array_t<float, py::array::c_style> BuildInputTensor() {
  80. // Init tensor data by py::array_t<float>
  81. py::array_t<float, py::array::c_style> input = py::array_t<float, py::array::c_style>({2, 3});
  82. auto array = input.mutable_unchecked();
  83. float start = 0;
  84. for (int i = 0; i < array.shape(0); i++) {
  85. for (int j = 0; j < array.shape(1); j++) {
  86. array(i, j) = start++;
  87. }
  88. }
  89. return input;
  90. }
  91. TEST_F(TestTensor, PyArrayScalarTest) {
  92. std::vector<int64_t> dimensions;
  93. py::array data = py::array_t<int64_t, py::array::c_style>(dimensions);
  94. uint8_t *data_buf = reinterpret_cast<uint8_t *>(data.request(true).ptr);
  95. int64_t num = 1;
  96. errno_t ret = memcpy_s(data_buf, sizeof(int64_t), &num, sizeof(int64_t));
  97. ASSERT_EQ(0, ret);
  98. ASSERT_EQ(num, *data_buf);
  99. }
  100. TEST_F(TestTensor, InitScalarTest) {
  101. std::vector<int64_t> dimensions;
  102. Tensor tensor(TypeId::kNumberTypeInt64, dimensions);
  103. uint8_t *data_buf = reinterpret_cast<uint8_t *>(tensor.data_c());
  104. int64_t num = 1;
  105. errno_t ret = memcpy_s(data_buf, sizeof(int64_t), &num, sizeof(int64_t));
  106. ASSERT_EQ(0, ret);
  107. ASSERT_EQ(num, *data_buf);
  108. // Test type
  109. ASSERT_EQ(TypeId::kNumberTypeInt64, tensor.data_type());
  110. // Test dimensions
  111. ASSERT_EQ(0, tensor.DataDim());
  112. // Test shape
  113. ASSERT_EQ(0, tensor.shape().size());
  114. std::vector<int64_t> empty_shape;
  115. ASSERT_EQ(empty_shape, tensor.shape());
  116. // Test number of elements
  117. ASSERT_EQ(1, tensor.ElementsNum());
  118. ASSERT_EQ(1, tensor.DataSize());
  119. }
  120. TEST_F(TestTensor, InitTensorPtrTest) {
  121. std::vector<int64_t> dimensions;
  122. Tensor tensor(TypeId::kNumberTypeInt64, dimensions);
  123. std::shared_ptr<Tensor> tensor_ptr = std::make_shared<Tensor>(tensor);
  124. // Test type
  125. ASSERT_EQ(TypeId::kNumberTypeInt64, tensor_ptr->data_type());
  126. // Test dimensions
  127. ASSERT_EQ(0, tensor_ptr->DataDim());
  128. // Test shape
  129. ASSERT_EQ(0, tensor_ptr->shape().size());
  130. std::vector<int64_t> empty_shape;
  131. ASSERT_EQ(empty_shape, tensor_ptr->shape());
  132. // Test number of elements
  133. ASSERT_EQ(1, tensor_ptr->ElementsNum());
  134. ASSERT_EQ(1, tensor_ptr->DataSize());
  135. }
  136. TEST_F(TestTensor, InitByTupleTest) {
  137. const std::vector<int64_t> shape = {2, 3, 4};
  138. TypePtr data_type = kFloat32;
  139. Tensor tuple_tensor(data_type->type_id(), shape);
  140. ASSERT_EQ(2, tuple_tensor.DimensionSize(0));
  141. ASSERT_EQ(3, tuple_tensor.DimensionSize(1));
  142. ASSERT_EQ(4, tuple_tensor.DimensionSize(2));
  143. // Test number of elements
  144. ASSERT_EQ(24, tuple_tensor.ElementsNum());
  145. ASSERT_EQ(TypeId::kNumberTypeFloat32, tuple_tensor.data_type());
  146. py::tuple tuple = py::make_tuple(1.0, 2.0, 3, 4, 5, 6);
  147. TensorPtr tensor = TensorPy::MakeTensor(py::array(tuple), kFloat64);
  148. py::array array = TensorPy::AsNumpy(*tensor);
  149. std::cout << "Dim: " << array.ndim() << std::endl;
  150. ASSERT_EQ(1, array.ndim());
  151. std::cout << "Num of Elements: " << array.size() << std::endl;
  152. ASSERT_EQ(6, array.size());
  153. std::cout << "Elements: " << std::endl;
  154. // Must be double, or the result is not right
  155. double *tensor_data = reinterpret_cast<double *>(tensor->data_c());
  156. for (int i = 0; i < array.size(); i++) {
  157. std::cout << tensor_data[i] << std::endl;
  158. }
  159. }
  160. TEST_F(TestTensor, EqualTest) {
  161. py::tuple tuple = py::make_tuple(1, 2, 3, 4, 5, 6);
  162. TensorPtr tensor_int8 = TensorPy::MakeTensor(py::array(tuple), kInt8);
  163. ASSERT_TRUE(*tensor_int8 == *tensor_int8);
  164. ASSERT_EQ(TypeId::kNumberTypeInt8, tensor_int8->data_type_c());
  165. TensorPtr tensor_int16 = TensorPy::MakeTensor(py::array(tuple), kInt16);
  166. ASSERT_EQ(TypeId::kNumberTypeInt16, tensor_int16->data_type_c());
  167. TensorPtr tensor_int32 = TensorPy::MakeTensor(py::array(tuple), kInt32);
  168. ASSERT_EQ(TypeId::kNumberTypeInt32, tensor_int32->data_type_c());
  169. TensorPtr tensor_float16 = TensorPy::MakeTensor(py::array(tuple), kFloat16);
  170. ASSERT_EQ(TypeId::kNumberTypeFloat16, tensor_float16->data_type_c());
  171. TensorPtr tensor_float32 = TensorPy::MakeTensor(py::array(tuple), kFloat32);
  172. ASSERT_EQ(TypeId::kNumberTypeFloat32, tensor_float32->data_type_c());
  173. TensorPtr tensor_float64 = TensorPy::MakeTensor(py::array(tuple), kFloat64);
  174. ASSERT_EQ(TypeId::kNumberTypeFloat64, tensor_float64->data_type_c());
  175. }
  176. TEST_F(TestTensor, ValueEqualTest) {
  177. py::tuple tuple = py::make_tuple(1, 2, 3, 4, 5, 6);
  178. TensorPtr t1 = TensorPy::MakeTensor(py::array(tuple), kInt32);
  179. TensorPtr t2 = TensorPy::MakeTensor(py::array(tuple), kInt32);
  180. ASSERT_TRUE(t1->ValueEqual(*t1));
  181. ASSERT_TRUE(t1->ValueEqual(*t2));
  182. std::vector<int64_t> shape = {6};
  183. TensorPtr t3 = std::make_shared<Tensor>(kInt32->type_id(), shape);
  184. TensorPtr t4 = std::make_shared<Tensor>(kInt32->type_id(), shape);
  185. ASSERT_TRUE(t3->ValueEqual(*t3));
  186. ASSERT_FALSE(t3->ValueEqual(*t4));
  187. ASSERT_FALSE(t3->ValueEqual(*t1));
  188. ASSERT_FALSE(t1->ValueEqual(*t3));
  189. memcpy_s(t3->data_c(), t3->data().nbytes(), t1->data_c(), t1->data().nbytes());
  190. ASSERT_TRUE(t1->ValueEqual(*t3));
  191. ASSERT_FALSE(t3->ValueEqual(*t4));
  192. ASSERT_FALSE(t4->ValueEqual(*t3));
  193. }
  194. TEST_F(TestTensor, PyArrayTest) {
  195. py::array_t<float, py::array::c_style> input({2, 3});
  196. auto array = input.mutable_unchecked();
  197. float sum = 0;
  198. std::cout << "sum"
  199. << " = " << std::endl;
  200. float start = 0;
  201. for (int i = 0; i < array.shape(0); i++) {
  202. for (int j = 0; j < array.shape(1); j++) {
  203. array(i, j) = start++;
  204. sum += array(i, j);
  205. std::cout << "sum + "
  206. << "array[" << i << ", " << j << "]"
  207. << " = " << sum << std::endl;
  208. }
  209. }
  210. ASSERT_EQ(15, sum);
  211. }
  212. TEST_F(TestTensor, InitByFloatArrayDataCTest) {
  213. // Init tensor data by py::array_t<float>
  214. auto tensor = TensorPy::MakeTensor(BuildInputTensor());
  215. // Print some information of the tensor
  216. std::cout << "Datatype: " << tensor->data_type() << std::endl;
  217. ASSERT_EQ(TypeId::kNumberTypeFloat32, tensor->data_type());
  218. std::cout << "Dim: " << tensor->DataDim() << std::endl;
  219. ASSERT_EQ(2, tensor->DataDim());
  220. std::cout << "Num of Elements: " << tensor->ElementsNum() << std::endl;
  221. ASSERT_EQ(6, tensor->ElementsNum());
  222. // Print each elements
  223. std::cout << "Elements: " << std::endl;
  224. float *tensor_data = reinterpret_cast<float *>(tensor->data_c());
  225. for (int i = 0; i < tensor->ElementsNum(); i++) {
  226. std::cout << tensor_data[i] << std::endl;
  227. }
  228. }
  229. TEST_F(TestTensor, InitByFloatArrayDataTest) {
  230. // Init tensor data by py::array_t<float>
  231. TensorPtr tensor = TensorPy::MakeTensor(BuildInputTensor());
  232. // Print some information of the tensor
  233. std::cout << "Datatype: " << tensor->data_type() << std::endl;
  234. ASSERT_EQ(TypeId::kNumberTypeFloat32, tensor->data_type());
  235. std::cout << "Dim: " << tensor->DataDim() << std::endl;
  236. ASSERT_EQ(2, tensor->DataDim());
  237. std::vector<int64_t> dimensions = tensor->shape();
  238. ASSERT_GT(dimensions.size(), 1);
  239. std::cout << "Dim0: " << dimensions[0] << std::endl;
  240. ASSERT_EQ(2, dimensions[0]);
  241. std::cout << "Dim1: " << dimensions[1] << std::endl;
  242. ASSERT_EQ(3, dimensions[1]);
  243. std::cout << "Num of Elements: " << tensor->ElementsNum() << std::endl;
  244. ASSERT_EQ(6, tensor->ElementsNum());
  245. // Print each elements
  246. std::cout << "Elements: " << std::endl;
  247. py::array_t<float> data = py::cast<py::array_t<float>>(TensorPy::AsNumpy(*tensor));
  248. auto array = data.unchecked<2>();
  249. for (int i = 0; i < array.shape(0); i++) {
  250. for (int j = 0; j < array.shape(1); j++) {
  251. std::cout << array(i, j) << std::endl;
  252. }
  253. }
  254. }
  255. TEST_F(TestTensor, PyArrayDataTest) {
  256. py::array_t<float, py::array::c_style> input({2, 3});
  257. float *data = reinterpret_cast<float *>(input.request().ptr);
  258. float ge_tensor_data[] = {1.1, 2.2, 3.3, 4.4, 5.5, 6.6};
  259. errno_t ret = memcpy_s(data, input.nbytes(), ge_tensor_data, sizeof(ge_tensor_data));
  260. ASSERT_EQ(0, ret);
  261. auto array = input.mutable_unchecked();
  262. for (int i = 0; i < array.shape(0); i++) {
  263. for (int j = 0; j < array.shape(1); j++) {
  264. ASSERT_EQ(array(i, j), ge_tensor_data[3 * i + j]);
  265. }
  266. }
  267. }
  268. TEST_F(TestTensor, TensorDataTest) {
  269. // Init a data buffer
  270. float ge_tensor_data[] = {1.1, 2.2, 3.3, 4.4, 5.5, 6.6};
  271. // Create a Tensor with wanted data type and shape
  272. Tensor tensor(TypeId::kNumberTypeFloat32, std::vector<int64_t>({2, 3}));
  273. // Get the writable data pointer from the tensor
  274. float *me_tensor_data = reinterpret_cast<float *>(tensor.data_c());
  275. // Copy data from buffer to tensor's data
  276. errno_t ret = memcpy_s(me_tensor_data, tensor.data().nbytes(), ge_tensor_data, sizeof(ge_tensor_data));
  277. ASSERT_EQ(0, ret);
  278. // Testify if the data has been copied to the tensor data
  279. py::array_t<float> data = py::cast<py::array_t<float>>(TensorPy::AsNumpy(tensor));
  280. auto array = data.mutable_unchecked();
  281. for (int i = 0; i < array.shape(0); i++) {
  282. for (int j = 0; j < array.shape(1); j++) {
  283. std::cout << "array[" << i << ", " << j << "]"
  284. << " = " << array(i, j) << std::endl;
  285. ASSERT_EQ(array(i, j), ge_tensor_data[3 * i + j]);
  286. }
  287. }
  288. }
  289. TEST_F(TestTensor, TensorPyCast) {
  290. std::vector<int64_t> shape{2, 3, 4, 5};
  291. py::tuple py_tuple = py::make_tuple(std::make_shared<Tensor>(kNumberTypeFloat32, shape));
  292. auto shape1 = py::cast<Tensor &>(py_tuple[0]).shape();
  293. const py::tuple &t = py_tuple;
  294. auto shape2 = py::cast<const Tensor &>(t[0]).shape();
  295. auto shape3 = py::cast<Tensor &>(t[0]).shape();
  296. ASSERT_EQ(shape, shape1);
  297. ASSERT_EQ(shape, shape2);
  298. ASSERT_EQ(shape, shape3);
  299. }
  300. } // namespace tensor
  301. } // namespace mindspore