
tensor_test.cc

/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <memory>
#include <string>
#include "dataset/core/client.h"
#include "common/common.h"
#include "gtest/gtest.h"
#include "securec.h"
#include "dataset/core/tensor.h"
#include "dataset/core/cv_tensor.h"
#include "dataset/core/data_type.h"
#include "dataset/util/de_error.h"

using namespace mindspore::dataset;
namespace py = pybind11;

class MindDataTestTensorDE : public UT::Common {
 public:
  MindDataTestTensorDE() {}

  void SetUp() {
    GlobalInit();
  }
};

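// Check construction, shape/type/size accessors, element get/set with bounds checking, ToString, and equality.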
TEST_F(MindDataTestTensorDE, Basics) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_UINT64));
  ASSERT_TRUE((t->AllocateBuffer(t->SizeInBytes())).IsOk());
  ASSERT_EQ(t->shape(), TensorShape({2, 3}));
  ASSERT_EQ(t->type(), DataType::DE_UINT64);
  ASSERT_EQ(t->SizeInBytes(), 2 * 3 * 8);
  ASSERT_EQ(t->Rank(), 2);
  t->SetItemAt<uint64_t>({0, 0}, 1);
  t->SetItemAt<uint64_t>({0, 1}, 2);
  t->SetItemAt<uint64_t>({0, 2}, 3);
  t->SetItemAt<uint64_t>({1, 0}, 4);
  t->SetItemAt<uint64_t>({1, 1}, 5);
  t->SetItemAt<uint64_t>({1, 2}, 6);
  Status rc = t->SetItemAt<uint64_t>({2, 3}, 7);
  ASSERT_TRUE(rc.IsError());
  uint64_t o;
  t->GetItemAt<uint64_t>(&o, {0, 0});
  ASSERT_EQ(o, 1);
  t->GetItemAt<uint64_t>(&o, {0, 1});
  ASSERT_EQ(o, 2);
  t->GetItemAt<uint64_t>(&o, {0, 2});
  ASSERT_EQ(o, 3);
  t->GetItemAt<uint64_t>(&o, {1, 0});
  ASSERT_EQ(o, 4);
  t->GetItemAt<uint64_t>(&o, {1, 1});
  ASSERT_EQ(o, 5);
  t->GetItemAt<uint64_t>(&o, {1, 2});
  ASSERT_EQ(o, 6);
  rc = t->GetItemAt<uint64_t>(&o, {2, 3});
  ASSERT_TRUE(rc.IsError());
  ASSERT_EQ(t->ToString(), "Tensor (shape: <2,3>, Type: uint64)\n[[1,2,3],[4,5,6]]");
  std::vector<uint64_t> x = {1, 2, 3, 4, 5, 6};
  std::shared_ptr<Tensor> t2 = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_UINT64),
                                                        reinterpret_cast<unsigned char *>(&x[0]));
  ASSERT_EQ(*t == *t2, true);
  ASSERT_EQ(*t != *t2, false);
}

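// Fill should assign the given value to every element.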
TEST_F(MindDataTestTensorDE, Fill) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 2}), DataType(DataType::DE_FLOAT32));
  t->Fill<float>(2.5);
  std::vector<float> x = {2.5, 2.5, 2.5, 2.5};
  std::shared_ptr<Tensor> t2 = std::make_shared<Tensor>(TensorShape({2, 2}), DataType(DataType::DE_FLOAT32),
                                                        reinterpret_cast<unsigned char *>(&x[0]));
  ASSERT_EQ(*t == *t2, true);
}

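// Reshape must preserve the element count; ExpandDim rejects axes beyond the current rank.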
TEST_F(MindDataTestTensorDE, Reshape) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 2}), DataType(DataType::DE_UINT8));
  t->Fill<uint8_t>(254);
  t->Reshape(TensorShape({4}));
  std::vector<uint8_t> x = {254, 254, 254, 254};
  std::shared_ptr<Tensor> t2 = std::make_shared<Tensor>(TensorShape({4}), DataType(DataType::DE_UINT8),
                                                        reinterpret_cast<unsigned char *>(&x[0]));
  ASSERT_EQ(*t == *t2, true);
  Status rc = t->Reshape(TensorShape({5}));
  ASSERT_TRUE(rc.IsError());
  t2->ExpandDim(0);
  ASSERT_EQ(t2->shape(), TensorShape({1, 4}));
  t2->ExpandDim(2);
  ASSERT_EQ(t2->shape(), TensorShape({1, 4, 1}));
  rc = t2->ExpandDim(4);
  ASSERT_TRUE(rc.IsError());
}

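// Move construction should keep the same data buffer and leave the source with an unknown shape/type and no buffer.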
TEST_F(MindDataTestTensorDE, CopyTensor) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({}), DataType(DataType::DE_INT16));
  t->SetItemAt<int16_t>({}, -66);
  ASSERT_EQ(t->shape(), TensorShape({}));
  ASSERT_EQ(t->type(), DataType::DE_INT16);
  int16_t o;
  t->GetItemAt<int16_t>(&o, {});
  ASSERT_EQ(o, -66);
  unsigned char *addr = t->GetMutableBuffer();
  auto t2 = std::make_shared<Tensor>(std::move(*t));
  ASSERT_EQ(t2->shape(), TensorShape({}));
  ASSERT_EQ(t2->type(), DataType::DE_INT16);
  t2->GetItemAt<int16_t>(&o, {});
  ASSERT_EQ(o, -66);
  unsigned char *new_addr = t2->GetMutableBuffer();
  ASSERT_EQ(addr, new_addr);
  ASSERT_EQ(t->shape(), TensorShape::CreateUnknownRankShape());
  ASSERT_EQ(t->type(), DataType::DE_UNKNOWN);
  ASSERT_EQ(t->GetMutableBuffer(), nullptr);
  Status rc = t->GetItemAt<int16_t>(&o, {});
  ASSERT_TRUE(rc.IsError());
}

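// InsertTensor should copy a sub-tensor into the given position and reject indices or shapes that do not fit.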
TEST_F(MindDataTestTensorDE, InsertTensor) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_FLOAT64));
  std::vector<double> x = {1.1, 2.1, 3.1};
  std::shared_ptr<Tensor> t2 = std::make_shared<Tensor>(TensorShape({3}), DataType(DataType::DE_FLOAT64),
                                                        reinterpret_cast<unsigned char *>(&x[0]));
  std::vector<double> y = {1.2, 2.2, 3.2};
  std::shared_ptr<Tensor> t3 = std::make_shared<Tensor>(TensorShape({3}), DataType(DataType::DE_FLOAT64),
                                                        reinterpret_cast<unsigned char *>(&y[0]));
  ASSERT_TRUE(t->InsertTensor({0}, t2).OK());
  ASSERT_TRUE(t->InsertTensor({1}, t3).OK());
  std::vector<double> z = {1.1, 2.1, 3.1, 1.2, 2.2, 3.2};
  std::shared_ptr<Tensor> t4 = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_FLOAT64),
                                                        reinterpret_cast<unsigned char *>(&z[0]));
  ASSERT_EQ(*t == *t4, true);
  std::vector<double> x2 = {0};
  std::shared_ptr<Tensor> t5 = std::make_shared<Tensor>(TensorShape({}), DataType(DataType::DE_FLOAT64),
                                                        reinterpret_cast<unsigned char *>(&x2[0]));
  ASSERT_TRUE(t->InsertTensor({1, 2}, t5).OK());
  z[5] = 0;
  std::shared_ptr<Tensor> t6 = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_FLOAT64),
                                                        reinterpret_cast<unsigned char *>(&z[0]));
  ASSERT_EQ(*t == *t6, true);
  ASSERT_EQ(t->InsertTensor({2}, t5).get_code(), StatusCode::kUnexpectedError);
  ASSERT_EQ(t->InsertTensor({1}, t5).get_code(), StatusCode::kUnexpectedError);
  ASSERT_EQ(t->InsertTensor({1, 2}, t6).get_code(), StatusCode::kUnexpectedError);
  t6->Fill<double>(-1);
  ASSERT_TRUE(t->InsertTensor({}, t6).OK());
  ASSERT_EQ(*t == *t6, true);
}

// Regression test: Tensor::ToString used to fail for tensors that store bool values.
TEST_F(MindDataTestTensorDE, BoolTensor) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2}), DataType(DataType::DE_BOOL));
  t->SetItemAt<bool>({0}, true);
  t->SetItemAt<bool>({1}, true);
  std::string out = t->ToString();
  ASSERT_TRUE(out.find("Template type and Tensor type are not compatible") == std::string::npos);
}

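// GetItemAt should convert stored values to wider integer and floating-point output types.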
TEST_F(MindDataTestTensorDE, GetItemAt) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 2}), DataType(DataType::DE_UINT8));
  t->Fill<uint8_t>(254);
  uint64_t o1;
  t->GetItemAt<uint64_t>(&o1, {0, 0});
  ASSERT_EQ(o1, 254);
  uint32_t o2;
  t->GetItemAt<uint32_t>(&o2, {0, 1});
  ASSERT_EQ(o2, 254);
  uint16_t o3;
  t->GetItemAt<uint16_t>(&o3, {1, 0});
  ASSERT_EQ(o3, 254);
  uint8_t o4;
  t->GetItemAt<uint8_t>(&o4, {1, 1});
  ASSERT_EQ(o4, 254);
  std::shared_ptr<Tensor> t2 = std::make_shared<Tensor>(TensorShape({2, 2}), DataType(DataType::DE_INT8));
  t2->Fill<int8_t>(-10);
  int64_t o5;
  t2->GetItemAt<int64_t>(&o5, {0, 0});
  ASSERT_EQ(o5, -10);
  int32_t o6;
  t2->GetItemAt<int32_t>(&o6, {0, 1});
  ASSERT_EQ(o6, -10);
  int16_t o7;
  t2->GetItemAt<int16_t>(&o7, {1, 0});
  ASSERT_EQ(o7, -10);
  int8_t o8;
  t2->GetItemAt<int8_t>(&o8, {1, 1});
  ASSERT_EQ(o8, -10);
  std::shared_ptr<Tensor> t3 = std::make_shared<Tensor>(TensorShape({2, 2}), DataType(DataType::DE_FLOAT32));
  t3->Fill<float>(1.1);
  double o9;
  t3->GetItemAt<double>(&o9, {0, 0});
  ASSERT_FLOAT_EQ(o9, 1.1);
  float o10;
  t3->GetItemAt<float>(&o10, {0, 1});
  ASSERT_FLOAT_EQ(o10, 1.1);
}

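// Move assignment should transfer the contents of one tensor into another.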
TEST_F(MindDataTestTensorDE, OperatorAssign) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 2}), DataType(DataType::DE_UINT8));
  t->Fill<uint8_t>(1);
  std::shared_ptr<Tensor> t2 = std::make_shared<Tensor>(TensorShape({2, 2}), DataType(DataType::DE_UINT8));
  *t2 = std::move(*t);
  uint8_t o;
  t2->GetItemAt(&o, {0, 0});
  ASSERT_EQ(o, 1);
  t2->GetItemAt(&o, {0, 1});
  ASSERT_EQ(o, 1);
  t2->GetItemAt(&o, {1, 0});
  ASSERT_EQ(o, 1);
  t2->GetItemAt(&o, {1, 1});
  ASSERT_EQ(o, 1);
}

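// Strides are returned in bytes, so they scale with the element size of the dtype.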
TEST_F(MindDataTestTensorDE, Strides) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({4, 2, 2}), DataType(DataType::DE_UINT8));
  std::vector<dsize_t> x1 = t->Strides();
  std::vector<dsize_t> x2 = {4, 2, 1};
  ASSERT_EQ(x1, x2);
  t = std::make_shared<Tensor>(TensorShape({4, 2, 2}), DataType(DataType::DE_UINT32));
  x1 = t->Strides();
  x2 = {16, 8, 4};
  ASSERT_EQ(x1, x2);
}

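// Helper: build a CVTensor with the given shape and type and verify that the wrapped cv::Mat shares the buffer
// and agrees on depth and dimensions.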
void checkCvMat(TensorShape shape, DataType type) {
  std::shared_ptr<CVTensor> t = std::make_shared<CVTensor>(shape, type);
  cv::Mat m = t->mat();
  ASSERT_EQ(m.data, t->GetMutableBuffer());
  ASSERT_EQ(static_cast<uchar>(m.type()) & static_cast<uchar>(CV_MAT_DEPTH_MASK), type.AsCVType());
  if (shape.Rank() < 4) {
    if (shape.Rank() > 1) {
      for (dsize_t i = 0; i < 2; i++)
        ASSERT_EQ(m.size[static_cast<int>(i)], shape[i]);
    } else if (shape.Rank() == 0) {
      ASSERT_EQ(m.size[0], 1);
      ASSERT_EQ(m.size[1], 1);
    } else {
      ASSERT_EQ(m.size[0], shape[0]);
    }
    if (shape.Rank() == 3) { ASSERT_EQ(m.channels(), shape[2]); }
    ASSERT_EQ(m.dims, 2);
    ASSERT_EQ(m.size.dims(), 2);
    if (shape.Rank() > 0) { ASSERT_EQ(m.rows, shape[0]); }
    if (shape.Rank() > 1) { ASSERT_EQ(m.cols, shape[1]); }
  } else {
    for (dsize_t i = 0; i < shape.Rank(); i++)
      ASSERT_EQ(m.size[static_cast<int>(i)], shape[i]);
    ASSERT_EQ(m.dims, shape.Rank());
    ASSERT_EQ(m.size.dims(), shape.Rank());
    ASSERT_EQ(m.rows, -1);
    ASSERT_EQ(m.cols, -1);
  }
}

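// Run the cv::Mat consistency checks over several ranks and data types.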
TEST_F(MindDataTestTensorDE, CVTensorBasics) {
  checkCvMat(TensorShape({4, 5}), DataType(DataType::DE_UINT8));
  checkCvMat(TensorShape({4, 5, 3}), DataType(DataType::DE_UINT8));
  checkCvMat(TensorShape({4, 5, 10}), DataType(DataType::DE_UINT8));
  checkCvMat(TensorShape({4, 5, 3, 2}), DataType(DataType::DE_UINT8));
  checkCvMat(TensorShape({4}), DataType(DataType::DE_UINT8));
  checkCvMat(TensorShape({}), DataType(DataType::DE_INT16));
  checkCvMat(TensorShape({4, 5}), DataType(DataType::DE_INT16));
  checkCvMat(TensorShape({4, 5, 3}), DataType(DataType::DE_INT16));
  checkCvMat(TensorShape({4, 5, 10}), DataType(DataType::DE_INT16));
  checkCvMat(TensorShape({4, 5, 3, 2}), DataType(DataType::DE_INT16));
  checkCvMat(TensorShape({4}), DataType(DataType::DE_INT16));
  checkCvMat(TensorShape({}), DataType(DataType::DE_INT16));
}

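// A CVTensor constructed from a cv::Mat should compare equal to a Tensor holding the same values (2-D and 1-D cases).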
TEST_F(MindDataTestTensorDE, CVTensorFromMat) {
  cv::Mat m(2, 2, CV_8U);
  m.at<uint8_t>(0, 0) = 10;
  m.at<uint8_t>(0, 1) = 20;
  m.at<uint8_t>(1, 0) = 30;
  m.at<uint8_t>(1, 1) = 40;
  std::shared_ptr<CVTensor> cvt = std::make_shared<CVTensor>(m);
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 2}), DataType(DataType::DE_UINT8));
  t->SetItemAt<uint8_t>({0, 0}, 10);
  t->SetItemAt<uint8_t>({0, 1}, 20);
  t->SetItemAt<uint8_t>({1, 0}, 30);
  t->SetItemAt<uint8_t>({1, 1}, 40);
  ASSERT_TRUE(*t == *cvt);
  int size[] = {4};
  cv::Mat m2(1, size, CV_8U);
  m2.at<uint8_t>(0) = 10;
  m2.at<uint8_t>(1) = 20;
  m2.at<uint8_t>(2) = 30;
  m2.at<uint8_t>(3) = 40;
  std::shared_ptr<CVTensor> cvt2 = std::make_shared<CVTensor>(m2);
  std::shared_ptr<Tensor> t2 = std::make_shared<Tensor>(TensorShape({4}), DataType(DataType::DE_UINT8));
  t2->SetItemAt<uint8_t>({0}, 10);
  t2->SetItemAt<uint8_t>({1}, 20);
  t2->SetItemAt<uint8_t>({2}, 30);
  t2->SetItemAt<uint8_t>({3}, 40);
  t2->ExpandDim(1);
  ASSERT_TRUE(*t2 == *cvt2);
}

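// AsCVTensor should take over the tensor's buffer; arithmetic on the wrapped cv::Mat should update the same memory.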
TEST_F(MindDataTestTensorDE, CVTensorAs) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({3, 2}), DataType(DataType::DE_FLOAT64));
  t->Fill<double>(2.2);
  unsigned char *addr = t->GetMutableBuffer();
  std::shared_ptr<Tensor> t2 = std::make_shared<Tensor>(TensorShape({3, 2}), DataType(DataType::DE_FLOAT64));
  t2->Fill<double>(4.4);
  std::shared_ptr<CVTensor> ctv = CVTensor::AsCVTensor(t);
  ASSERT_EQ(t->GetMutableBuffer(), nullptr);
  ASSERT_EQ(ctv->GetMutableBuffer(), addr);
  cv::Mat m = ctv->mat();
  m = 2 * m;
  ASSERT_EQ(ctv->GetMutableBuffer(), addr);
  ASSERT_TRUE(*t2 == *ctv);
  MS_LOG(DEBUG) << *t2 << std::endl << *ctv;
}

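// Mat({i}) should expose row i of the 2-D tensor as a cv::Mat slice.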
TEST_F(MindDataTestTensorDE, CVTensorMatSlice) {
  cv::Mat m(2, 3, CV_32S);
  m.at<int32_t>(0, 0) = 10;
  m.at<int32_t>(0, 1) = 20;
  m.at<int32_t>(0, 2) = 30;
  m.at<int32_t>(1, 0) = 40;
  m.at<int32_t>(1, 1) = 50;
  m.at<int32_t>(1, 2) = 60;
  std::shared_ptr<CVTensor> cvt = std::make_shared<CVTensor>(m);
  cv::Mat mat;
  cvt->Mat({1}, &mat);
  cv::Mat m2(3, 1, CV_32S);
  m2.at<int32_t>(0) = 40;
  m2.at<int32_t>(1) = 50;
  m2.at<int32_t>(2) = 60;
  std::shared_ptr<CVTensor> cvt2 = std::make_shared<CVTensor>(mat);
  std::shared_ptr<CVTensor> cvt3 = std::make_shared<CVTensor>(m2);
  ASSERT_TRUE(*cvt2 == *cvt3);
  cvt->Mat({0}, &mat);
  m2.at<int32_t>(0) = 10;
  m2.at<int32_t>(1) = 20;
  m2.at<int32_t>(2) = 30;
  cvt2 = std::make_shared<CVTensor>(mat);
  cvt3 = std::make_shared<CVTensor>(m2);
  ASSERT_TRUE(*cvt2 == *cvt3);
}

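// The typed iterator should visit every element in order, keep working after Reshape, and allow in-place updates.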
TEST_F(MindDataTestTensorDE, TensorIterator) {
  std::vector<uint32_t> values = {1, 2, 3, 4, 5, 6};
  std::vector<uint32_t> values2 = {2, 3, 4, 5, 6, 7};
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({6}), DataType(DataType::DE_UINT32),
                                                       reinterpret_cast<unsigned char *>(&values[0]));
  auto i = t->begin<uint32_t>();
  auto j = values.begin();
  uint32_t ctr = 0;
  for (; i != t->end<uint32_t>(); i++, j++) {
    ASSERT_TRUE(*i == *j);
    ctr++;
  }
  ASSERT_TRUE(ctr == 6);
  t->Reshape(TensorShape{2, 3});
  i = t->begin<uint32_t>();
  j = values.begin();
  ctr = 0;
  for (; i != t->end<uint32_t>(); i++, j++) {
    ASSERT_TRUE(*i == *j);
    ctr++;
  }
  ASSERT_TRUE(ctr == 6);
  for (auto it = t->begin<uint32_t>(); it != t->end<uint32_t>(); it++) {
    *it = *it + 1;
  }
  i = t->begin<uint32_t>();
  j = values2.begin();
  ctr = 0;
  for (; i != t->end<uint32_t>(); i++, j++) {
    ASSERT_TRUE(*i == *j);
    ctr++;
  }
  ASSERT_TRUE(ctr == 6);
}