
tensor_test.cc

/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <memory>
#include <string>
#include "dataset/core/client.h"
#include "common/common.h"
#include "gtest/gtest.h"
#include "securec.h"
#include "dataset/core/tensor.h"
#include "dataset/core/cv_tensor.h"
#include "dataset/core/data_type.h"
#include "dataset/util/de_error.h"

using namespace mindspore::dataset;
namespace py = pybind11;
class MindDataTestTensorDE : public UT::Common {
 public:
  MindDataTestTensorDE() {}

  void SetUp() {
    GlobalInit();
  }
};
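// Covers basic Tensor metadata (shape, type, size in bytes, rank), element set/get with
// out-of-bounds error checking, ToString() formatting, and equality comparison.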
TEST_F(MindDataTestTensorDE, Basics) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_UINT64));
  ASSERT_EQ(t->shape(), TensorShape({2, 3}));
  ASSERT_EQ(t->type(), DataType::DE_UINT64);
  ASSERT_EQ(t->SizeInBytes(), 2 * 3 * 8);
  ASSERT_EQ(t->Rank(), 2);
  t->SetItemAt<uint64_t>({0, 0}, 1);
  t->SetItemAt<uint64_t>({0, 1}, 2);
  t->SetItemAt<uint64_t>({0, 2}, 3);
  t->SetItemAt<uint64_t>({1, 0}, 4);
  t->SetItemAt<uint64_t>({1, 1}, 5);
  t->SetItemAt<uint64_t>({1, 2}, 6);
  Status rc = t->SetItemAt<uint64_t>({2, 3}, 7);
  ASSERT_TRUE(rc.IsError());
  uint64_t o;
  t->GetItemAt<uint64_t>(&o, {0, 0});
  ASSERT_EQ(o, 1);
  t->GetItemAt<uint64_t>(&o, {0, 1});
  ASSERT_EQ(o, 2);
  t->GetItemAt<uint64_t>(&o, {0, 2});
  ASSERT_EQ(o, 3);
  t->GetItemAt<uint64_t>(&o, {1, 0});
  ASSERT_EQ(o, 4);
  t->GetItemAt<uint64_t>(&o, {1, 1});
  ASSERT_EQ(o, 5);
  t->GetItemAt<uint64_t>(&o, {1, 2});
  ASSERT_EQ(o, 6);
  rc = t->GetItemAt<uint64_t>(&o, {2, 3});
  ASSERT_TRUE(rc.IsError());
  ASSERT_EQ(t->ToString(), "Tensor (shape: <2,3>, Type: uint64)\n[[1,2,3],[4,5,6]]");
  std::vector<uint64_t> x = {1, 2, 3, 4, 5, 6};
  std::shared_ptr<Tensor> t2 = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_UINT64),
                                                        reinterpret_cast<unsigned char *>(&x[0]));
  ASSERT_EQ(*t == *t2, true);
  ASSERT_EQ(*t != *t2, false);
}
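// Fill() should set every element, making the tensor equal to one built from a pre-filled buffer.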
TEST_F(MindDataTestTensorDE, Fill) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 2}), DataType(DataType::DE_FLOAT32));
  t->Fill<float>(2.5);
  std::vector<float> x = {2.5, 2.5, 2.5, 2.5};
  std::shared_ptr<Tensor> t2 = std::make_shared<Tensor>(TensorShape({2, 2}), DataType(DataType::DE_FLOAT32),
                                                        reinterpret_cast<unsigned char *>(&x[0]));
  ASSERT_EQ(*t == *t2, true);
}
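// Reshape() must preserve the data and reject a shape with a different element count;
// ExpandDim() inserts a size-1 axis and fails for an out-of-range axis.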
TEST_F(MindDataTestTensorDE, Reshape) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 2}), DataType(DataType::DE_UINT8));
  t->Fill<uint8_t>(254);
  t->Reshape(TensorShape({4}));
  std::vector<uint8_t> x = {254, 254, 254, 254};
  std::shared_ptr<Tensor> t2 = std::make_shared<Tensor>(TensorShape({4}), DataType(DataType::DE_UINT8),
                                                        reinterpret_cast<unsigned char *>(&x[0]));
  ASSERT_EQ(*t == *t2, true);
  Status rc = t->Reshape(TensorShape({5}));
  ASSERT_TRUE(rc.IsError());
  t2->ExpandDim(0);
  ASSERT_EQ(t2->shape(), TensorShape({1, 4}));
  t2->ExpandDim(2);
  ASSERT_EQ(t2->shape(), TensorShape({1, 4, 1}));
  rc = t2->ExpandDim(4);
  ASSERT_TRUE(rc.IsError());
}
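// Move construction should transfer the underlying buffer (the new tensor sees the same
// address) and leave the source with unknown shape, unknown type, and a null buffer.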
TEST_F(MindDataTestTensorDE, CopyTensor) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({}), DataType(DataType::DE_INT16));
  t->SetItemAt<int16_t>({}, -66);
  ASSERT_EQ(t->shape(), TensorShape({}));
  ASSERT_EQ(t->type(), DataType::DE_INT16);
  int16_t o;
  t->GetItemAt<int16_t>(&o, {});
  ASSERT_EQ(o, -66);
  unsigned char *addr = t->StartAddr();
  auto t2 = std::make_shared<Tensor>(std::move(*t));
  ASSERT_EQ(t2->shape(), TensorShape({}));
  ASSERT_EQ(t2->type(), DataType::DE_INT16);
  t2->GetItemAt<int16_t>(&o, {});
  ASSERT_EQ(o, -66);
  unsigned char *new_addr = t2->StartAddr();
  ASSERT_EQ(addr, new_addr);
  ASSERT_EQ(t->shape(), TensorShape::CreateUnknownRankShape());
  ASSERT_EQ(t->type(), DataType::DE_UNKNOWN);
  ASSERT_EQ(t->StartAddr(), nullptr);
  Status rc = t->GetItemAt<int16_t>(&o, {});
  ASSERT_TRUE(rc.IsError());
}
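// InsertTensor() writes a sub-tensor at the given index; out-of-range indices or shape
// mismatches must return kUnexpectedError, and an empty index replaces the whole tensor.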
TEST_F(MindDataTestTensorDE, InsertTensor) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_FLOAT64));
  std::vector<double> x = {1.1, 2.1, 3.1};
  std::shared_ptr<Tensor> t2 = std::make_shared<Tensor>(TensorShape({3}), DataType(DataType::DE_FLOAT64),
                                                        reinterpret_cast<unsigned char *>(&x[0]));
  std::vector<double> y = {1.2, 2.2, 3.2};
  std::shared_ptr<Tensor> t3 = std::make_shared<Tensor>(TensorShape({3}), DataType(DataType::DE_FLOAT64),
                                                        reinterpret_cast<unsigned char *>(&y[0]));
  ASSERT_TRUE(t->InsertTensor({0}, t2).OK());
  ASSERT_TRUE(t->InsertTensor({1}, t3).OK());
  std::vector<double> z = {1.1, 2.1, 3.1, 1.2, 2.2, 3.2};
  std::shared_ptr<Tensor> t4 = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_FLOAT64),
                                                        reinterpret_cast<unsigned char *>(&z[0]));
  ASSERT_EQ(*t == *t4, true);
  std::vector<double> x2 = {0};
  std::shared_ptr<Tensor> t5 = std::make_shared<Tensor>(TensorShape({}), DataType(DataType::DE_FLOAT64),
                                                        reinterpret_cast<unsigned char *>(&x2[0]));
  ASSERT_TRUE(t->InsertTensor({1, 2}, t5).OK());
  z[5] = 0;
  std::shared_ptr<Tensor> t6 = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_FLOAT64),
                                                        reinterpret_cast<unsigned char *>(&z[0]));
  ASSERT_EQ(*t == *t6, true);
  ASSERT_EQ(t->InsertTensor({2}, t5).get_code(), StatusCode::kUnexpectedError);
  ASSERT_EQ(t->InsertTensor({1}, t5).get_code(), StatusCode::kUnexpectedError);
  ASSERT_EQ(t->InsertTensor({1, 2}, t6).get_code(), StatusCode::kUnexpectedError);
  t6->Fill<double>(-1);
  ASSERT_TRUE(t->InsertTensor({}, t6).OK());
  ASSERT_EQ(*t == *t6, true);
}
// Regression test: Tensor::ToString() used to fail for tensors that store bool values.
TEST_F(MindDataTestTensorDE, BoolTensor) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2}),
                                                       DataType(DataType::DE_BOOL));
  t->SetItemAt<bool>({0}, true);
  t->SetItemAt<bool>({1}, true);
  std::string out = t->ToString();
  ASSERT_TRUE(out.find("Template type and Tensor type are not compatible") == std::string::npos);
}
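// GetItemAt() should read an element into any integer or floating-point type wide enough
// to hold it, for unsigned, signed, and floating-point tensors alike.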
TEST_F(MindDataTestTensorDE, GetItemAt) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 2}), DataType(DataType::DE_UINT8));
  t->Fill<uint8_t>(254);
  uint64_t o1;
  t->GetItemAt<uint64_t>(&o1, {0, 0});
  ASSERT_EQ(o1, 254);
  uint32_t o2;
  t->GetItemAt<uint32_t>(&o2, {0, 1});
  ASSERT_EQ(o2, 254);
  uint16_t o3;
  t->GetItemAt<uint16_t>(&o3, {1, 0});
  ASSERT_EQ(o3, 254);
  uint8_t o4;
  t->GetItemAt<uint8_t>(&o4, {1, 1});
  ASSERT_EQ(o4, 254);
  std::shared_ptr<Tensor> t2 = std::make_shared<Tensor>(TensorShape({2, 2}), DataType(DataType::DE_INT8));
  t2->Fill<int8_t>(-10);
  int64_t o5;
  t2->GetItemAt<int64_t>(&o5, {0, 0});
  ASSERT_EQ(o5, -10);
  int32_t o6;
  t2->GetItemAt<int32_t>(&o6, {0, 1});
  ASSERT_EQ(o6, -10);
  int16_t o7;
  t2->GetItemAt<int16_t>(&o7, {1, 0});
  ASSERT_EQ(o7, -10);
  int8_t o8;
  t2->GetItemAt<int8_t>(&o8, {1, 1});
  ASSERT_EQ(o8, -10);
  std::shared_ptr<Tensor> t3 = std::make_shared<Tensor>(TensorShape({2, 2}), DataType(DataType::DE_FLOAT32));
  t3->Fill<float>(1.1);
  double o9;
  t3->GetItemAt<double>(&o9, {0, 0});
  ASSERT_FLOAT_EQ(o9, 1.1);
  float o10;
  t3->GetItemAt<float>(&o10, {0, 1});
  ASSERT_FLOAT_EQ(o10, 1.1);
}
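// Move assignment should transfer the contents so every element reads back from the destination.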
TEST_F(MindDataTestTensorDE, OperatorAssign) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 2}), DataType(DataType::DE_UINT8));
  t->Fill<uint8_t>(1);
  std::shared_ptr<Tensor> t2 = std::make_shared<Tensor>(TensorShape({2, 2}), DataType(DataType::DE_UINT8));
  *t2 = std::move(*t);
  uint8_t o;
  t2->GetItemAt(&o, {0, 0});
  ASSERT_EQ(o, 1);
  t2->GetItemAt(&o, {0, 1});
  ASSERT_EQ(o, 1);
  t2->GetItemAt(&o, {1, 0});
  ASSERT_EQ(o, 1);
  t2->GetItemAt(&o, {1, 1});
  ASSERT_EQ(o, 1);
}
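// Strides() are expressed in bytes, so for the same shape they scale with the element size.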
TEST_F(MindDataTestTensorDE, Strides) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({4, 2, 2}), DataType(DataType::DE_UINT8));
  std::vector<dsize_t> x1 = t->Strides();
  std::vector<dsize_t> x2 = {4, 2, 1};
  ASSERT_EQ(x1, x2);
  t = std::make_shared<Tensor>(TensorShape({4, 2, 2}), DataType(DataType::DE_UINT32));
  x1 = t->Strides();
  x2 = {16, 8, 4};
  ASSERT_EQ(x1, x2);
}
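// Helper that validates the cv::Mat view exposed by a CVTensor: shared data pointer,
// matching OpenCV depth, and dims/rows/cols/channels consistent with the TensorShape.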
void checkCvMat(TensorShape shape, DataType type) {
  std::shared_ptr<CVTensor> t = std::make_shared<CVTensor>(shape, type);
  cv::Mat m = t->mat();
  ASSERT_EQ(m.data, t->StartAddr());
  ASSERT_EQ(static_cast<uchar>(m.type()) & static_cast<uchar>(CV_MAT_DEPTH_MASK), type.AsCVType());
  if (shape.Rank() < 4) {
    if (shape.Rank() > 1) {
      for (dsize_t i = 0; i < 2; i++)
        ASSERT_EQ(m.size[static_cast<int>(i)], shape[i]);
    } else if (shape.Rank() == 0) {
      ASSERT_EQ(m.size[0], 1);
      ASSERT_EQ(m.size[1], 1);
    } else {
      ASSERT_EQ(m.size[0], shape[0]);
    }
    if (shape.Rank() == 3) { ASSERT_EQ(m.channels(), shape[2]); }
    ASSERT_EQ(m.dims, 2);
    ASSERT_EQ(m.size.dims(), 2);
    if (shape.Rank() > 0) { ASSERT_EQ(m.rows, shape[0]); }
    if (shape.Rank() > 1) { ASSERT_EQ(m.cols, shape[1]); }
  } else {
    for (dsize_t i = 0; i < shape.Rank(); i++)
      ASSERT_EQ(m.size[static_cast<int>(i)], shape[i]);
    ASSERT_EQ(m.dims, shape.Rank());
    ASSERT_EQ(m.size.dims(), shape.Rank());
    ASSERT_EQ(m.rows, -1);
    ASSERT_EQ(m.cols, -1);
  }
}
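// Runs the cv::Mat consistency checks across scalar, 1-D, 2-D, 3-D, and 4-D shapes
// with uint8 and int16 element types.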
TEST_F(MindDataTestTensorDE, CVTensorBasics) {
  checkCvMat(TensorShape({4, 5}), DataType(DataType::DE_UINT8));
  checkCvMat(TensorShape({4, 5, 3}), DataType(DataType::DE_UINT8));
  checkCvMat(TensorShape({4, 5, 10}), DataType(DataType::DE_UINT8));
  checkCvMat(TensorShape({4, 5, 3, 2}), DataType(DataType::DE_UINT8));
  checkCvMat(TensorShape({4}), DataType(DataType::DE_UINT8));
  checkCvMat(TensorShape({}), DataType(DataType::DE_INT16));
  checkCvMat(TensorShape({4, 5}), DataType(DataType::DE_INT16));
  checkCvMat(TensorShape({4, 5, 3}), DataType(DataType::DE_INT16));
  checkCvMat(TensorShape({4, 5, 10}), DataType(DataType::DE_INT16));
  checkCvMat(TensorShape({4, 5, 3, 2}), DataType(DataType::DE_INT16));
  checkCvMat(TensorShape({4}), DataType(DataType::DE_INT16));
  checkCvMat(TensorShape({}), DataType(DataType::DE_INT16));
}
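// A CVTensor built from a cv::Mat should compare equal to a Tensor holding the same values,
// for both 2-D and 1-D matrices.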
TEST_F(MindDataTestTensorDE, CVTensorFromMat) {
  cv::Mat m(2, 2, CV_8U);
  m.at<uint8_t>(0, 0) = 10;
  m.at<uint8_t>(0, 1) = 20;
  m.at<uint8_t>(1, 0) = 30;
  m.at<uint8_t>(1, 1) = 40;
  std::shared_ptr<CVTensor> cvt = std::make_shared<CVTensor>(m);
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 2}), DataType(DataType::DE_UINT8));
  t->SetItemAt<uint8_t>({0, 0}, 10);
  t->SetItemAt<uint8_t>({0, 1}, 20);
  t->SetItemAt<uint8_t>({1, 0}, 30);
  t->SetItemAt<uint8_t>({1, 1}, 40);
  ASSERT_TRUE(*t == *cvt);
  int size[] = {4};
  cv::Mat m2(1, size, CV_8U);
  m2.at<uint8_t>(0) = 10;
  m2.at<uint8_t>(1) = 20;
  m2.at<uint8_t>(2) = 30;
  m2.at<uint8_t>(3) = 40;
  std::shared_ptr<CVTensor> cvt2 = std::make_shared<CVTensor>(m2);
  std::shared_ptr<Tensor> t2 = std::make_shared<Tensor>(TensorShape({4}), DataType(DataType::DE_UINT8));
  t2->SetItemAt<uint8_t>({0}, 10);
  t2->SetItemAt<uint8_t>({1}, 20);
  t2->SetItemAt<uint8_t>({2}, 30);
  t2->SetItemAt<uint8_t>({3}, 40);
  t2->ExpandDim(1);
  ASSERT_TRUE(*t2 == *cvt2);
}
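// AsCVTensor() should take ownership of the buffer (the source Tensor is emptied), and
// cv::Mat arithmetic on the returned view should update the CVTensor data without reallocating.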
TEST_F(MindDataTestTensorDE, CVTensorAs) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({3, 2}), DataType(DataType::DE_FLOAT64));
  t->Fill<double>(2.2);
  unsigned char *addr = t->StartAddr();
  std::shared_ptr<Tensor> t2 = std::make_shared<Tensor>(TensorShape({3, 2}), DataType(DataType::DE_FLOAT64));
  t2->Fill<double>(4.4);
  std::shared_ptr<CVTensor> ctv = CVTensor::AsCVTensor(t);
  ASSERT_EQ(t->StartAddr(), nullptr);
  ASSERT_EQ(ctv->StartAddr(), addr);
  cv::Mat m = ctv->mat();
  m = 2 * m;
  ASSERT_EQ(ctv->StartAddr(), addr);
  ASSERT_TRUE(*t2 == *ctv);
  MS_LOG(DEBUG) << *t2 << std::endl << *ctv;
}
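// Mat({row}, &mat) should return a slice viewing a single row of the underlying data.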
TEST_F(MindDataTestTensorDE, CVTensorMatSlice) {
  cv::Mat m(2, 3, CV_32S);
  m.at<int32_t>(0, 0) = 10;
  m.at<int32_t>(0, 1) = 20;
  m.at<int32_t>(0, 2) = 30;
  m.at<int32_t>(1, 0) = 40;
  m.at<int32_t>(1, 1) = 50;
  m.at<int32_t>(1, 2) = 60;
  std::shared_ptr<CVTensor> cvt = std::make_shared<CVTensor>(m);
  cv::Mat mat;
  cvt->Mat({1}, &mat);
  cv::Mat m2(3, 1, CV_32S);
  m2.at<int32_t>(0) = 40;
  m2.at<int32_t>(1) = 50;
  m2.at<int32_t>(2) = 60;
  std::shared_ptr<CVTensor> cvt2 = std::make_shared<CVTensor>(mat);
  std::shared_ptr<CVTensor> cvt3 = std::make_shared<CVTensor>(m2);
  ASSERT_TRUE(*cvt2 == *cvt3);
  cvt->Mat({0}, &mat);
  m2.at<int32_t>(0) = 10;
  m2.at<int32_t>(1) = 20;
  m2.at<int32_t>(2) = 30;
  cvt2 = std::make_shared<CVTensor>(mat);
  cvt3 = std::make_shared<CVTensor>(m2);
  ASSERT_TRUE(*cvt2 == *cvt3);
}
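// The typed iterator must walk elements in row-major order, stay consistent after Reshape(),
// and allow writing elements through the iterator.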
TEST_F(MindDataTestTensorDE, TensorIterator) {
  std::vector<uint32_t> values = {1, 2, 3, 4, 5, 6};
  std::vector<uint32_t> values2 = {2, 3, 4, 5, 6, 7};
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({6}), DataType(DataType::DE_UINT32),
                                                       reinterpret_cast<unsigned char *>(&values[0]));
  auto i = t->begin<uint32_t>();
  auto j = values.begin();
  uint32_t ctr = 0;
  for (; i != t->end<uint32_t>(); i++, j++) {
    ASSERT_TRUE(*i == *j);
    ctr++;
  }
  ASSERT_TRUE(ctr == 6);
  t->Reshape(TensorShape{2, 3});
  i = t->begin<uint32_t>();
  j = values.begin();
  ctr = 0;
  for (; i != t->end<uint32_t>(); i++, j++) {
    ASSERT_TRUE(*i == *j);
    ctr++;
  }
  ASSERT_TRUE(ctr == 6);
  for (auto it = t->begin<uint32_t>(); it != t->end<uint32_t>(); it++) {
    *it = *it + 1;
  }
  i = t->begin<uint32_t>();
  j = values2.begin();
  ctr = 0;
  for (; i != t->end<uint32_t>(); i++, j++) {
    ASSERT_TRUE(*i == *j);
    ctr++;
  }
  ASSERT_TRUE(ctr == 6);
}