You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

tensor_test.cc 15 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448
  1. /**
  2. * Copyright 2019 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include <memory>
  17. #include <string>
  18. #include "minddata/dataset/core/client.h"
  19. #include "common/common.h"
  20. #include "gtest/gtest.h"
  21. #include "securec.h"
  22. #include "minddata/dataset/core/tensor.h"
  23. #include "minddata/dataset/core/cv_tensor.h"
  24. #include "minddata/dataset/core/data_type.h"
  25. using namespace mindspore::dataset;
  26. namespace py = pybind11;
  27. class MindDataTestTensorDE : public UT::Common {
  28. public:
  29. MindDataTestTensorDE() {}
  30. void SetUp() { GlobalInit(); }
  31. };
  32. TEST_F(MindDataTestTensorDE, Basics) {
  33. std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_UINT64));
  34. ASSERT_TRUE((t->AllocateBuffer(t->SizeInBytes())).IsOk());
  35. ASSERT_EQ(t->shape(), TensorShape({2, 3}));
  36. ASSERT_EQ(t->type(), DataType::DE_UINT64);
  37. ASSERT_EQ(t->SizeInBytes(), 2 * 3 * 8);
  38. ASSERT_EQ(t->Rank(), 2);
  39. t->SetItemAt<uint64_t>({0, 0}, 1);
  40. t->SetItemAt<uint64_t>({0, 1}, 2);
  41. t->SetItemAt<uint64_t>({0, 2}, 3);
  42. t->SetItemAt<uint64_t>({1, 0}, 4);
  43. t->SetItemAt<uint64_t>({1, 1}, 5);
  44. t->SetItemAt<uint64_t>({1, 2}, 6);
  45. Status rc = t->SetItemAt<uint64_t>({2, 3}, 7);
  46. ASSERT_TRUE(rc.IsError());
  47. uint64_t o;
  48. t->GetItemAt<uint64_t>(&o, {0, 0});
  49. ASSERT_EQ(o, 1);
  50. t->GetItemAt<uint64_t>(&o, {0, 1});
  51. ASSERT_EQ(o, 2);
  52. t->GetItemAt<uint64_t>(&o, {0, 2});
  53. ASSERT_EQ(o, 3);
  54. t->GetItemAt<uint64_t>(&o, {1, 0});
  55. ASSERT_EQ(o, 4);
  56. t->GetItemAt<uint64_t>(&o, {1, 1});
  57. ASSERT_EQ(o, 5);
  58. t->GetItemAt<uint64_t>(&o, {1, 2});
  59. ASSERT_EQ(o, 6);
  60. rc = t->GetItemAt<uint64_t>(&o, {2, 3});
  61. ASSERT_TRUE(rc.IsError());
  62. ASSERT_EQ(t->ToString(), "Tensor (shape: <2,3>, Type: uint64)\n[[1,2,3],[4,5,6]]");
  63. std::vector<uint64_t> x = {1, 2, 3, 4, 5, 6};
  64. std::shared_ptr<Tensor> t2;
  65. Tensor::CreateTensor(&t2, x, TensorShape({2, 3}));
  66. ASSERT_EQ(*t == *t2, true);
  67. ASSERT_EQ(*t != *t2, false);
  68. }
  69. TEST_F(MindDataTestTensorDE, Fill) {
  70. std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 2}), DataType(DataType::DE_FLOAT32));
  71. t->Fill<float>(2.5);
  72. std::vector<float> x = {2.5, 2.5, 2.5, 2.5};
  73. std::shared_ptr<Tensor> t2;
  74. Tensor::CreateTensor(&t2, x, TensorShape({2, 2}));
  75. ASSERT_EQ(*t == *t2, true);
  76. }
  77. TEST_F(MindDataTestTensorDE, Reshape) {
  78. std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 2}), DataType(DataType::DE_UINT8));
  79. t->Fill<uint8_t>(254);
  80. t->Reshape(TensorShape({4}));
  81. std::vector<uint8_t> x = {254, 254, 254, 254};
  82. std::shared_ptr<Tensor> t2;
  83. Tensor::CreateTensor(&t2, x);
  84. ASSERT_EQ(*t == *t2, true);
  85. Status rc = t->Reshape(TensorShape({5}));
  86. ASSERT_TRUE(rc.IsError());
  87. t2->ExpandDim(0);
  88. ASSERT_EQ(t2->shape(), TensorShape({1, 4}));
  89. t2->ExpandDim(2);
  90. ASSERT_EQ(t2->shape(), TensorShape({1, 4, 1}));
  91. rc = t2->ExpandDim(4);
  92. ASSERT_TRUE(rc.IsError());
  93. }
// Verify that move-constructing a Tensor transfers the underlying buffer:
// the destination reuses the exact same memory address, while the moved-from
// tensor is left with an unknown shape/type and a null buffer.
TEST_F(MindDataTestTensorDE, CopyTensor) {
  // Scalar (rank-0) int16 tensor holding -66.
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({}), DataType(DataType::DE_INT16));
  t->SetItemAt<int16_t>({}, -66);
  ASSERT_EQ(t->shape(), TensorShape({}));
  ASSERT_EQ(t->type(), DataType::DE_INT16);
  int16_t o;
  t->GetItemAt<int16_t>(&o, {});
  ASSERT_EQ(o, -66);
  // Remember the buffer address before the move so we can prove the
  // destination took ownership of the same allocation (no copy).
  const unsigned char *addr = t->GetBuffer();
  auto t2 = std::make_shared<Tensor>(std::move(*t));
  ASSERT_EQ(t2->shape(), TensorShape({}));
  ASSERT_EQ(t2->type(), DataType::DE_INT16);
  t2->GetItemAt<int16_t>(&o, {});
  ASSERT_EQ(o, -66);
  const unsigned char *new_addr = t2->GetBuffer();
  ASSERT_EQ(addr, new_addr);
  // The moved-from tensor must be fully emptied, and element access on it
  // must now return an error status.
  ASSERT_EQ(t->shape(), TensorShape::CreateUnknownRankShape());
  ASSERT_EQ(t->type(), DataType::DE_UNKNOWN);
  ASSERT_EQ(t->GetBuffer(), nullptr);
  Status rc = t->GetItemAt<int16_t>(&o, {});
  ASSERT_TRUE(rc.IsError());
}
// Verify InsertTensor() copies a smaller tensor into a larger one at a given
// coordinate: row insertion, scalar insertion at a full coordinate, the
// error cases (out-of-range index, shape mismatch), and whole-tensor
// replacement via the empty coordinate.
TEST_F(MindDataTestTensorDE, InsertTensor) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_FLOAT64));
  std::vector<double> x = {1.1, 2.1, 3.1};
  std::shared_ptr<Tensor> t2;
  Tensor::CreateTensor(&t2, x);
  std::vector<double> y = {1.2, 2.2, 3.2};
  std::shared_ptr<Tensor> t3;
  Tensor::CreateTensor(&t3, y);
  // Insert one 3-element row at each row index of the 2x3 target.
  ASSERT_TRUE(t->InsertTensor({0}, t2).OK());
  ASSERT_TRUE(t->InsertTensor({1}, t3).OK());
  std::vector<double> z = {1.1, 2.1, 3.1, 1.2, 2.2, 3.2};
  std::shared_ptr<Tensor> t4;
  Tensor::CreateTensor(&t4, z, TensorShape({2, 3}));
  ASSERT_EQ(*t == *t4, true);
  // A scalar tensor can be inserted at a fully-specified coordinate.
  std::shared_ptr<Tensor> t5;
  Tensor::CreateTensor<double>(&t5, 0);
  ASSERT_TRUE(t->InsertTensor({1, 2}, t5).OK());
  z[5] = 0;
  std::shared_ptr<Tensor> t6;
  Tensor::CreateTensor(&t6, z, TensorShape({2, 3}));
  ASSERT_EQ(*t == *t6, true);
  // Row index 2 is out of range for a 2x3 tensor.
  ASSERT_EQ(t->InsertTensor({2}, t5).get_code(), StatusCode::kUnexpectedError);
  // A scalar does not fit where a whole row is addressed.
  ASSERT_EQ(t->InsertTensor({1}, t5).get_code(), StatusCode::kUnexpectedError);
  // A 2x3 tensor does not fit at a single-element coordinate.
  ASSERT_EQ(t->InsertTensor({1, 2}, t6).get_code(), StatusCode::kUnexpectedError);
  // Inserting at the empty coordinate replaces the entire tensor contents.
  t6->Fill<double>(-1);
  ASSERT_TRUE(t->InsertTensor({}, t6).OK());
  ASSERT_EQ(*t == *t6, true);
}
  144. // Test the bug of Tensor::ToString will exec failed for Tensor which store bool values
  145. TEST_F(MindDataTestTensorDE, BoolTensor) {
  146. std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2}), DataType(DataType::DE_BOOL));
  147. t->SetItemAt<bool>({0}, true);
  148. t->SetItemAt<bool>({1}, true);
  149. std::string out = t->ToString();
  150. ASSERT_TRUE(out.find("Template type and Tensor type are not compatible") == std::string::npos);
  151. }
  152. TEST_F(MindDataTestTensorDE, GetItemAt) {
  153. std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 2}), DataType(DataType::DE_UINT8));
  154. t->Fill<uint8_t>(254);
  155. uint64_t o1;
  156. t->GetItemAt<uint64_t>(&o1, {0, 0});
  157. ASSERT_EQ(o1, 254);
  158. uint32_t o2;
  159. t->GetItemAt<uint32_t>(&o2, {0, 1});
  160. ASSERT_EQ(o2, 254);
  161. uint16_t o3;
  162. t->GetItemAt<uint16_t>(&o3, {1, 0});
  163. ASSERT_EQ(o3, 254);
  164. uint8_t o4;
  165. t->GetItemAt<uint8_t>(&o4, {1, 1});
  166. ASSERT_EQ(o4, 254);
  167. std::shared_ptr<Tensor> t2 = std::make_shared<Tensor>(TensorShape({2, 2}), DataType(DataType::DE_INT8));
  168. t2->Fill<int8_t>(-10);
  169. int64_t o5;
  170. t2->GetItemAt<int64_t>(&o5, {0, 0});
  171. ASSERT_EQ(o5, -10);
  172. int32_t o6;
  173. t2->GetItemAt<int32_t>(&o6, {0, 1});
  174. ASSERT_EQ(o6, -10);
  175. int16_t o7;
  176. t2->GetItemAt<int16_t>(&o7, {1, 0});
  177. ASSERT_EQ(o7, -10);
  178. int8_t o8;
  179. t2->GetItemAt<int8_t>(&o8, {1, 1});
  180. ASSERT_EQ(o8, -10);
  181. std::shared_ptr<Tensor> t3 = std::make_shared<Tensor>(TensorShape({2, 2}), DataType(DataType::DE_FLOAT32));
  182. t3->Fill<float>(1.1);
  183. double o9;
  184. t3->GetItemAt<double>(&o9, {0, 0});
  185. ASSERT_FLOAT_EQ(o9, 1.1);
  186. float o10;
  187. t3->GetItemAt<float>(&o10, {0, 1});
  188. ASSERT_FLOAT_EQ(o10, 1.1);
  189. }
  190. TEST_F(MindDataTestTensorDE, OperatorAssign) {
  191. std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 2}), DataType(DataType::DE_UINT8));
  192. t->Fill<uint8_t>(1);
  193. std::shared_ptr<Tensor> t2 = std::make_shared<Tensor>(TensorShape({2, 2}), DataType(DataType::DE_UINT8));
  194. *t2 = std::move(*t);
  195. uint8_t o;
  196. t2->GetItemAt(&o, {0, 0});
  197. ASSERT_EQ(o, 1);
  198. t2->GetItemAt(&o, {0, 1});
  199. ASSERT_EQ(o, 1);
  200. t2->GetItemAt(&o, {1, 0});
  201. ASSERT_EQ(o, 1);
  202. t2->GetItemAt(&o, {1, 1});
  203. ASSERT_EQ(o, 1);
  204. }
  205. TEST_F(MindDataTestTensorDE, Strides) {
  206. std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({4, 2, 2}), DataType(DataType::DE_UINT8));
  207. std::vector<dsize_t> x1 = t->Strides();
  208. std::vector<dsize_t> x2 = {4, 2, 1};
  209. ASSERT_EQ(x1, x2);
  210. t = std::make_shared<Tensor>(TensorShape({4, 2, 2}), DataType(DataType::DE_UINT32));
  211. x1 = t->Strides();
  212. x2 = {16, 8, 4};
  213. ASSERT_EQ(x1, x2);
  214. }
// Check that a CVTensor of the given shape/type exposes a cv::Mat view that
// shares the tensor's buffer, and that the tensor shape maps onto cv::Mat
// dimensions the way the assertions below describe:
// - rank 0 -> 1x1 mat, rank 1 -> row vector, rank 2 -> rows x cols,
//   rank 3 -> rows x cols with shape[2] channels; all reported as 2-D;
// - rank >= 4 -> a true n-dimensional mat (rows/cols report -1).
void checkCvMat(TensorShape shape, DataType type) {
  std::shared_ptr<CVTensor> t = std::make_shared<CVTensor>(shape, type);
  cv::Mat m = t->mat();
  // The mat must be a zero-copy view over the tensor's own buffer.
  ASSERT_EQ(m.data, t->GetBuffer());
  // Mat depth must match the tensor's DataType translated to a CV type.
  ASSERT_EQ(static_cast<uchar>(m.type()) & static_cast<uchar>(CV_MAT_DEPTH_MASK), type.AsCVType());
  if (shape.Rank() < 4) {
    if (shape.Rank() > 1) {
      // Ranks 2 and 3: the first two tensor dims become the mat dims.
      for (dsize_t i = 0; i < 2; i++) ASSERT_EQ(m.size[static_cast<int>(i)], shape[i]);
    } else if (shape.Rank() == 0) {
      // Scalar tensor: represented as a 1x1 mat.
      ASSERT_EQ(m.size[0], 1);
      ASSERT_EQ(m.size[1], 1);
    } else {
      // Rank 1: single mat dimension carries the tensor length.
      ASSERT_EQ(m.size[0], shape[0]);
    }
    if (shape.Rank() == 3) {
      // The third tensor dim is folded into mat channels.
      ASSERT_EQ(m.channels(), shape[2]);
    }
    // Everything below rank 4 is still a 2-D mat in OpenCV terms.
    ASSERT_EQ(m.dims, 2);
    ASSERT_EQ(m.size.dims(), 2);
    if (shape.Rank() > 0) {
      ASSERT_EQ(m.rows, shape[0]);
    }
    if (shape.Rank() > 1) {
      ASSERT_EQ(m.cols, shape[1]);
    }
  } else {
    // Rank >= 4: a genuinely n-dimensional mat; every dim must match.
    for (dsize_t i = 0; i < shape.Rank(); i++) ASSERT_EQ(m.size[static_cast<int>(i)], shape[i]);
    ASSERT_EQ(m.dims, shape.Rank());
    ASSERT_EQ(m.size.dims(), shape.Rank());
    // OpenCV reports rows/cols as -1 for mats with more than 2 dims.
    ASSERT_EQ(m.rows, -1);
    ASSERT_EQ(m.cols, -1);
  }
}
  248. TEST_F(MindDataTestTensorDE, CVTensorBasics) {
  249. checkCvMat(TensorShape({4, 5}), DataType(DataType::DE_UINT8));
  250. checkCvMat(TensorShape({4, 5, 3}), DataType(DataType::DE_UINT8));
  251. checkCvMat(TensorShape({4, 5, 10}), DataType(DataType::DE_UINT8));
  252. checkCvMat(TensorShape({4, 5, 3, 2}), DataType(DataType::DE_UINT8));
  253. checkCvMat(TensorShape({4}), DataType(DataType::DE_UINT8));
  254. checkCvMat(TensorShape({}), DataType(DataType::DE_INT16));
  255. checkCvMat(TensorShape({4, 5}), DataType(DataType::DE_INT16));
  256. checkCvMat(TensorShape({4, 5, 3}), DataType(DataType::DE_INT16));
  257. checkCvMat(TensorShape({4, 5, 10}), DataType(DataType::DE_INT16));
  258. checkCvMat(TensorShape({4, 5, 3, 2}), DataType(DataType::DE_INT16));
  259. checkCvMat(TensorShape({4}), DataType(DataType::DE_INT16));
  260. checkCvMat(TensorShape({}), DataType(DataType::DE_INT16));
  261. }
  262. TEST_F(MindDataTestTensorDE, CVTensorFromMat) {
  263. cv::Mat m(2, 2, CV_8U);
  264. m.at<uint8_t>(0, 0) = 10;
  265. m.at<uint8_t>(0, 1) = 20;
  266. m.at<uint8_t>(1, 0) = 30;
  267. m.at<uint8_t>(1, 1) = 40;
  268. std::shared_ptr<CVTensor> cvt = std::make_shared<CVTensor>(m);
  269. std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 2}), DataType(DataType::DE_UINT8));
  270. t->SetItemAt<uint8_t>({0, 0}, 10);
  271. t->SetItemAt<uint8_t>({0, 1}, 20);
  272. t->SetItemAt<uint8_t>({1, 0}, 30);
  273. t->SetItemAt<uint8_t>({1, 1}, 40);
  274. ASSERT_TRUE(*t == *cvt);
  275. int size[] = {4};
  276. cv::Mat m2(1, size, CV_8U);
  277. m2.at<uint8_t>(0) = 10;
  278. m2.at<uint8_t>(1) = 20;
  279. m2.at<uint8_t>(2) = 30;
  280. m2.at<uint8_t>(3) = 40;
  281. std::shared_ptr<CVTensor> cvt2 = std::make_shared<CVTensor>(m2);
  282. std::shared_ptr<Tensor> t2 = std::make_shared<Tensor>(TensorShape({4}), DataType(DataType::DE_UINT8));
  283. t2->SetItemAt<uint8_t>({0}, 10);
  284. t2->SetItemAt<uint8_t>({1}, 20);
  285. t2->SetItemAt<uint8_t>({2}, 30);
  286. t2->SetItemAt<uint8_t>({3}, 40);
  287. t2->ExpandDim(1);
  288. ASSERT_TRUE(*t2 == *cvt2);
  289. }
// Verify CVTensor::AsCVTensor() takes over the source tensor's buffer
// without copying, and that arithmetic through the cv::Mat view ends up in
// that same buffer.
TEST_F(MindDataTestTensorDE, CVTensorAs) {
  std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({3, 2}), DataType(DataType::DE_FLOAT64));
  t->Fill<double>(2.2);
  // Remember the original buffer address to prove ownership transfer.
  const unsigned char *addr = t->GetBuffer();
  // Reference tensor holding the expected post-doubling values (2.2 * 2).
  std::shared_ptr<Tensor> t2 = std::make_shared<Tensor>(TensorShape({3, 2}), DataType(DataType::DE_FLOAT64));
  t2->Fill<double>(4.4);
  // Conversion empties the source and reuses its buffer in the CVTensor.
  std::shared_ptr<CVTensor> ctv = CVTensor::AsCVTensor(t);
  ASSERT_EQ(t->GetBuffer(), nullptr);
  ASSERT_EQ(ctv->GetBuffer(), addr);
  cv::Mat m = ctv->mat();
  // Double every element via the mat view; the asserts below show the
  // result lands in the same buffer (address unchanged, values == 4.4).
  m = 2 * m;
  ASSERT_EQ(ctv->GetBuffer(), addr);
  ASSERT_TRUE(*t2 == *ctv);
  MS_LOG(DEBUG) << *t2 << std::endl << *ctv;
}
  305. TEST_F(MindDataTestTensorDE, CVTensorMatSlice) {
  306. cv::Mat m(2, 3, CV_32S);
  307. m.at<int32_t>(0, 0) = 10;
  308. m.at<int32_t>(0, 1) = 20;
  309. m.at<int32_t>(0, 2) = 30;
  310. m.at<int32_t>(1, 0) = 40;
  311. m.at<int32_t>(1, 1) = 50;
  312. m.at<int32_t>(1, 2) = 60;
  313. std::shared_ptr<CVTensor> cvt = std::make_shared<CVTensor>(m);
  314. cv::Mat mat;
  315. cvt->Mat({1}, &mat);
  316. cv::Mat m2(3, 1, CV_32S);
  317. m2.at<int32_t>(0) = 40;
  318. m2.at<int32_t>(1) = 50;
  319. m2.at<int32_t>(2) = 60;
  320. std::shared_ptr<CVTensor> cvt2 = std::make_shared<CVTensor>(mat);
  321. std::shared_ptr<CVTensor> cvt3 = std::make_shared<CVTensor>(m2);
  322. ASSERT_TRUE(*cvt2 == *cvt3);
  323. cvt->Mat({0}, &mat);
  324. m2.at<int32_t>(0) = 10;
  325. m2.at<int32_t>(1) = 20;
  326. m2.at<int32_t>(2) = 30;
  327. cvt2 = std::make_shared<CVTensor>(mat);
  328. cvt3 = std::make_shared<CVTensor>(m2);
  329. ASSERT_TRUE(*cvt2 == *cvt3);
  330. }
  331. TEST_F(MindDataTestTensorDE, TensorIterator) {
  332. std::vector<uint32_t> values = {1, 2, 3, 4, 5, 6};
  333. std::vector<uint32_t> values2 = {2, 3, 4, 5, 6, 7};
  334. std::shared_ptr<Tensor> t;
  335. Tensor::CreateTensor(&t, values);
  336. auto i = t->begin<uint32_t>();
  337. auto j = values.begin();
  338. uint32_t ctr = 0;
  339. for (; i != t->end<uint32_t>(); i++, j++) {
  340. ASSERT_TRUE(*i == *j);
  341. ctr++;
  342. }
  343. ASSERT_TRUE(ctr == 6);
  344. t->Reshape(TensorShape{2, 3});
  345. i = t->begin<uint32_t>();
  346. j = values.begin();
  347. ctr = 0;
  348. for (; i != t->end<uint32_t>(); i++, j++) {
  349. ASSERT_TRUE(*i == *j);
  350. ctr++;
  351. }
  352. ASSERT_TRUE(ctr == 6);
  353. for (auto it = t->begin<uint32_t>(); it != t->end<uint32_t>(); it++) {
  354. *it = *it + 1;
  355. }
  356. i = t->begin<uint32_t>();
  357. j = values2.begin();
  358. ctr = 0;
  359. for (; i != t->end<uint32_t>(); i++, j++) {
  360. ASSERT_TRUE(*i == *j);
  361. ctr++;
  362. }
  363. ASSERT_TRUE(ctr == 6);
  364. }
  365. TEST_F(MindDataTestTensorDE, TensorSlice) {
  366. std::shared_ptr<Tensor> t;
  367. Tensor::CreateTensor(&t, std::vector<dsize_t>{0, 1, 2, 3, 4});
  368. std::shared_ptr<Tensor> t2;
  369. auto x = std::vector<dsize_t>{0, 3, 4};
  370. std::shared_ptr<Tensor> expected;
  371. Tensor::CreateTensor(&expected, x);
  372. t->Slice(&t2, x);
  373. ASSERT_EQ(*t2, *expected);
  374. t->Slice(&t2, std::vector<dsize_t>{0, 1, 2, 3, 4});
  375. ASSERT_EQ(*t2, *t);
  376. }
  377. TEST_F(MindDataTestTensorDE, TensorConcatenate) {
  378. std::vector<uint32_t> values1 = {1, 2, 3, 0, 0, 0};
  379. std::vector<uint32_t> values2 = {4, 5, 6};
  380. std::vector<uint32_t> expected = {1, 2, 3, 4, 5, 6};
  381. std::shared_ptr<Tensor> t1;
  382. Tensor::CreateTensor(&t1, values1);
  383. std::shared_ptr<Tensor> t2;
  384. Tensor::CreateTensor(&t2, values2);
  385. std::shared_ptr<Tensor> out;
  386. Tensor::CreateTensor(&out, expected);
  387. Status s = t1->Concatenate({3}, t2);
  388. EXPECT_TRUE(s.IsOk());
  389. auto i = out->begin<uint32_t>();
  390. auto j = t1->begin<uint32_t>();
  391. for (; i != out->end<uint32_t>(); i++, j++) {
  392. ASSERT_TRUE(*i == *j);
  393. }
  394. // should fail if the concatenated vector is too large
  395. s = t1->Concatenate({5}, t2);
  396. EXPECT_FALSE(s.IsOk());
  397. }
  398. TEST_F(MindDataTestTensorDE, TensorEmpty) {
  399. std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_UINT64));
  400. ASSERT_TRUE(t->HasData());
  401. }
// Exercise Invalidate() on a populated tensor.
// NOTE(review): asserting HasData() is true *after* Invalidate() looks
// suspicious given the test name — confirm Invalidate()'s intended
// semantics (it may only reset metadata while keeping the allocation,
// in which case this assertion is correct as written).
TEST_F(MindDataTestTensorDE, TensorEmptyInvalidate) {
  std::vector<uint32_t> values1 = {1, 2, 3, 0, 0, 0};
  std::shared_ptr<Tensor> t;
  Tensor::CreateTensor(&t, values1);
  t->Invalidate();
  ASSERT_TRUE(t->HasData());
}