You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

graph_util.cc 28 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766
  1. /**
  2. * Copyright 2020-2021 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
#include "tools/common/graph_util.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <ctime>
#include <functional>
#include <memory>
#include <numeric>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "schema/inner/model_generated.h"
#include "tools/common/tensor_util.h"
#include "tools/converter/quantizer/bitpacking.h"
#include "tools/common/node_util.h"
#include "src/common/log_adapter.h"
#include "src/common/utils.h"
  28. namespace mindspore {
  29. namespace lite {
  30. OpDefCopyer GetSimpleOpCopyer() {
  31. return [](CNodeT *inCNode) -> std::unique_ptr<CNodeT> {
  32. std::unique_ptr<CNodeT> newCNode = std::make_unique<CNodeT>();
  33. if (newCNode == nullptr) {
  34. return nullptr;
  35. }
  36. newCNode->name = inCNode->name;
  37. newCNode->quantType = inCNode->quantType;
  38. newCNode->primitive = std::make_unique<schema::PrimitiveT>();
  39. newCNode->primitive->value.type = inCNode->primitive->value.type;
  40. return newCNode;
  41. };
  42. }
  43. std::vector<size_t> GetInputNodeIdx(const schema::MetaGraphT &graphT, const size_t &nodeIdx, const int inputIndexIdx) {
  44. return GetInputNodeIdx(graphT, *(graphT.nodes.at(nodeIdx).get()), inputIndexIdx);
  45. }
  46. std::vector<size_t> GetInputNodeIdx(const schema::MetaGraphT &graphT, const CNodeT &node, const int inputIndexIdx) {
  47. std::vector<uint32_t> inputIndexes;
  48. if (inputIndexIdx == -1) {
  49. inputIndexes = node.inputIndex;
  50. } else {
  51. MS_ASSERT(node.inputIndex.size() > inputIndexIdx);
  52. inputIndexes.emplace_back(node.inputIndex.at(inputIndexIdx));
  53. }
  54. std::set<size_t> inputNodeIdx;
  55. for (uint32_t inputIdx : inputIndexes) {
  56. auto linkedPreIdx = GetLinkedPreIdx(graphT, inputIdx);
  57. inputNodeIdx.insert(linkedPreIdx.begin(), linkedPreIdx.end());
  58. }
  59. std::vector<size_t> ret;
  60. ret.insert(ret.end(), inputNodeIdx.begin(), inputNodeIdx.end());
  61. return ret;
  62. }
  63. std::vector<size_t> GetOutputNodeIdx(const schema::MetaGraphT &graphT, const size_t &nodeIdx,
  64. const int outputIndexIdx) {
  65. return GetOutputNodeIdx(graphT, *(graphT.nodes.at(nodeIdx).get()), outputIndexIdx);
  66. }
  67. std::vector<size_t> GetOutputNodeIdx(const schema::MetaGraphT &graphT, const CNodeT &node, const int outputIndexIdx) {
  68. std::vector<uint32_t> outputIndexes;
  69. if (outputIndexIdx == -1) {
  70. outputIndexes = node.outputIndex;
  71. } else {
  72. MS_ASSERT(node.outputIndex.size() > outputIndexIdx);
  73. outputIndexes.emplace_back(node.outputIndex.at(outputIndexIdx));
  74. }
  75. std::set<size_t> outputNodeIdx;
  76. for (uint32_t outputIdx : outputIndexes) {
  77. auto linkedPostIdx = GetLinkedPostIdx(graphT, outputIdx);
  78. outputNodeIdx.insert(linkedPostIdx.begin(), linkedPostIdx.end());
  79. }
  80. std::vector<size_t> ret;
  81. ret.insert(ret.end(), outputNodeIdx.begin(), outputNodeIdx.end());
  82. return ret;
  83. }
  84. std::vector<size_t> GetLinkedPreIdx(const schema::MetaGraphT &graphT, const size_t &tensorIdx) {
  85. std::vector<size_t> preNodeIdx;
  86. for (size_t i = 0; i < graphT.nodes.size(); i++) {
  87. auto &oldNode = graphT.nodes.at(i);
  88. if (oldNode == nullptr) {
  89. continue;
  90. }
  91. auto outputIndexes = oldNode->outputIndex;
  92. if (IsContain<uint32_t>(outputIndexes, tensorIdx)) {
  93. preNodeIdx.emplace_back(i);
  94. }
  95. }
  96. return preNodeIdx;
  97. }
  98. std::vector<size_t> GetLinkedPostIdx(const schema::MetaGraphT &graphT, const size_t &tensorIdx) {
  99. std::vector<size_t> postNodeIdx;
  100. for (size_t i = 0; i < graphT.nodes.size(); i++) {
  101. auto &oldNode = graphT.nodes.at(i);
  102. if (oldNode == nullptr) {
  103. continue;
  104. }
  105. auto inputIndexes = oldNode->inputIndex;
  106. if (IsContain<uint32_t>(inputIndexes, tensorIdx)) {
  107. postNodeIdx.emplace_back(i);
  108. }
  109. }
  110. return postNodeIdx;
  111. }
  112. STATUS IsolateNode(schema::MetaGraphT *graphT, CNodeT *node) {
  113. MS_ASSERT(graphT != nullptr);
  114. MS_ASSERT(node != nullptr);
  115. size_t nodeIdx = 0;
  116. for (size_t i = 0; i < graphT->nodes.size(); i++) {
  117. auto &inNode = graphT->nodes.at(i);
  118. MS_ASSERT(inNode != nullptr);
  119. if (inNode->name == node->name) {
  120. nodeIdx = i;
  121. break;
  122. }
  123. }
  124. auto inputTensorIdxes = node->inputIndex;
  125. auto outputTensorIdxes = node->outputIndex;
  126. if (inputTensorIdxes.empty()) {
  127. MS_LOG(ERROR) << "Node " << node->name.c_str() << "should has no inputs";
  128. return RET_ERROR;
  129. }
  130. if (outputTensorIdxes.size() != 1) {
  131. MS_LOG(ERROR) << "FakeQuantNode " << node->name.c_str()
  132. << "should has 1 output, in fact: " << outputTensorIdxes.size();
  133. return RET_ERROR;
  134. }
  135. auto inDataTensorIdx = inputTensorIdxes.front();
  136. auto outDataTensorIdx = outputTensorIdxes.front();
  137. MS_ASSERT(graphT->allTensors.size() > inDataTensorIdx);
  138. auto &gOutTensorIdx = graphT->outputIndex;
  139. for (auto iter = gOutTensorIdx.begin(); iter != gOutTensorIdx.end(); iter++) {
  140. if (*iter == outDataTensorIdx) {
  141. *iter = inDataTensorIdx;
  142. break;
  143. }
  144. }
  145. // find poseNode
  146. auto postNodeIdxes = GetOutputNodeIdx(*graphT, nodeIdx, 0);
  147. for (auto postNodeIdx : postNodeIdxes) {
  148. MS_ASSERT(graphT->nodes.size() > postNodeIdx);
  149. auto &postNode = graphT->nodes.at(postNodeIdx);
  150. MS_ASSERT(postNode != nullptr);
  151. for (auto iter = postNode->inputIndex.begin(); iter != postNode->inputIndex.end(); iter++) {
  152. if (*iter == outDataTensorIdx) {
  153. *iter = inDataTensorIdx;
  154. break;
  155. }
  156. }
  157. }
  158. RemoveTensor(graphT, outputTensorIdxes);
  159. node->inputIndex.clear();
  160. node->outputIndex.clear();
  161. return RET_OK;
  162. }
  163. STATUS IsolateOneWayNode(schema::MetaGraphT *graph, size_t subGraphIdx, size_t nodeIdx, bool removeTensor) {
  164. MS_ASSERT(graph != nullptr);
  165. return IsolateOneWayNode(graph, nodeIdx, removeTensor);
  166. }
// Isolates the node at position `nodeIdx` in graphT->nodes. The node may
// have at most one producing node and at most one output tensor; consumers
// of its output (and the graph output list) are rewired to its first input
// tensor. When removeTensor is true the node's output tensors are deleted
// from the tensor pool. The node itself remains in the node list with its
// input/output index lists cleared.
STATUS IsolateOneWayNode(schema::MetaGraphT *graphT, size_t nodeIdx, bool removeTensor) {
  MS_ASSERT(graphT != nullptr);
  if (graphT->nodes.size() <= nodeIdx) {
    MS_LOG(ERROR) << "nodeIdx out of range: " << nodeIdx;
    return RET_PARAM_INVALID;
  }
  CNodeT *node = graphT->nodes.at(nodeIdx).get();
  if (node == nullptr) {
    MS_LOG(ERROR) << "node is null";
    return RET_NULL_PTR;
  }
  auto inputTensorIdxes = node->inputIndex;
  auto outputTensorIdxes = node->outputIndex;
  auto preNodeIdxes = GetInputNodeIdx(*graphT, nodeIdx);
  // Rewiring is only well-defined for a single-producer / single-output chain.
  if (preNodeIdxes.size() > 1 || outputTensorIdxes.size() > 1) {
    MS_LOG(ERROR) << "Only support node who has no more than one input and one output";
    return RET_ERROR;
  }
  if (inputTensorIdxes.empty()) {
    MS_LOG(ERROR) << "Error, " << nodeIdx << "th node has no input tensor";
    return RET_ERROR;
  }
  auto inDataTensorIdx = inputTensorIdxes.front();
  if (!outputTensorIdxes.empty()) {
    auto outDataTensorIdx = outputTensorIdxes.front();
    MS_ASSERT(graphT->allTensors.size() > inDataTensorIdx);
    MS_ASSERT(graphT->allTensors.at(inDataTensorIdx) != nullptr);
    // If the output was a graph output, the input tensor takes its place.
    auto &gOutTensorIdx = graphT->outputIndex;
    for (auto iter = gOutTensorIdx.begin(); iter != gOutTensorIdx.end(); iter++) {
      if (*iter == outDataTensorIdx) {
        *iter = inDataTensorIdx;
        break;
      }
    }
    // Redirect every consumer of the output tensor to the input tensor.
    auto postNodeIdxes = GetOutputNodeIdx(*graphT, nodeIdx, 0);
    for (auto postNodeIdx : postNodeIdxes) {
      MS_ASSERT(graphT->nodes.size() > postNodeIdx);
      auto &postNode = graphT->nodes.at(postNodeIdx);
      MS_ASSERT(postNode != nullptr);
      for (auto iter = postNode->inputIndex.begin(); iter != postNode->inputIndex.end(); iter++) {
        if (*iter == outDataTensorIdx) {
          *iter = inDataTensorIdx;
          break;
        }
      }
    }
  }
  if (removeTensor) {
    // now all node's outputTensors are useless
    // remove all node's outputTensors
    auto status = RemoveTensor(graphT, outputTensorIdxes);
    if (status != RET_OK) {
      MS_LOG(ERROR) << "RemoveOutputTensors of node " << node->name.c_str() << "failed";
      return RET_ERROR;
    }
  }
  node->inputIndex.clear();
  node->outputIndex.clear();
  return RET_OK;
}
  228. STATUS IsolateOneWayNode(schema::MetaGraphT *graphT, CNodeT *node, bool removeTensor) {
  229. MS_ASSERT(graphT != nullptr);
  230. MS_ASSERT(node != nullptr);
  231. bool isSubNode = false;
  232. size_t nodeIdx = 0;
  233. for (size_t i = 0; i < graphT->nodes.size(); i++) {
  234. auto &inNode = graphT->nodes.at(i);
  235. MS_ASSERT(inNode != nullptr);
  236. if (inNode->name == node->name) {
  237. isSubNode = true;
  238. nodeIdx = i;
  239. break;
  240. }
  241. }
  242. if (!isSubNode) {
  243. MS_LOG(ERROR) << "Node " << node->name.c_str() << "is not in graphT " << graphT->name.c_str();
  244. return RET_PARAM_INVALID;
  245. } else {
  246. return IsolateOneWayNode(graphT, nodeIdx, removeTensor);
  247. }
  248. }
// Erases the listed tensors from graphT->allTensors. Unless forceDelete is
// set, a tensor whose reference count is still above one is kept. Each erase
// shifts every higher tensor index down by one, so the graph/subgraph index
// lists, every node's index lists, and the remaining entries of
// toDeleteTensorIdxes itself are renumbered after each deletion.
STATUS RemoveTensor(schema::MetaGraphT *graphT, std::vector<uint32_t> toDeleteTensorIdxes, bool forceDelete) {
  MS_ASSERT(graphT != nullptr);
  for (auto iter = toDeleteTensorIdxes.begin(); iter != toDeleteTensorIdxes.end();) {
    uint32_t deleteIdx = *iter;
    if (!forceDelete) {
      // Skip tensors that still have other users.
      if (GetRefCount(graphT, deleteIdx) > 1) {
        iter++;
        continue;
      }
    }
    // update graph input indices
    for (auto gInIdx = graphT->inputIndex.begin(); gInIdx != graphT->inputIndex.end(); gInIdx++) {
      if (*gInIdx > deleteIdx) {
        (*gInIdx)--;
      }
    }
    // update graph output indices
    for (auto gOutIdx = graphT->outputIndex.begin(); gOutIdx != graphT->outputIndex.end(); gOutIdx++) {
      if (*gOutIdx > deleteIdx) {
        (*gOutIdx)--;
      }
    }
    for (auto &subgraph : graphT->subGraph) {
      // update subgraph input indices
      for (auto gInIdx = subgraph->inputIndices.begin(); gInIdx != subgraph->inputIndices.end(); gInIdx++) {
        if (*gInIdx > deleteIdx) {
          (*gInIdx)--;
        }
      }
      // update subgraph output indices
      for (auto gOutIdx = subgraph->outputIndices.begin(); gOutIdx != subgraph->outputIndices.end(); gOutIdx++) {
        if (*gOutIdx > deleteIdx) {
          (*gOutIdx)--;
        }
      }
      // update subgraph tensor indices
      for (auto idx = subgraph->tensorIndices.begin(); idx != subgraph->tensorIndices.end(); idx++) {
        if (*idx > deleteIdx) {
          (*idx)--;
        }
      }
    }
    // update nodes indexes
    for (auto node_iter = graphT->nodes.begin(); node_iter != graphT->nodes.end(); node_iter++) {
      // update nodes input indexes
      UpdateNodeIndex((*node_iter).get(), deleteIdx);
    }
    // update deleteTensorIdx: the remaining to-delete indices shift down too
    for (auto selfIt = toDeleteTensorIdxes.begin(); selfIt != toDeleteTensorIdxes.end(); selfIt++) {
      if (*selfIt > deleteIdx) {
        (*selfIt)--;
      }
    }
    graphT->allTensors.erase(graphT->allTensors.begin() + deleteIdx);
    iter = toDeleteTensorIdxes.erase(iter);
  }
  return RET_OK;
}
  307. STATUS UpdateNodeIndex(CNodeT *node, uint32_t deleteIdx) {
  308. MS_ASSERT(node != nullptr);
  309. for (auto inIdxIt = node->inputIndex.begin(); inIdxIt != node->inputIndex.end();) {
  310. if (*inIdxIt == deleteIdx) {
  311. inIdxIt = node->inputIndex.erase(inIdxIt);
  312. } else {
  313. if (*inIdxIt > deleteIdx) {
  314. (*inIdxIt)--;
  315. }
  316. inIdxIt++;
  317. }
  318. }
  319. // update nodes output indexes
  320. for (auto outIdxIt = node->outputIndex.begin(); outIdxIt != node->outputIndex.end();) {
  321. if (*outIdxIt == deleteIdx) {
  322. outIdxIt = node->outputIndex.erase(outIdxIt);
  323. } else {
  324. if (*outIdxIt > deleteIdx) {
  325. (*outIdxIt)--;
  326. }
  327. outIdxIt++;
  328. }
  329. }
  330. return RET_OK;
  331. }
  332. STATUS AddTensor2Node(schema::MetaGraphT *graphT, uint32_t nodeIdx, std::unique_ptr<TensorT> tensor,
  333. InsertPlace place) {
  334. if (nodeIdx >= graphT->nodes.size()) {
  335. MS_LOG(ERROR) << "nodeIdx out of range: " << nodeIdx;
  336. return RET_PARAM_INVALID;
  337. }
  338. graphT->allTensors.emplace_back(std::move(tensor));
  339. uint32_t newTensorIdx = graphT->allTensors.size() - 1;
  340. auto node = graphT->nodes.at(nodeIdx).get();
  341. MS_ASSERT(node != nullptr);
  342. if (place == kBefore) {
  343. node->inputIndex.emplace_back(newTensorIdx);
  344. } else {
  345. node->outputIndex.emplace_back(newTensorIdx);
  346. }
  347. return RET_OK;
  348. }
  349. STATUS ReplaceTensorOfNode(schema::MetaGraphT *graphT, uint32_t nodeIdx, uint32_t inTensorIdx,
  350. std::unique_ptr<TensorT> tensor) {
  351. MS_ASSERT(graphT != nullptr);
  352. if (nodeIdx >= graphT->nodes.size()) {
  353. MS_LOG(ERROR) << "nodeIdx out of range: " << nodeIdx;
  354. return RET_PARAM_INVALID;
  355. }
  356. auto node = graphT->nodes.at(nodeIdx).get();
  357. MS_ASSERT(node != nullptr);
  358. if (inTensorIdx >= graphT->allTensors.size()) {
  359. MS_LOG(ERROR) << "inTensorIdx out of range: " << nodeIdx;
  360. return RET_PARAM_INVALID;
  361. }
  362. if (!IsContain(node->inputIndex, inTensorIdx)) {
  363. MS_LOG(ERROR) << "inTensorIdx(" << inTensorIdx << ") is not a inputIdx of node(" << nodeIdx << ")";
  364. return RET_PARAM_INVALID;
  365. }
  366. graphT->allTensors.at(inTensorIdx).swap(tensor);
  367. return RET_OK;
  368. }
// Packs a tensor's quantized data down to `bit_num` bits per value, in place.
// 1-7 bits: the raw buffer is read as int8 values (one per byte);
// 9-15 bits: the raw buffer is read as int16 values (two bytes per element,
// element count from the dims product). bit_num of exactly 8 or 16, or any
// out-of-range value, leaves the tensor untouched and returns RET_OK.
int DoBitPack(const int &bit_num, schema::TensorT *tensor_input) {
  if (bit_num > 0 && bit_num < 8) {
    // One source value per byte of the raw uint8 buffer.
    std::vector<int8_t> origin_data(tensor_input->data.size());
    auto status = memcpy_s(origin_data.data(), origin_data.size() * sizeof(int8_t), tensor_input->data.data(),
                           tensor_input->data.size() * sizeof(uint8_t));
    if (status != EOK) {
      MS_LOG(ERROR) << "memcpy failed. " << status;
      return RET_ERROR;
    }
    std::vector<uint8_t> pack_data{};
    BitPack::BitPacking<int8_t, uint8_t>(bit_num, origin_data, &pack_data);
    // Replace the tensor data with the packed representation.
    tensor_input->data.resize(pack_data.size() * sizeof(uint8_t));
    status = memcpy_s(tensor_input->data.data(), tensor_input->data.size() * sizeof(uint8_t), pack_data.data(),
                      pack_data.size() * sizeof(uint8_t));
    if (status != EOK) {
      MS_LOG(ERROR) << "memcpy_s failed. " << status;
      return RET_ERROR;
    }
  } else if (bit_num > 8 && bit_num < 16) {
    // Element count from dims; each int16 element spans two bytes of data.
    auto shape_size =
      std::accumulate(tensor_input->dims.begin(), tensor_input->dims.end(), size_t(1), std::multiplies<size_t>());
    std::vector<int16_t> origin_data(shape_size);
    auto status = memcpy_s(origin_data.data(), origin_data.size() * sizeof(int16_t), tensor_input->data.data(),
                           tensor_input->data.size() * sizeof(uint8_t));
    if (status != EOK) {
      MS_LOG(ERROR) << "memcpy failed. " << status;
      return RET_ERROR;
    }
    std::vector<uint16_t> pack_data{};
    BitPack::BitPacking<int16_t, uint16_t>(bit_num, origin_data, &pack_data);
    tensor_input->data.resize(pack_data.size() * sizeof(uint16_t));
    // dest-max is the byte size of the just-resized buffer, which equals the
    // source byte count (pack_data.size() * sizeof(uint16_t)).
    status = memcpy_s(tensor_input->data.data(), tensor_input->data.size() * sizeof(uint8_t), pack_data.data(),
                      pack_data.size() * sizeof(uint16_t));
    if (status != EOK) {
      MS_LOG(ERROR) << "memcpy_s failed. " << status;
      return RET_ERROR;
    }
  }
  return RET_OK;
}
  409. NodeIter InsertNode(schema::MetaGraphT *graphT, uint32_t existNodeIdx, InsertPlace place, size_t inoutIndex,
  410. std::unique_ptr<CNodeT> toAddNode, STATUS *errorCode, int *insert_num,
  411. const OpDefCopyer &opDefCopyer) {
  412. MS_ASSERT(graphT != nullptr);
  413. MS_ASSERT(errorCode != nullptr);
  414. if (existNodeIdx >= graphT->nodes.size()) {
  415. MS_LOG(ERROR) << "nodeIdx out of range: " << existNodeIdx;
  416. return graphT->nodes.end();
  417. }
  418. auto node_iter = graphT->nodes.begin() + existNodeIdx;
  419. MS_ASSERT(node_iter != graphT->nodes.begin());
  420. MS_ASSERT((*node_iter) != nullptr);
  421. return InsertNode(graphT, node_iter, place, inoutIndex, std::move(toAddNode), errorCode, insert_num);
  422. }
  423. NodeIter InsertNode(schema::MetaGraphT *graphT, NodeIter existNodeIter, InsertPlace place, size_t inoutIndexIdx,
  424. std::unique_ptr<CNodeT> toAddNode, STATUS *errorCode, int *insert_num,
  425. const OpDefCopyer &opDefCopyer) {
  426. MS_ASSERT(graphT != nullptr);
  427. MS_ASSERT(errorCode != nullptr);
  428. if (place == kBefore) {
  429. return InsertNodeBefore(graphT, existNodeIter, inoutIndexIdx, std::move(toAddNode), errorCode, insert_num,
  430. opDefCopyer);
  431. } else if (place == kAfter) {
  432. return InsertNodeAfter(graphT, existNodeIter, inoutIndexIdx, std::move(toAddNode), errorCode, insert_num,
  433. opDefCopyer);
  434. } else {
  435. MS_LOG(ERROR) << "Invalid InsertPlace : " << place;
  436. return graphT->nodes.end();
  437. }
  438. }
// Inserts copies of `toAddNodeIn` in front of the `inputIndexIdx`-th input of
// *existNodeIter: one copy per producing node (or a single copy when the
// input has no producer). Each copy reads the existing input tensor and
// writes a fresh tensor that replaces the existing node's input. Returns an
// iterator to the original node (now after the inserted copies); *errorCode
// reports the status and *insert_num is incremented per inserted node.
NodeIter InsertNodeBefore(schema::MetaGraphT *graphT, NodeIter existNodeIter, size_t inputIndexIdx,
                          std::unique_ptr<CNodeT> toAddNodeIn, STATUS *errorCode, int *insert_num,
                          const OpDefCopyer &opDefCopyer) {
  MS_ASSERT(graphT != nullptr);
  MS_ASSERT(errorCode != nullptr);
  auto &existNode = *existNodeIter;
  MS_ASSERT(existNode != nullptr);
  MS_ASSERT(existNode->inputIndex.size() > inputIndexIdx);
  MS_ASSERT(toAddNodeIn != nullptr);
  auto preTensorIdx = existNode->inputIndex.at(inputIndexIdx);
  MS_ASSERT(graphT->allTensors.size() > preTensorIdx);
  auto preNodeIdxes = GetInputNodeIdx(*graphT, *(existNode), inputIndexIdx);
  // One copy per producer; a producer-less (graph input / const) tensor still
  // gets a single inserted node.
  size_t insert_node_num = preNodeIdxes.empty() ? 1 : preNodeIdxes.size();
  std::vector<std::unique_ptr<CNodeT>> toAddNodes;
  for (size_t i = 0; i < insert_node_num; ++i) {
    auto &preTensor = graphT->allTensors.at(preTensorIdx);
    MS_ASSERT(preTensor != nullptr);
    // The new node's output tensor starts as a copy of the input tensor
    // (shape / type / quant params), with data and refCount cleared.
    auto toAddTensor = CopyTensorDefT(preTensor);
    if (toAddTensor == nullptr) {
      *errorCode = RET_NULL_PTR;
      MS_LOG(ERROR) << "Copy Tensor failed";
      return graphT->nodes.end();
    }
    toAddTensor->nodeType = NodeType_CNode;
    toAddTensor->refCount = 0;
    toAddTensor->data.clear();
    MS_ASSERT(toAddNodeIn->primitive != nullptr);
    if (toAddNodeIn->primitive->value.type == schema::PrimitiveType_QuantDTypeCast) {
      // For a QuantDTypeCast insertion, shift the uint8<->int8 zero point by
      // 128 on whichever side changes representation, then retag the dtypes
      // so preTensor carries src_t and the new tensor carries dst_t.
      auto prim = toAddNodeIn->primitive->value.AsQuantDTypeCast();
      MS_ASSERT(prim != nullptr);
      if (prim->src_t == TypeId::kNumberTypeUInt8) {
        if (preTensor->dataType == TypeId::kNumberTypeUInt8) {
          toAddTensor->quantParams.front()->zeroPoint -= 128;
        } else {
          preTensor->quantParams.front()->zeroPoint += 128;
        }
      } else if (prim->dst_t == TypeId::kNumberTypeUInt8) {
        if (preTensor->dataType == TypeId::kNumberTypeInt8) {
          toAddTensor->quantParams.front()->zeroPoint += 128;
        } else {
          preTensor->quantParams.front()->zeroPoint -= 128;
        }
      }
      preTensor->dataType = prim->src_t;
      toAddTensor->dataType = prim->dst_t;
    }
    graphT->allTensors.emplace_back(std::move(toAddTensor));
    size_t toAddTensorIdx = graphT->allTensors.size() - 1;
    auto toAddNode = opDefCopyer(toAddNodeIn.get());
    if (toAddNode == nullptr) {
      MS_LOG(ERROR) << "copy toAddNodeIn failed";
      *errorCode = RET_NULL_PTR;
      return graphT->nodes.end();
    }
    if (!preNodeIdxes.empty()) {
      // Disambiguate the copies when several are inserted.
      toAddNode->name = toAddNodeIn->name + "_" + std::to_string(i);
    }
    toAddNode->inputIndex.clear();
    toAddNode->inputIndex.push_back(preTensorIdx);
    toAddNode->outputIndex.clear();
    toAddNode->outputIndex.push_back(toAddTensorIdx);
    // Rewire the existing node to consume the new tensor instead.
    for (auto iter = existNode->inputIndex.begin(); iter != existNode->inputIndex.end(); iter++) {
      if (*iter == preTensorIdx) {
        *iter = toAddTensorIdx;
        break;
      }
    }
    toAddNodes.emplace_back(std::move(toAddNode));
  }
  // Splice the new nodes into the node list right before the existing node.
  for (auto &toAddNode : toAddNodes) {
    existNodeIter = graphT->nodes.insert(existNodeIter, std::move(toAddNode));
    existNodeIter++;
    *insert_num += 1;
  }
  *errorCode = RET_OK;
  return existNodeIter;
}
// Inserts copies of `toAddNodeIn` behind the `outputIndexIdx`-th output of
// *existNodeIter: one copy per consuming node, plus one extra copy when the
// output tensor is also a graph output (or has no consumer at all). Each
// copy reads the existing output tensor and writes a fresh tensor that
// replaces the consumer's input (or the graph output entry). Returns an
// iterator to the original node; *errorCode reports the status and
// *insert_num is incremented per inserted node.
NodeIter InsertNodeAfter(schema::MetaGraphT *graphT, NodeIter existNodeIter, size_t outputIndexIdx,
                         std::unique_ptr<schema::CNodeT> toAddNodeIn, STATUS *errorCode, int *insert_num,
                         const OpDefCopyer &opDefCopyer) {
  MS_ASSERT(graphT != nullptr);
  MS_ASSERT(errorCode != nullptr);
  auto &existNode = *existNodeIter;
  MS_ASSERT(existNode != nullptr);
  MS_ASSERT(existNode->outputIndex.size() > outputIndexIdx);
  MS_ASSERT(toAddNodeIn != nullptr);
  auto postTensorIdx = existNode->outputIndex.at(outputIndexIdx);
  MS_ASSERT(graphT->allTensors.size() > postTensorIdx);
  auto postNodeIdxes = GetOutputNodeIdx(*graphT, *(existNode), outputIndexIdx);
  bool is_output_index = IsContain(graphT->outputIndex, postTensorIdx);
  // One copy per consumer, plus one for the graph-output / no-consumer case.
  size_t insert_node_num = (postNodeIdxes.empty() || is_output_index) ? postNodeIdxes.size() + 1 : postNodeIdxes.size();
  bool has_insert_for_graph_out = postNodeIdxes.empty() || is_output_index;
  std::vector<std::unique_ptr<schema::CNodeT>> toAddNodes;
  for (size_t i = 0; i < insert_node_num; ++i) {
    auto &postTensor = graphT->allTensors.at(postTensorIdx);
    MS_ASSERT(postTensor != nullptr);
    // The inserted node's output tensor starts as a copy of the existing output.
    auto toAddTensor = CopyTensorDefT(postTensor);
    if (toAddTensor == nullptr) {
      MS_LOG(ERROR) << "Copy TensorT failed";
      *errorCode = RET_NULL_PTR;
      return graphT->nodes.end();
    }
    toAddTensor->nodeType = NodeType_CNode;
    MS_ASSERT(toAddNodeIn->primitive != nullptr);
    if (toAddNodeIn->primitive->value.type == schema::PrimitiveType_QuantDTypeCast) {
      // For a QuantDTypeCast insertion, shift the uint8<->int8 zero point by
      // 128 on whichever side changes representation, then retag the dtypes
      // so postTensor carries src_t and the new tensor carries dst_t.
      auto prim = toAddNodeIn->primitive->value.AsQuantDTypeCast();
      MS_ASSERT(prim != nullptr);
      if (prim->dst_t == TypeId::kNumberTypeUInt8) {
        if (postTensor->dataType == TypeId::kNumberTypeUInt8) {
          postTensor->quantParams.front()->zeroPoint -= 128;
        } else {
          toAddTensor->quantParams.front()->zeroPoint += 128;
        }
      } else if (prim->src_t == TypeId::kNumberTypeUInt8) {
        if (postTensor->dataType == TypeId::kNumberTypeUInt8) {
          toAddTensor->quantParams.front()->zeroPoint -= 128;
        } else {
          postTensor->quantParams.front()->zeroPoint += 128;
        }
      }
      postTensor->dataType = prim->src_t;
      toAddTensor->dataType = prim->dst_t;
    }
    graphT->allTensors.emplace_back(std::move(toAddTensor));
    size_t toAddTensorIdx = graphT->allTensors.size() - 1;
    auto toAddNode = opDefCopyer(toAddNodeIn.get());
    if (toAddNode == nullptr) {
      MS_LOG(ERROR) << "copy toAddNodeIn failed";
      *errorCode = RET_NULL_PTR;
      return graphT->nodes.end();
    }
    toAddNode->inputIndex.clear();
    toAddNode->inputIndex.push_back(postTensorIdx);
    toAddNode->outputIndex.clear();
    toAddNode->outputIndex.push_back(toAddTensorIdx);
    if (!postNodeIdxes.empty()) {
      // Disambiguate the copies when several are inserted.
      toAddNode->name = toAddNodeIn->name + "_" + std::to_string(i);
    }
    if (has_insert_for_graph_out) {
      // The first iteration (when needed) rewires the graph output entries.
      for (auto iter = graphT->outputIndex.begin(); iter != graphT->outputIndex.end(); iter++) {
        if (*iter == postTensorIdx) {
          *iter = toAddTensorIdx;
        }
      }
      has_insert_for_graph_out = false;
    } else {
      // Each remaining iteration rewires one consumer; the index shifts by
      // one when the first slot was consumed by the graph-output rewiring.
      auto &postNode = graphT->nodes.at(postNodeIdxes[is_output_index ? i - 1 : i]);
      for (auto iter = postNode->inputIndex.begin(); iter != postNode->inputIndex.end(); iter++) {
        if (*iter == postTensorIdx) {
          *iter = toAddTensorIdx;
        }
      }
    }
    toAddNodes.emplace_back(std::move(toAddNode));
  }
  // Splice the new nodes into the node list right before the existing node.
  for (auto &toAddNode : toAddNodes) {
    existNodeIter = graphT->nodes.insert(existNodeIter, std::move(toAddNode));
    existNodeIter++;
    *insert_num += 1;
  }
  *errorCode = RET_OK;
  return existNodeIter;
}
  602. STATUS ValidateFileStr(const std::string &modelFile, const std::string &fileType) {
  603. if (modelFile.size() > fileType.size() && modelFile.substr(modelFile.size() - fileType.size()) == fileType) {
  604. return RET_OK;
  605. } else {
  606. return RET_ERROR;
  607. }
  608. }
  609. std::string GetModelName(const std::string &modelFile) {
  610. std::string modelName = modelFile;
  611. modelName = modelName.substr(modelName.find_last_of('/') + 1);
  612. modelName = modelName.substr(0, modelName.find_last_of('.'));
  613. return modelName;
  614. }
  615. int SetSubgraphTensorIndices(schema::MetaGraphT *meta_graphT) {
  616. for (auto &subgraph : meta_graphT->subGraph) {
  617. std::vector<uint32_t> subgraph_indices{};
  618. for (auto &node_idx : subgraph->nodeIndices) {
  619. auto &node = meta_graphT->nodes.at(node_idx);
  620. for (auto &input_idx : node->inputIndex) {
  621. if (IsContain(subgraph_indices, input_idx)) {
  622. continue;
  623. } else {
  624. subgraph_indices.push_back(input_idx);
  625. }
  626. }
  627. for (auto &output_idx : node->outputIndex) {
  628. if (IsContain(subgraph_indices, output_idx)) {
  629. continue;
  630. } else {
  631. subgraph_indices.push_back(output_idx);
  632. }
  633. }
  634. }
  635. subgraph->tensorIndices.assign(subgraph_indices.begin(), subgraph_indices.end());
  636. }
  637. return RET_OK;
  638. }
  639. std::vector<int> GetTransposePerm(MetaGraphT *graph, const std::unique_ptr<CNodeT> &cnode) {
  640. MS_ASSERT(graph != nullptr && cnode != nullptr);
  641. std::vector<int> perm;
  642. if (cnode->primitive->value.type != schema::PrimitiveType_Transpose) {
  643. return perm;
  644. }
  645. if (cnode->inputIndex.size() < 2) {
  646. MS_LOG(ERROR) << "transpose node input size is less than 2.";
  647. return perm;
  648. }
  649. MS_ASSERT(cnode->outputIndex.at(1) < graph->allTensors.size());
  650. auto &perm_tensor = graph->allTensors.at(cnode->inputIndex.at(1));
  651. if (perm_tensor->data.empty()) {
  652. return perm;
  653. }
  654. MS_ASSERT(perm_tensor->dims.size() != 0);
  655. perm.resize(perm_tensor->dims[0]);
  656. if (memcpy_s(perm.data(), perm_tensor->dims[0] * sizeof(int), perm_tensor->data.data(),
  657. perm_tensor->dims[0] * sizeof(int)) != EOK) {
  658. MS_LOG(ERROR) << "memcpy data failed.";
  659. return {};
  660. }
  661. return perm;
  662. }
  663. std::string BoolVectorToString(const std::vector<bool> &bool_vec) {
  664. size_t size_in_byte = ceil(bool_vec.size() / 8.0);
  665. std::string str(size_in_byte, '\0');
  666. auto iter = str.begin();
  667. size_t shift = 8;
  668. for (bool bit : bool_vec) {
  669. *iter |= bit << (shift - 1);
  670. if (--shift == 0) {
  671. iter++;
  672. shift = 8;
  673. }
  674. }
  675. return str;
  676. }
  677. TypeId GetAbstractTensorDtype(const abstract::AbstractTensorPtr &tensor) {
  678. if (tensor == nullptr || tensor->element() == nullptr) {
  679. MS_LOG(ERROR) << "abstract_tensor or abstract_tensor->element() is nullptr";
  680. return kTypeUnknown;
  681. }
  682. auto type_ptr = tensor->element()->GetTypeTrack();
  683. return type_ptr->type_id();
  684. }
  685. TypeId GetParameterDtype(const ParameterPtr &param_node) {
  686. auto abstract_base = param_node->abstract();
  687. auto abstract_tensor = utils::cast<abstract::AbstractTensorPtr>(abstract_base);
  688. auto type_ptr = abstract_tensor->element()->GetTypeTrack();
  689. return type_ptr->type_id();
  690. }
// Records the dtypes of the graph's inputs and outputs into the global
// TensorDataType singleton, keyed by positional index. Tuple outputs are
// flattened into one entry per element; non-tensor outputs are recorded as
// kTypeUnknown so indices stay aligned.
STATUS UpdateFuncGraphInputsAndOutputsDtype(const FuncGraphPtr &func_graph) {
  // update graph inputs dtype
  size_t idx = 0;
  for (auto &input : func_graph->get_inputs()) {
    // NOTE(review): input->cast<ParameterPtr>() may be null if an entry is
    // not a Parameter; GetParameterDtype is expected to tolerate that.
    TypeId type = GetParameterDtype(input->cast<ParameterPtr>());
    TensorDataType::GetInstance()->UpdateGraphInputDType(idx, type);
    idx++;
  }
  // update graph outputs dtype
  auto graph_return = func_graph->get_return();
  idx = 0;
  // NOTE(review): graph_return->inputs() also contains the Return primitive
  // itself; the isa<CNode> filter below presumably skips it — confirm.
  for (auto &input : graph_return->inputs()) {
    if (input->isa<CNode>()) {
      if (utils::isa<abstract::AbstractTuple>(input->abstract())) {
        auto tuple = std::reinterpret_pointer_cast<abstract::AbstractTuple>(input->abstract());
        if (tuple == nullptr) {
          MS_LOG(ERROR) << "tuple is nullptr";
          return RET_ERROR;
        }
        // Flatten tuple outputs: one recorded dtype per element.
        for (const auto &tuple_item : tuple->elements()) {
          TypeId type = GetAbstractTensorDtype(tuple_item->cast<abstract::AbstractTensorPtr>());
          TensorDataType::GetInstance()->UpdateGraphOutputDType(idx, type);
          idx++;
        }
      } else if (utils::isa<abstract::AbstractTensor>(input->abstract())) {
        TypeId type = GetAbstractTensorDtype(input->abstract()->cast<abstract::AbstractTensorPtr>());
        TensorDataType::GetInstance()->UpdateGraphOutputDType(idx, type);
        idx++;
      } else {
        // Unknown abstract kind: record a placeholder to keep indices aligned.
        TensorDataType::GetInstance()->UpdateGraphOutputDType(idx, kTypeUnknown);
        idx++;
      }
    }
  }
  return RET_OK;
}
  727. } // namespace lite
  728. } // namespace mindspore