
serializer_oss.cpp 35 kB

  1. /**
  2. * \file src/serialization/impl/serializer_oss.cpp
  3. * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  4. *
  5. * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  6. *
  7. * Unless required by applicable law or agreed to in writing,
  8. * software distributed under the License is distributed on an
  9. * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. */
  11. /*
  12. * Dump file layout:
  13. * [uint32_t fourcc]
14. * [uint64_t feature bits] [00 00 00 00]
  15. * [uint64_t offset to graph from tensor start]
  16. * [Tensor 1]
  17. * [Tensor 2]
  18. * [...]
  19. * [Tensor N]
  20. * [SizePrefixed FlatBuffers Graph]
  21. */
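/*
 * Illustrative byte map of the header produced by GraphDumperOSS::dump() below.
 * This is a sketch for the current MGB_MAGIC format on a little-endian host,
 * derived from the write calls in dump(); it is not a normative spec:
 *
 *   0x00  4D 47 42 43              uint32_t fourcc = MGB_MAGIC ("MGBC")
 *   0x04  00 00 00 00 00 00 00 00  FeatureBits64 (all bits currently reserved, zero)
 *   0x0c  00 00 00 00              reserved padding
 *   0x10  xx xx xx xx xx xx xx xx  uint64_t offset_to_fbs = number of tensor-value
 *                                  bytes between the end of this field and the
 *                                  size-prefixed fbs::Graph
 *   0x18  ...                      tensor values, then [uint32_t size][fbs::Graph]
 */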
  22. #if MGB_ENABLE_FBS_SERIALIZATION
  23. #include "batched_device_value_loader.h"
  24. #include "megbrain/graph/exc_extra_info.h"
  25. #include "megbrain/opr/io.h"
  26. #include "megbrain/serialization/helper.h"
  27. #include "megbrain/serialization/internal/flatbuffers_helper.h"
  28. #include "megbrain/serialization/internal/schema_generated.h"
  29. #include "megbrain/serialization/metadata.h"
  30. #include "megbrain/serialization/opr_load_dump.h"
  31. #include "megbrain/serialization/serializer.h"
  32. #include "megbrain/version.h"
  33. #include <flatbuffers/flatbuffers.h>
  34. #include <cerrno>
  35. #include <cinttypes>
  36. #include <cstdio>
  37. using namespace mgb;
  38. using namespace mgb::serialization;
  39. namespace {
  40. constexpr uint32_t MGB_VERSION = (MGE_MAJOR * 1000 + MGE_MINOR) * 100 + MGE_PATCH;
  41. constexpr uint32_t MGB_MAGIC = 0x4342474D;
42. // To stay compatible and keep old models loadable, we keep the old magic
43. // (MAGIC_V0) value and create a new magic (MGB_MAGIC).
  44. constexpr uint32_t MAGIC_V0 = 0x5342474D;
45. // Indicates which magic the loaded file uses: true for the new magic (MGB_MAGIC),
46. // false for the old magic (MAGIC_V0).
  47. bool magic_compare = true;
  48. template <typename T>
  49. bool contains_any_in_set(const SmallVector<T>& list, const ThinHashSet<T>& set) {
  50. for (const auto& x : list) {
  51. if (set.count(x)) {
  52. return true;
  53. }
  54. }
  55. return false;
  56. }
  57. void check_tensor_value_valid(const std::string& name, const HostTensorND& tensor) {
  58. bool cond_normal = tensor.layout().format.is_default() &&
  59. tensor.layout().is_physical_contiguous();
  60. bool cond_lowbit = tensor.layout().dtype.is_quantized_lowbit() &&
  61. tensor.layout().format.is_lowbit_aligned() &&
  62. tensor.layout().is_contiguous();
  63. mgb_assert(
  64. cond_normal || cond_lowbit, "non-contiguous tensor: name=%s layout=%s",
  65. name.c_str(), tensor.layout().to_string().c_str());
  66. if (tensor.dtype() == dtype::Float32()) {
  67. auto ptr = tensor.ptr<float>();
  68. for (size_t i = 0, it = tensor.shape().total_nr_elems(); i < it; ++i) {
  69. if (!std::isfinite(ptr[i])) {
  70. mgb_log_warn("invalid tensor value in %s: %g", name.c_str(), ptr[i]);
  71. break;
  72. }
  73. }
  74. }
  75. }
  76. //! feature bits for backward compatibility; default value should be 0
  77. struct FeatureBits64 {
  78. //! reserved for new fields
  79. uint64_t : 64;
  80. static void write(OutputFile& fout) {
  81. static_assert(sizeof(FeatureBits64) == 8, "bad feature bits");
  82. FeatureBits64 fb64;
  83. memset(&fb64, 0, sizeof(fb64));
  84. fout.write(&fb64, 8);
  85. }
  86. };
  87. } // namespace
  88. namespace mgb {
  89. namespace serialization {
  90. class GraphDumperOSS final : public GraphDumper, OprDumpContextFlatBuffers {
  91. const std::unique_ptr<OutputFile> m_file;
  92. flatbuffers::FlatBufferBuilder m_builder;
  93. DumpConfig m_config;
  94. DumpResult m_cur_rst;
  95. size_t m_nr_shared_tensor;
  96. std::vector<std::pair<cg::OperatorNodeBase*, const OprRegistry*>> m_oprs_to_dump;
  97. ThinHashMap<VarNode*, size_t> m_var2id;
  98. //! set of output vars specified by user
  99. ThinHashSet<VarNode*> m_output_vars;
  100. std::unordered_set<std::string> m_used_input_names, m_used_param_names;
  101. //! current opr to be dumped
  102. cg::OperatorNodeBase* m_cur_opr = nullptr;
  103. // Will be filled in dump_tensor
  104. std::vector<flatbuffers::Offset<fbs::Tensor>> m_cur_opr_tensor;
  105. std::vector<flatbuffers::Offset<fbs::Blob>> m_blobs;
  106. std::vector<fbs::OperatorParam> m_cur_opr_param_type;
  107. std::vector<flatbuffers::Offset<void>> m_cur_opr_param;
  108. void init_oprs_to_dump(const SymbolVarArray& endpoints);
  109. flatbuffers::Offset<fbs::Metadata> build_metadata(const Metadata& metadata);
  110. flatbuffers::Offset<fbs::Operator> build_single_opr(
  111. cg::OperatorNodeBase* opr, const OprRegistry* registry);
  112. flatbuffers::Offset<fbs::DType> build_dtype(DType dtype);
  113. public:
  114. GraphDumperOSS(std::unique_ptr<OutputFile> file) : m_file{std::move(file)} {}
  115. DumpResult dump(
  116. const SymbolVarArray& output_vars, const DumpConfig& config = {},
  117. const Metadata& metadata = {}) override;
  118. const GraphDumpConfig& config() const override { return m_config; }
  119. void dump_tensor(
  120. const std::string& name, const HostTensorND& tensor,
  121. TensorWriteMethod method) override;
  122. flatbuffers::FlatBufferBuilder& builder() override { return m_builder; }
  123. void append_param(uint32_t type, uint32_t value) override {
  124. static_assert(
  125. std::is_same<uint32_t, flatbuffers::uoffset_t>::value,
  126. "append_param depends on uoffset_t being uint32_t");
  127. static_assert(
  128. std::is_standard_layout<flatbuffers::Offset<void>>::value,
  129. "append_param depends on flatbuffers::Offset having "
  130. "standard memory layout");
  131. mgb_assert(type != fbs::OperatorParam_NONE);
  132. m_cur_opr_param_type.emplace_back(static_cast<fbs::OperatorParam>(type));
  133. m_cur_opr_param.emplace_back(value);
  134. }
  135. void dump_buf_with_len(const void* data, uint32_t size) override;
  136. GraphDumpFormat format() const override { return GraphDumpFormat::FLATBUFFERS; }
  137. };
  138. flatbuffers::Offset<fbs::DType> GraphDumperOSS::build_dtype(DType dtype) {
  139. return fbs::intl::build_dtype(m_builder, dtype);
  140. }
  141. void GraphDumperOSS::init_oprs_to_dump(const SymbolVarArray& endpoints) {
  142. m_oprs_to_dump.clear();
  143. m_var2id.clear();
  144. // iterate oprs to init m_var2id
  145. size_t next_id = 0;
  146. auto on_opr = [&](cg::OperatorNodeBase* opr) {
  147. if (should_remove_in_dump(opr)) {
  148. mgb_assert(opr->input().size() == 1);
  149. // Copy input ID to output
  150. auto id = m_var2id.at(opr->input(0));
  151. for (auto i : opr->output())
  152. m_var2id[i] = id;
  153. } else {
  154. auto registry = OprRegistry::find_by_type(opr->dyn_typeinfo());
  155. if (!registry || !registry->dumper) {
  156. mgb_throw(
  157. cg::OperatorNodeExcExtraInfo::ExcMaker{opr}.make<MegBrainError>,
  158. "serialization as FlatBuffers is not supported for "
  159. "operator %s",
  160. opr->dyn_typeinfo()->name);
  161. }
  162. m_oprs_to_dump.emplace_back(opr, registry);
  163. for (auto i : opr->output()) {
  164. if (!i->contain_flag(VarNode::Flag::VOLATILE_CONTENT)) {
  165. m_var2id[i] = next_id++;
  166. }
  167. }
  168. }
  169. };
  170. cg::DepOprIter dep_opr_iter{on_opr};
  171. for (auto i : endpoints) {
  172. dep_opr_iter.add(i.node()->owner_opr());
  173. }
  174. }
  175. flatbuffers::Offset<fbs::Metadata> GraphDumperOSS::build_metadata(
  176. const Metadata& metadata) {
  177. auto user_info = m_builder.CreateSharedString(metadata.user_info);
  178. fbs::MetadataBuilder builder(m_builder);
  179. builder.add_is_valid(metadata.is_valid);
  180. builder.add_graph_modified(metadata.graph_modified);
  181. builder.add_user_info(user_info);
  182. builder.add_optimize_options(metadata.optimize_options);
  183. return builder.Finish();
  184. }
  185. flatbuffers::Offset<fbs::Operator> GraphDumperOSS::build_single_opr(
  186. cg::OperatorNodeBase* opr, const OprRegistry* registry) {
  187. m_cur_opr = opr;
  188. ++m_cur_rst.nr_opr;
  189. using namespace flatbuffers;
  190. Offset<Vector<Offset<fbs::CompNode>>> comp_node;
  191. auto& config = opr->config();
  192. if (config.has_comp_node_set()) {
  193. std::vector<flatbuffers::Offset<fbs::CompNode>> cns;
  194. for (const auto& cn : config.comp_node()) {
  195. cns.emplace_back(fbs::CreateCompNode(
  196. m_builder, m_builder.CreateSharedString(cn.to_string_logical())));
  197. }
  198. comp_node = m_builder.CreateVector(cns);
  199. }
  200. Offset<Vector<uint32_t>> inputs;
  201. if (opr->input().size()) {
  202. std::vector<uint32_t> v;
  203. v.reserve(opr->input().size());
  204. for (auto inp : opr->input()) {
  205. v.emplace_back(m_var2id.at(inp));
  206. }
  207. inputs = m_builder.CreateVector(v);
  208. }
  209. Offset<String> operator_name;
  210. if (m_config.keep_op_name) {
  211. operator_name = m_builder.CreateSharedString(opr->name());
  212. }
  213. Offset<Vector<Offset<String>>> output_names;
  214. if (m_config.keep_var_name >= 2 ||
  215. (m_config.keep_var_name == 1 &&
  216. contains_any_in_set(opr->output(), m_output_vars))) {
  217. std::vector<std::string> onames;
  218. for (auto i : opr->output()) {
  219. if (!i->contain_flag(VarNode::Flag::VOLATILE_CONTENT)) {
  220. onames.emplace_back(i->name());
  221. }
  222. }
  223. output_names = m_builder.CreateVectorOfStrings(onames);
  224. }
  225. auto output_dtype = build_dtype(config.output_dtype());
  226. m_cur_opr_tensor.clear();
  227. m_blobs.clear();
  228. m_cur_opr_param.clear();
  229. m_cur_opr_param_type.clear();
  230. registry->dumper(*this, *opr);
  231. Offset<Vector<Offset<fbs::Tensor>>> tensors;
  232. if (m_cur_opr_tensor.size())
  233. tensors = m_builder.CreateVector(m_cur_opr_tensor);
  234. Offset<Vector<Offset<fbs::Blob>>> blobs;
  235. if (m_blobs.size())
  236. blobs = m_builder.CreateVector(m_blobs);
  237. Offset<Vector<uint8_t>> additional_params_type;
  238. Offset<Vector<Offset<void>>> additional_params;
  239. auto param_cnt = m_cur_opr_param_type.size();
  240. if (param_cnt > 1) {
  241. additional_params_type = m_builder.CreateVectorScalarCast<uint8_t>(
  242. m_cur_opr_param_type.data() + 1, param_cnt - 1);
  243. additional_params =
  244. m_builder.CreateVector(m_cur_opr_param.data() + 1, param_cnt - 1);
  245. }
  246. fbs::OperatorBuilder builder(m_builder);
  247. builder.add_type_id(registry->persist_type_id);
  248. builder.add_inputs(inputs);
  249. if (m_config.keep_opr_priority) {
  250. builder.add_priority(opr->node_prop().attribute().priority);
  251. }
  252. builder.add_comp_node(comp_node);
  253. builder.add_output_name(output_names);
  254. builder.add_name(operator_name);
  255. builder.add_output_dtype(output_dtype);
  256. if (param_cnt > 0) {
  257. builder.add_param_type(m_cur_opr_param_type[0]);
  258. builder.add_param(m_cur_opr_param[0]);
  259. }
  260. if (param_cnt > 1) {
  261. builder.add_additional_params_type(additional_params_type);
  262. builder.add_additional_params(additional_params);
  263. }
  264. builder.add_tensors(tensors);
  265. builder.add_blobs(blobs);
  266. m_cur_opr = nullptr;
  267. return builder.Finish();
  268. }
  269. GraphDumper::DumpResult GraphDumperOSS::dump(
  270. const SymbolVarArray& output_vars, const DumpConfig& config,
  271. const Metadata& metadata) {
  272. mgb_throw_if(output_vars.empty(), SerializationError, "Can't dump empty graph");
  273. auto begin_pos = m_file->tell();
  274. m_config = config;
  275. m_builder.Reset();
  276. m_output_vars.clear();
  277. m_cur_rst = {};
  278. m_used_input_names.clear();
  279. m_used_param_names.clear();
  280. m_nr_shared_tensor = 0;
  281. // process output vars
  282. bool keep_output_var_name = m_config.keep_var_name >= 1;
  283. std::unordered_set<std::string> output_var_names;
  284. for (auto i : output_vars) {
  285. mgb_assert(
  286. !i.node()->contain_flag(VarNode::Flag::VOLATILE_CONTENT),
  287. "can not dump var with VOLATILE_CONTENT flag: %s",
  288. cg::dump_var_info({i.node()}).c_str());
  289. if (m_output_vars.insert(i.node()).second && keep_output_var_name) {
  290. auto name_ins = output_var_names.insert(i.node()->name()).second;
  291. mgb_assert(name_ins, "duplicated output var name: %s", i.node()->cname());
  292. }
  293. }
  294. // Write magic
  295. uint32_t magic = MGB_MAGIC;
  296. m_file->write(&magic, sizeof(magic));
  297. // write FeatureBits
  298. FeatureBits64::write(*m_file);
  299. // Padding
  300. uint32_t reserved = 0;
  301. m_file->write(&reserved, sizeof(reserved));
  302. // Write placeholder for offset_to_fbs
  303. auto offset_pos = m_file->tell();
  304. uint64_t offset_to_fbs = 0;
  305. m_file->write(&offset_to_fbs, sizeof(offset_to_fbs));
  306. // Dump metadata
  307. auto fbmeta = build_metadata(metadata);
  308. // Dump operators
  309. init_oprs_to_dump(output_vars);
  310. std::vector<flatbuffers::Offset<fbs::Operator>> oprs;
  311. for (auto&& i : m_oprs_to_dump) {
  312. oprs.emplace_back(build_single_opr(i.first, i.second));
  313. }
  314. auto fb_oprs = m_builder.CreateVector(oprs);
  315. // Dump output vars
  316. std::vector<fbs::OutputVar> output_vars_idx;
  317. output_vars_idx.reserve(output_vars.size());
  318. for (auto i : output_vars) {
  319. output_vars_idx.emplace_back(m_var2id.at(i.node()), i.node()->id());
  320. }
  321. auto fb_output_vars = m_builder.CreateVectorOfStructs(output_vars_idx);
  322. XXHash content_hash;
  323. content_hash.update(m_builder.GetCurrentBufferPointer(), m_builder.GetSize());
  324. auto graph_hash = content_hash.digest();
  325. fbs::GraphBuilder graph(m_builder);
  326. graph.add_mgb_version(MGB_VERSION);
  327. graph.add_hash(graph_hash);
  328. graph.add_oprs(fb_oprs);
  329. graph.add_output_vars_idx(fb_output_vars);
  330. graph.add_nr_shared_tensor(m_nr_shared_tensor);
  331. graph.add_metadata(fbmeta);
  332. m_builder.FinishSizePrefixed(graph.Finish(), fbs::GraphIdentifier());
  333. // Write actual offset_to_fbs
  334. auto cur = m_file->tell();
  335. mgb_assert(cur >= offset_pos && cur - offset_pos >= sizeof(offset_to_fbs));
  336. offset_to_fbs = cur - offset_pos - sizeof(offset_to_fbs);
  337. m_file->seek(offset_pos);
  338. m_file->write(&offset_to_fbs, sizeof(offset_to_fbs));
  339. m_file->seek(cur);
  340. // Write serialized fbs::Graph
  341. m_file->write(m_builder.GetBufferPointer(), m_builder.GetSize());
  342. // Finalize DumpResult
  343. auto&& ret = m_cur_rst;
  344. for (size_t i = 0; i < output_vars.size(); i++) {
  345. ret.outputs.emplace_back(
  346. keep_output_var_name ? output_vars[i].node()->cname()
  347. : ssprintf("unnamed%zu", i));
  348. }
  349. ret.content_hash = graph_hash;
  350. std::sort(ret.inputs.begin(), ret.inputs.end());
  351. mgb_assert(ret.nr_opr == m_oprs_to_dump.size());
  352. ret.tot_bytes = m_file->tell() - begin_pos;
  353. return ret;
  354. }
  355. void GraphDumperOSS::dump_tensor(
  356. const std::string& name, const HostTensorND& tensor, TensorWriteMethod method) {
  357. using namespace flatbuffers;
  358. using Meth = TensorWriteMethod;
  359. mgb_assert(
  360. (method == Meth::VALUE_ANONYMOUS) ^ (!name.empty()),
  361. "name must be non-empty for non Meth::VALUE_ANONYMOUS tensors");
  362. bool has_value = method != Meth::META_INPUT;
  363. bool should_keep_name = true;
  364. switch (method) {
  365. case Meth::VALUE_ANONYMOUS:
  366. should_keep_name = false;
  367. break;
  368. case Meth::VALUE_SHARED:
  369. should_keep_name = m_config.keep_param_name;
  370. ++m_nr_shared_tensor;
  371. if (m_config.keep_param_name) {
  372. mgb_assert(
  373. m_used_param_names.insert(name).second,
  374. "duplicated VALUE_SHARED tensor name: %s", name.c_str());
  375. m_cur_rst.params.emplace_back(name);
  376. }
  377. break;
  378. case Meth::META_INPUT:
  379. case Meth::VALUE_INPUT:
  380. mgb_assert(!name.empty(), "empty input tensor name");
  381. mgb_assert(
  382. m_used_input_names.insert(name).second,
  383. "duplicated input tensor name: %s", name.c_str());
  384. m_cur_rst.inputs.emplace_back(name);
  385. break;
  386. }
  387. size_t value_size = 0;
  388. if (has_value) {
  389. check_tensor_value_valid(name, tensor);
  390. auto begin = m_file->tell();
  391. auto&& dumper = m_config.tensor_value_dumper;
  392. if (dumper) {
  393. dumper(*m_file, *m_cur_opr, tensor);
  394. } else {
  395. m_file->write(tensor.raw_ptr(), tensor.layout().span().high_byte);
  396. }
  397. value_size = m_file->tell() - begin;
  398. m_cur_rst.tensor_value_bytes += value_size;
  399. }
  400. auto fbname = should_keep_name ? m_builder.CreateSharedString(name) : 0;
  401. auto shape = m_builder.CreateVectorScalarCast<uint32_t>(
  402. tensor.shape().shape, tensor.shape().ndim);
  403. auto comp_node = fbs::CreateCompNode(
  404. m_builder,
  405. m_builder.CreateSharedString(tensor.comp_node().to_string_logical()));
  406. auto dtype = build_dtype(tensor.dtype());
  407. auto serialized_tensor =
  408. fbs::CreateTensor(m_builder, fbname, shape, comp_node, dtype, value_size);
  409. m_cur_opr_tensor.emplace_back(serialized_tensor);
  410. }
  411. void GraphDumperOSS::dump_buf_with_len(const void* data, uint32_t size) {
  412. auto blob = fbs::CreateBlob(
  413. m_builder, m_builder.CreateVector(static_cast<const uint8_t*>(data), size));
  414. m_blobs.emplace_back(blob);
  415. }
  416. // ----------------------------- Loader --------------------------------------
  417. class GraphLoaderOSS final : public GraphLoader {
  418. const LoadConfig* m_cur_load_config = nullptr;
  419. std::unique_ptr<InputFile> m_file;
  420. FeatureBits64 m_feature_bits;
  421. SharedBuffer m_graph_buf{{}, 0};
  422. const fbs::Graph* m_graph;
  423. SharedTensorIDMap m_shared_tensor_map;
  424. uint32_t m_mgb_version = 0;
  425. uint64_t m_graph_hash = 0;
  426. class OprLoadContextImpl;
  427. friend class OprLoadContextImpl;
  428. void verify();
  429. public:
  430. GraphLoaderOSS(std::unique_ptr<InputFile> input_file)
  431. : m_file{std::move(input_file)} {}
  432. std::unique_ptr<InputFile> reset_file(std::unique_ptr<InputFile> file) override {
  433. file.swap(m_file);
  434. return file;
  435. }
  436. LoadResult load(const LoadConfig& config, bool rewind) override;
  437. const SharedTensorIDMap& shared_tensor_id_map() const override {
  438. mgb_assert(m_graph_hash, "graph not loaded yet");
  439. return m_shared_tensor_map;
  440. }
  441. GraphDumpFormat format() const override { return GraphDumpFormat::FLATBUFFERS; }
  442. };
  443. class GraphLoaderOSS::OprLoadContextImpl final : public OprLoadContextFlatBuffers {
  444. GraphLoaderOSS* const m_loader;
  445. size_t m_cur_shared_tensor_idx = 0;
  446. std::shared_ptr<ComputingGraph> m_graph;
  447. LoadResult::TensorMap m_tensor_map;
  448. VarNodeArray m_id2varnode;
  449. BatchedDeviceValueLoader m_device_value_loader;
  450. const fbs::Operator* m_current_opr;
  451. size_t m_cur_opr_tensor_cnt;
  452. size_t m_cur_opr_blob_cnt;
  453. size_t m_cur_opr_param_cnt;
  454. ComputingGraph& graph() override { return *m_graph; }
  455. const GraphLoadConfig& config() const override {
  456. return *m_loader->m_cur_load_config;
  457. }
  458. void load_tensor_value(
  459. HostTensorND* dest, const TensorLayout& layout, const fbs::Tensor* tensor);
  460. std::shared_ptr<HostTensorND> load_tensor() override;
  461. std::shared_ptr<DeviceTensorND> load_tensor_shared() override;
  462. void load_single_opr(const fbs::Operator* opr);
  463. public:
  464. OprLoadContextImpl(GraphLoaderOSS* loader, uint32_t version)
  465. : OprLoadContextFlatBuffers(version), m_loader{loader} {
  466. m_graph = loader->m_cur_load_config->comp_graph;
  467. if (!m_graph) {
  468. m_graph = ComputingGraph::make();
  469. }
  470. auto maker = [this]() {
  471. return std::shared_ptr<OprLoadContext>{
  472. std::shared_ptr<OprLoadContext>{}, this};
  473. };
  474. auto got = m_graph->options().user_data.get_user_data_or_create<OprLoadContext>(
  475. maker);
  476. mgb_assert(got == this);
  477. }
  478. ~OprLoadContextImpl() noexcept {
  479. auto nr = m_graph->options().user_data.pop_user_data<OprLoadContext>();
  480. mgb_assert(nr == 1);
  481. }
  482. Metadata load_metadata();
  483. LoadResult load_oprs();
  484. CompNode load_comp_node(const fbs::CompNode* comp_node);
  485. const void* get_next_param(uint32_t enumv) override {
  486. auto type = static_cast<fbs::OperatorParam>(enumv);
  487. if (m_cur_opr_param_cnt == 0) {
  488. m_cur_opr_param_cnt++;
  489. if (m_current_opr->param_type() == type) {
  490. return m_current_opr->param();
  491. }
  492. } else {
  493. mgb_assert(
  494. m_current_opr->additional_params() &&
  495. m_cur_opr_param_cnt - 1 <
  496. m_current_opr->additional_params()->size());
  497. auto i = m_cur_opr_param_cnt++ - 1;
  498. if (m_current_opr->additional_params_type()->Get(i) == type) {
  499. return m_current_opr->additional_params()->Get(i);
  500. }
  501. }
  502. return nullptr;
  503. }
  504. std::string load_buf_with_len() override {
  505. mgb_assert(
  506. m_current_opr->blobs() &&
  507. m_cur_opr_blob_cnt < m_current_opr->blobs()->size());
  508. auto blob = m_current_opr->blobs()->Get(m_cur_opr_blob_cnt++);
  509. mgb_assert(blob && blob->data());
  510. auto data = blob->data()->data();
  511. return {reinterpret_cast<const char*>(data), blob->data()->size()};
  512. }
  513. SharedBuffer load_shared_buf_with_len() override {
  514. mgb_assert(
  515. m_current_opr->blobs() &&
  516. m_cur_opr_blob_cnt < m_current_opr->blobs()->size());
  517. auto blob = m_current_opr->blobs()->Get(m_cur_opr_blob_cnt++);
  518. mgb_assert(blob && blob->data());
  519. auto size = blob->data()->size();
  520. std::shared_ptr<uint8_t> shptr{
  521. new uint8_t[size], [](uint8_t* p) { delete[] p; }};
  522. memcpy(shptr.get(), blob->data()->data(), size);
  523. return {std::move(shptr), size};
  524. }
  525. };
  526. CompNode GraphLoaderOSS::OprLoadContextImpl::load_comp_node(
  527. const fbs::CompNode* comp_node) {
  528. mgb_assert(comp_node);
  529. if (!comp_node->logical_locator())
  530. return {};
  531. auto loc = CompNode::Locator::parse(comp_node->logical_locator()->str());
  532. m_loader->m_cur_load_config->comp_node_mapper(loc);
  533. return CompNode::load(loc);
  534. }
  535. TensorLayout load_tensor_layout(const fbs::Tensor* tensor) {
  536. TensorLayout layout;
  537. if (tensor->shape()) {
  538. layout.ndim = tensor->shape()->size();
  539. std::copy(tensor->shape()->begin(), tensor->shape()->end(), layout.shape);
  540. }
  541. if (tensor->dtype()) {
  542. // modify data type inplace for TensorLayout
  543. layout.modify_dtype_inplace(fbs::intl::load_dtype(tensor->dtype()));
  544. }
  545. layout.init_contiguous_stride();
  546. return layout;
  547. }
  548. void GraphLoaderOSS::OprLoadContextImpl::load_tensor_value(
  549. HostTensorND* dest, const TensorLayout& layout, const fbs::Tensor* tensor) {
  550. auto&& loader = m_loader->m_cur_load_config->tensor_value_loader;
  551. auto&& file = m_loader->m_file;
  552. auto begin_pos = file->tell();
  553. file->skip(tensor->offset());
  554. if (loader) {
  555. // call custom loader
  556. void* dest_ptr = nullptr;
  557. if (dest) {
  558. dest->dtype(layout.dtype).resize(layout);
  559. dest_ptr = dest->raw_ptr();
  560. }
  561. loader(dest_ptr, layout, *file);
  562. } else {
  563. if (dest) {
  564. file->read_into_tensor(*dest, layout);
  565. } else {
  566. file->skip(layout.span().high_byte);
  567. }
  568. }
  569. mgb_throw_if(
  570. file->tell() < begin_pos, SerializationError,
  571. "Custom tensor value loader accessed out of range data before "
  572. "start of data blob");
  573. auto data_size = tensor->data_size();
  574. auto consumed_size = file->tell() - begin_pos;
  575. mgb_throw_if(
  576. consumed_size > data_size, SerializationError,
  577. "Custom tensor value loader consumed more data than "
  578. "available: consumed %zu, has %u",
  579. consumed_size, data_size);
  580. if (consumed_size < data_size) {
  581. mgb_log_warn(
  582. "Tensor value loader consumed less data than available: "
  583. "consumed %zu bytes, has %u bytes",
  584. consumed_size, data_size);
  585. file->skip(data_size - consumed_size);
  586. }
  587. }
  588. std::shared_ptr<HostTensorND> GraphLoaderOSS::OprLoadContextImpl::load_tensor() {
  589. mgb_assert(
  590. m_current_opr->tensors() &&
  591. m_cur_opr_tensor_cnt < m_current_opr->tensors()->size());
  592. auto tensor = m_current_opr->tensors()->Get(m_cur_opr_tensor_cnt++);
  593. auto comp_node = load_comp_node(tensor->comp_node());
  594. auto layout = load_tensor_layout(tensor);
  595. auto ret = std::make_shared<HostTensorND>(comp_node, layout);
  596. if (tensor->data_size()) {
  597. load_tensor_value(ret.get(), layout, tensor);
  598. }
  599. if (tensor->name()) {
  600. m_tensor_map[tensor->name()->str()] = ret;
  601. }
  602. if (auto&& mod = m_loader->m_cur_load_config->tensor_modifier) {
  603. mod(tensor->name() ? tensor->name()->str() : "", tensor->data_size() != 0,
  604. *ret);
  605. }
  606. return ret;
  607. }
  608. std::shared_ptr<DeviceTensorND> GraphLoaderOSS::OprLoadContextImpl::
  609. load_tensor_shared() {
  610. mgb_assert(
  611. m_current_opr->tensors() &&
  612. m_cur_opr_tensor_cnt < m_current_opr->tensors()->size());
  613. auto tensor = m_current_opr->tensors()->Get(m_cur_opr_tensor_cnt++);
  614. auto comp_node = load_comp_node(tensor->comp_node());
  615. auto layout = load_tensor_layout(tensor);
  616. mgb_assert(tensor->data_size());
  617. auto&& sh_reg = m_loader->m_shared_tensor_map.at(m_cur_shared_tensor_idx++);
  618. auto&& sh_ptr_ref = sh_reg.second[comp_node.mem_node()];
  619. if (sh_ptr_ref) {
  620. // cached tensor value is valid so we can reuse it
  621. load_tensor_value(nullptr, layout, tensor);
  622. if (sh_ptr_ref->comp_node() == comp_node)
  623. return sh_ptr_ref;
  624. // same mem node but different comp node, change comp node and share
  625. // value
  626. auto ret = std::make_shared<DeviceTensorND>(*sh_ptr_ref);
  627. ret->comp_node(comp_node);
  628. return ret;
  629. }
  630. if (tensor->name()) {
  631. sh_reg.first = tensor->name()->str();
  632. }
  633. if (comp_node.mem_node() == CompNode::default_cpu().mem_node()) {
  634. // directly forward CPU memory
  635. HostTensorND hv{comp_node};
  636. load_tensor_value(&hv, layout, tensor);
  637. sh_ptr_ref = std::make_shared<DeviceTensorND>();
  638. *sh_ptr_ref = DeviceTensorND::make_proxy(hv);
  639. } else {
  640. // use lazy load for non-CPU devices
  641. HostTensorND hv{CompNode::default_cpu()};
  642. load_tensor_value(&hv, layout, tensor);
  643. sh_ptr_ref = m_device_value_loader.make(comp_node, std::move(hv));
  644. }
  645. return sh_ptr_ref;
  646. }
  647. Metadata GraphLoaderOSS::OprLoadContextImpl::load_metadata() {
  648. const auto* fbmeta = m_loader->m_graph->metadata();
  649. Metadata ret;
  650. if (fbmeta) {
  651. ret.is_valid = fbmeta->is_valid();
  652. ret.graph_modified = fbmeta->graph_modified();
  653. if (fbmeta->user_info()) {
  654. ret.user_info = fbmeta->user_info()->str();
  655. ret.has_user_info = true;
  656. }
  657. if (fbmeta->optimize_options()) {
  658. ret.optimize_options = fbmeta->optimize_options();
  659. ret.optimized_for_inference = true;
  660. }
  661. }
  662. return ret;
  663. }
  664. void GraphLoaderOSS::OprLoadContextImpl::load_single_opr(const fbs::Operator* fbopr) {
  665. m_cur_opr_tensor_cnt = 0;
  666. m_cur_opr_blob_cnt = 0;
  667. m_cur_opr_param_cnt = 0;
  668. OperatorNodeConfig config;
  669. if (fbopr->output_dtype()) {
  670. config.output_dtype(fbs::intl::load_dtype(fbopr->output_dtype()));
  671. }
  672. if (fbopr->name()) {
  673. config.name(fbopr->name()->str());
  674. }
  675. if (fbopr->comp_node()) {
  676. auto cnt = fbopr->comp_node()->size();
  677. cg::OperatorNodeConfig::CompNodeArray comp_node_arr(cnt);
  678. for (size_t i = 0; i < cnt; i++) {
  679. CompNode cn{};
  680. auto node = fbopr->comp_node()->Get(i);
  681. if (node) {
  682. cn = load_comp_node(node);
  683. }
  684. comp_node_arr[i] = cn;
  685. }
  686. config.comp_node_arr(comp_node_arr);
  687. }
  688. const OprRegistry* registry;
  689. if (magic_compare) {
  690. registry = OprRegistry::find_by_id(fbopr->type_id());
  691. } else {
  692. registry = OprRegistry::find_by_unversioned_id(fbopr->type_id());
  693. }
  694. mgb_throw_if(
  695. !registry, SerializationError,
  696. "failed to find opr with type %s, use python env "
  697. "config.dump_registered_oprs() to get a dict that maps from "
  698. "opr id to opr name",
  699. std::to_string(fbopr->type_id()).c_str());
  700. // load inputs
  701. VarNodeArray inputs;
  702. if (fbopr->inputs()) {
  703. inputs.resize(fbopr->inputs()->size());
  704. for (size_t i = 0; i < inputs.size(); ++i) {
  705. inputs[i] = m_id2varnode.at(fbopr->inputs()->Get(i));
  706. }
  707. }
  708. // call loader
  709. auto accessor = registry->loader(*this, inputs, config);
  710. auto opr = accessor.opr();
  711. // check opr type; note that:
  712. // 1. registry->type may be empty for dynamic opr loaders or legacy oprs
  713. // 2. due to some optimization, an opr may be replaced by ImmutableTensor
  714. mgb_assert(
  715. opr && (opr->dyn_typeinfo() == registry->type || !registry->type ||
  716. opr->same_type<opr::ImmutableTensor>()),
  717. "got_type=%s expected_type=%s", opr ? opr->dyn_typeinfo()->name : nullptr,
  718. registry->type->name);
  719. // record output vars; read output names
  720. size_t i = 0;
  721. for (auto ovar : accessor.output()) {
  722. if (!ovar->contain_flag(VarNode::Flag::VOLATILE_CONTENT)) {
  723. m_id2varnode.push_back(ovar);
  724. if (fbopr->output_name()) {
  725. ovar->name(fbopr->output_name()->Get(i++)->str());
  726. }
  727. }
  728. }
  729. opr->node_prop().attribute().priority = fbopr->priority();
  730. }
  731. GraphLoader::LoadResult GraphLoaderOSS::OprLoadContextImpl::load_oprs() {
  732. // load oprs
  733. const auto* oprs = m_loader->m_graph->oprs();
  734. {
  735. // inplace arith graph optimization is disabled during opr load
  736. // it tries to restore the same graph as it was dumped
  737. // see test TestSerializer2.LOGEXP for example
  738. GraphLoader::ScopedGraphOptDisabler _(m_graph);
  739. for (flatbuffers::uoffset_t i = 0; i < oprs->size(); ++i) {
  740. m_current_opr = oprs->Get(i);
  741. load_single_opr(m_current_opr);
  742. }
  743. }
  744. // batched loading device values
  745. m_device_value_loader.apply();
  746. LoadResult ret;
  747. ret.graph = m_graph;
  748. ret.tensor_map = m_tensor_map;
  749. const auto* outputs = m_loader->m_graph->output_vars_idx();
  750. ret.output_var_list.resize(outputs->size());
  751. for (flatbuffers::uoffset_t i = 0; i < outputs->size(); i++) {
  752. auto out = outputs->Get(i);
  753. auto var = m_id2varnode.at(out->compact_id());
  754. ret.output_var_map[var->name()] = var;
  755. ret.output_var_map_id[out->original_id()] = var;
  756. ret.output_var_list[i] = var;
  757. }
  758. mgb_assert(m_cur_shared_tensor_idx == m_loader->m_shared_tensor_map.size());
  759. return ret;
  760. }
  761. GraphLoader::LoadResult GraphLoaderOSS::load(const LoadConfig& config, bool rewind) {
  762. mgb_assert(m_file);
  763. m_cur_load_config = &config;
  764. if (rewind) {
  765. m_file->rewind();
  766. }
  767. uint32_t magic;
  768. m_file->read(&magic, sizeof(magic));
  769. mgb_throw_if(
  770. (magic != MGB_MAGIC) && (magic != MAGIC_V0), SerializationError,
  771. "wrong magic: wanted %#08x or %#08x, actual %#08x (not a invalid fbs "
  772. "model?)",
  773. MGB_MAGIC, MAGIC_V0, magic);
  774. if (magic == MGB_MAGIC) {
  775. // read FeatureBits
  776. magic_compare = true;
  777. m_file->read(&m_feature_bits, 8);
  778. } else {
  779. magic_compare = false;
  780. }
  781. m_file->skip(4);
  782. uint64_t offset_to_fbs;
  783. m_file->read(&offset_to_fbs, sizeof(offset_to_fbs));
  784. auto tensor_begin = m_file->tell();
  785. // Skip tensor data
  786. m_file->skip(offset_to_fbs);
  787. // Read fbs::Graph
  788. uint32_t size;
  789. m_file->read(&size, sizeof(size));
  790. m_graph_buf = m_file->read_shared(size);
  791. // Rewind back to tensor data
  792. m_file->rewind();
  793. m_file->skip(tensor_begin);
  794. mgb_throw_if(
  795. !fbs::GraphBufferHasIdentifier(m_graph_buf.data()), SerializationError,
  796. "invalid fbs model");
  797. {
  798. flatbuffers::Verifier verifier(
  799. static_cast<const uint8_t*>(m_graph_buf.data()), m_graph_buf.size());
  800. mgb_throw_if(
  801. !fbs::VerifyGraphBuffer(verifier), SerializationError,
  802. "model verification failed (invalid or corrupted model?)");
  803. }
  804. m_graph = fbs::GetGraph(m_graph_buf.data());
  805. m_mgb_version = m_graph->mgb_version();
  806. if (m_graph->mgb_version() > MGB_VERSION) {
  807. mgb_log_warn(
  808. "loading model from future runtime: version=%u "
  809. "model_version=%u",
  810. MGB_VERSION, m_graph->mgb_version());
  811. }
  812. if (!m_graph_hash) {
  813. m_graph_hash = m_graph->hash();
  814. mgb_assert(
  815. m_graph_hash,
  816. "invalid graph hash; maybe error "
  817. "occurred during graph dump");
  818. } else {
  819. mgb_assert(
  820. m_graph_hash == m_graph->hash(),
  821. "A GraphLoader instance can be used to load only one graph,"
  822. " since the tensor values are shared. Previous graph hash "
  823. "is 0x%llx, current graph hash is 0x%llx.",
  824. static_cast<unsigned long long>(m_graph_hash),
  825. static_cast<unsigned long long>(m_graph->hash()));
  826. }
  827. if (m_shared_tensor_map.empty()) {
  828. m_shared_tensor_map.resize(m_graph->nr_shared_tensor());
  829. } else {
  830. mgb_assert(m_shared_tensor_map.size() == m_graph->nr_shared_tensor());
  831. }
  832. OprLoadContextImpl ctx{this, m_graph->mgb_version()};
  833. auto metadata = ctx.load_metadata();
  834. auto result = ctx.load_oprs();
  835. result.metadata = metadata;
  836. auto fbs_end = tensor_begin + offset_to_fbs + sizeof(size) + size;
  837. auto cur = m_file->tell();
  838. mgb_assert(fbs_end > cur);
  839. // Skip to Graph end
  840. m_file->skip(fbs_end - cur);
  841. return result;
  842. }
  843. std::unique_ptr<GraphDumper> make_fbs_dumper(std::unique_ptr<OutputFile> file) {
  844. return std::make_unique<GraphDumperOSS>(std::move(file));
  845. }
  846. std::unique_ptr<GraphLoader> make_fbs_loader(std::unique_ptr<InputFile> file) {
  847. return std::make_unique<GraphLoaderOSS>(std::move(file));
  848. }
  849. bool is_fbs_file(InputFile& file) {
  850. uint64_t magic_with_reserved = 0;
  851. file.read(&magic_with_reserved, sizeof(magic_with_reserved));
  852. file.skip(-sizeof(magic_with_reserved));
  853. return (magic_with_reserved == MGB_MAGIC) || (magic_with_reserved == MAGIC_V0);
  854. }
  855. } // namespace serialization
  856. } // namespace mgb
  857. #endif
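
For reference, below is a minimal usage sketch of the dumper/loader factories defined at the end of this file. It is not part of serializer_oss.cpp; the file path, the variable name out_var, and the OutputFile::make_fs / InputFile::make_fs helpers are assumptions about the surrounding MegEngine serialization API rather than guaranteed signatures.

// Sketch only: dump a graph whose single output is `out_var`, then load it back.
using namespace mgb;
using namespace mgb::serialization;

void dump_and_reload_example(SymbolVar out_var) {
    // ---- dump ----
    // OutputFile::make_fs is assumed here to open "model.mge" for writing.
    auto dumper = make_fbs_dumper(OutputFile::make_fs("model.mge"));
    GraphDumpConfig dump_cfg;
    dump_cfg.keep_var_name = 1;  // keep names of the dumped output vars only
    auto dump_rst = dumper->dump({out_var}, dump_cfg);
    mgb_log("dumped %zu bytes", dump_rst.tot_bytes);

    // ---- load ----
    auto loader = make_fbs_loader(InputFile::make_fs("model.mge"));
    GraphLoadConfig load_cfg;
    auto load_rst = loader->load(load_cfg, /* rewind */ true);
    // load_rst.output_var_list holds the deserialized output vars;
    // load_rst.graph is the ComputingGraph that owns them.
}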
