From c25e37e7bf8f97b805cdd4f95f30db2edb591d14 Mon Sep 17 00:00:00 2001 From: zhousiyi Date: Thu, 27 Aug 2020 02:09:56 +0000 Subject: [PATCH] make backend/optimizer pybind free --- .../ascend/ascend_backend_optimization.cc | 2 +- .../ccsrc/backend/session/ascend_session.cc | 2 +- .../ccsrc/backend/session/gpu_session.cc | 2 +- mindspore/ccsrc/debug/anf_ir_utils.cc | 60 --- mindspore/ccsrc/debug/anf_ir_utils.h | 8 - mindspore/ccsrc/debug/dump_proto.cc | 68 ++- mindspore/ccsrc/debug/dump_proto.h | 33 ++ .../operator/ops_front_infer_function.cc | 2 + mindspore/ccsrc/frontend/optimizer/cse.cc | 4 + mindspore/ccsrc/frontend/optimizer/cse.h | 10 +- mindspore/ccsrc/frontend/optimizer/cse_pass.h | 53 +++ .../parallel/graph_util/generate_graph.cc | 1 + mindspore/ccsrc/pipeline/jit/pass.cc | 8 +- mindspore/ccsrc/pipeline/jit/pipeline.cc | 2 + mindspore/ccsrc/pipeline/jit/pipeline_ge.cc | 1 + .../pipeline/jit/remove_value_node_dup.cc | 2 + .../pipeline/jit/static_analysis/prim.cc | 1 + .../pipeline/pynative/pynative_execute.cc | 1 + mindspore/ccsrc/pybind_api/ir/primitive_py.cc | 1 + .../ccsrc/utils/context/context_extends.cc | 7 + .../ccsrc/utils/context/context_extends.h | 1 - mindspore/ccsrc/utils/convert_utils.cc | 376 ---------------- mindspore/ccsrc/utils/convert_utils.h | 14 +- mindspore/ccsrc/utils/convert_utils_py.cc | 409 ++++++++++++++++++ mindspore/ccsrc/utils/convert_utils_py.h | 43 ++ mindspore/ccsrc/utils/primitive_utils.cc | 1 + mindspore/ccsrc/vm/vmimpl.h | 1 - tests/ut/cpp/optimizer/optimizer_test.cc | 4 +- tests/ut/cpp/parallel/step_parallel_test.cc | 1 + tests/ut/cpp/stub/anf_ir/dump_proto_stub.cc | 4 +- tests/ut/cpp/vm/segment_runner_test.cc | 1 + 31 files changed, 643 insertions(+), 480 deletions(-) create mode 100644 mindspore/ccsrc/debug/dump_proto.h create mode 100644 mindspore/ccsrc/frontend/optimizer/cse_pass.h create mode 100644 mindspore/ccsrc/utils/convert_utils_py.cc create mode 100644 mindspore/ccsrc/utils/convert_utils_py.h diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ascend_backend_optimization.cc b/mindspore/ccsrc/backend/optimizer/ascend/ascend_backend_optimization.cc index 844219d3ac..9401696fa4 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ascend_backend_optimization.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ascend_backend_optimization.cc @@ -108,7 +108,7 @@ #include "utils/ms_context.h" #include "utils/config_manager.h" #include "debug/anf_ir_dump.h" -#include "debug/anf_ir_utils.h" +#include "debug/dump_proto.h" namespace mindspore { namespace opt { diff --git a/mindspore/ccsrc/backend/session/ascend_session.cc b/mindspore/ccsrc/backend/session/ascend_session.cc index 63e9fa1cc0..676f832ba6 100644 --- a/mindspore/ccsrc/backend/session/ascend_session.cc +++ b/mindspore/ccsrc/backend/session/ascend_session.cc @@ -34,7 +34,7 @@ #include "runtime/device/ascend/ascend_stream_assign.h" #include "backend/session/anf_runtime_algorithm.h" #include "debug/anf_ir_dump.h" -#include "debug/anf_ir_utils.h" +#include "debug/dump_proto.h" #include "utils/ms_utils.h" #include "backend/optimizer/common/helper.h" #include "runtime/device/kernel_runtime_manager.h" diff --git a/mindspore/ccsrc/backend/session/gpu_session.cc b/mindspore/ccsrc/backend/session/gpu_session.cc index 037780a8c8..19bf3a2b7c 100644 --- a/mindspore/ccsrc/backend/session/gpu_session.cc +++ b/mindspore/ccsrc/backend/session/gpu_session.cc @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#include "debug/anf_ir_utils.h" +#include "debug/dump_proto.h" #include "backend/session/gpu_session.h" #include "runtime/device/gpu/kernel_info_setter.h" #include "runtime/device/gpu/gpu_kernel_build.h" diff --git a/mindspore/ccsrc/debug/anf_ir_utils.cc b/mindspore/ccsrc/debug/anf_ir_utils.cc index 42bf1824b7..d4b0f82668 100644 --- a/mindspore/ccsrc/debug/anf_ir_utils.cc +++ b/mindspore/ccsrc/debug/anf_ir_utils.cc @@ -2258,64 +2258,4 @@ std::vector ImportIR(const std::string &filename) { parser.ParseFile(); return parser.GetFuncGraphs(); } - -#ifdef ENABLE_DUMP_IR -void DumpIRProto(const FuncGraphPtr &func_graph, const std::string &suffix) { - if (func_graph == nullptr) { - MS_LOG(ERROR) << "Func graph is nullptr"; - return; - } - auto ms_context = MsContext::GetInstance(); - if (ms_context == nullptr) { - MS_LOG(ERROR) << "ms_context is nullptr"; - return; - } - auto save_graphs_path = ms_context->save_graphs_path(); - if (save_graphs_path.empty()) { - save_graphs_path = "."; - } - std::string file_path = save_graphs_path + "/" + "ms_output_" + suffix + ".pb"; - if (file_path.size() > PATH_MAX) { - MS_LOG(ERROR) << "File path " << file_path << " is too long."; - return; - } - char real_path[PATH_MAX] = {0}; - char *real_path_ret = nullptr; -#if defined(_WIN32) || defined(_WIN64) - real_path_ret = _fullpath(real_path, file_path.c_str(), PATH_MAX); -#else - real_path_ret = realpath(file_path.c_str(), real_path); -#endif - if (nullptr == real_path_ret) { - MS_LOG(DEBUG) << "dir " << file_path << " does not exit."; - } else { - std::string path_string = real_path; - if (chmod(common::SafeCStr(path_string), S_IRUSR | S_IWUSR) == -1) { - MS_LOG(ERROR) << "Modify file:" << real_path << " to rw fail."; - return; - } - } - - // write to pb file - std::ofstream ofs(real_path); - if (!ofs.is_open()) { - MS_LOG(ERROR) << "Open file '" << real_path << "' failed!"; - return; - } - ofs << GetFuncGraphProtoString(func_graph); - ofs.close(); - // set file mode to read only by user - ChangeFileMode(file_path, S_IRUSR); -} -#else -void DumpIRProto(const FuncGraphPtr &, const std::string &) { - static bool already_printed = false; - if (already_printed) { - return; - } - already_printed = true; - MS_LOG(WARNING) << "The functionality of dumping function graph IR in protobuf format is disabled, " - << "please recompile source to enable it. 
See help of building script."; -} -#endif } // namespace mindspore diff --git a/mindspore/ccsrc/debug/anf_ir_utils.h b/mindspore/ccsrc/debug/anf_ir_utils.h index ed5e3b8a5d..359fdef57b 100644 --- a/mindspore/ccsrc/debug/anf_ir_utils.h +++ b/mindspore/ccsrc/debug/anf_ir_utils.h @@ -112,14 +112,6 @@ void ExportIR(const std::string &filename, const std::string &id, const FuncGrap void ExportIR(const std::string &filename, const std::vector &graphs); std::vector ImportIR(const std::string &filename); - -std::string GetFuncGraphProtoString(const FuncGraphPtr &func_graph); - -void DumpIRProto(const FuncGraphPtr &func_graph, const std::string &suffix); - -std::string GetOnnxProtoString(const FuncGraphPtr &func_graph); - -std::string GetBinaryProtoString(const FuncGraphPtr &func_graph); } // namespace mindspore #endif // MINDSPORE_CCSRC_DEBUG_ANF_IR_UTILS_H_ diff --git a/mindspore/ccsrc/debug/dump_proto.cc b/mindspore/ccsrc/debug/dump_proto.cc index 1f266123ec..cd8a2a982d 100644 --- a/mindspore/ccsrc/debug/dump_proto.cc +++ b/mindspore/ccsrc/debug/dump_proto.cc @@ -13,16 +13,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ +#include "debug/dump_proto.h" +#include +#include #include #include #include -#include +#include -#include "debug/anf_ir_utils.h" #include "proto/anf_ir.pb.h" #include "ir/graph_utils.h" +#include "utils/ms_context.h" #include "utils/symbolic.h" +#include "utils/utils.h" namespace mindspore { class ProtoExporter { @@ -514,4 +518,64 @@ std::string GetFuncGraphProtoString(const FuncGraphPtr &func_graph) { ProtoExporter exporter; return exporter.GetFuncGraphProtoString(func_graph); } + +#ifdef ENABLE_DUMP_IR +void DumpIRProto(const FuncGraphPtr &func_graph, const std::string &suffix) { + if (func_graph == nullptr) { + MS_LOG(ERROR) << "Func graph is nullptr"; + return; + } + auto ms_context = MsContext::GetInstance(); + if (ms_context == nullptr) { + MS_LOG(ERROR) << "ms_context is nullptr"; + return; + } + auto save_graphs_path = ms_context->save_graphs_path(); + if (save_graphs_path.empty()) { + save_graphs_path = "."; + } + std::string file_path = save_graphs_path + "/" + "ms_output_" + suffix + ".pb"; + if (file_path.size() > PATH_MAX) { + MS_LOG(ERROR) << "File path " << file_path << " is too long."; + return; + } + char real_path[PATH_MAX] = {0}; + char *real_path_ret = nullptr; +#if defined(_WIN32) || defined(_WIN64) + real_path_ret = _fullpath(real_path, file_path.c_str(), PATH_MAX); +#else + real_path_ret = realpath(file_path.c_str(), real_path); +#endif + if (nullptr == real_path_ret) { + MS_LOG(DEBUG) << "dir " << file_path << " does not exit."; + } else { + std::string path_string = real_path; + if (chmod(common::SafeCStr(path_string), S_IRUSR | S_IWUSR) == -1) { + MS_LOG(ERROR) << "Modify file:" << real_path << " to rw fail."; + return; + } + } + + // write to pb file + std::ofstream ofs(real_path); + if (!ofs.is_open()) { + MS_LOG(ERROR) << "Open file '" << real_path << "' failed!"; + return; + } + ofs << GetFuncGraphProtoString(func_graph); + ofs.close(); + // set file mode to read only by user + ChangeFileMode(file_path, S_IRUSR); +} +#else +void DumpIRProto(const FuncGraphPtr &, const std::string &) { + static bool already_printed = false; + if (already_printed) { + return; + } + already_printed = true; + MS_LOG(WARNING) << "The functionality of dumping function graph IR in protobuf format is disabled, " + << "please recompile source to enable it. 
See help of building script."; +} +#endif } // namespace mindspore diff --git a/mindspore/ccsrc/debug/dump_proto.h b/mindspore/ccsrc/debug/dump_proto.h new file mode 100644 index 0000000000..a464eca9f8 --- /dev/null +++ b/mindspore/ccsrc/debug/dump_proto.h @@ -0,0 +1,33 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CCSRC_DEBUG_DUMP_PROTO_H_ +#define MINDSPORE_CCSRC_DEBUG_DUMP_PROTO_H_ + +#include + +#include "ir/func_graph.h" + +namespace mindspore { +std::string GetFuncGraphProtoString(const FuncGraphPtr &func_graph); + +std::string GetOnnxProtoString(const FuncGraphPtr &func_graph); + +std::string GetBinaryProtoString(const FuncGraphPtr &func_graph); + +void DumpIRProto(const FuncGraphPtr &func_graph, const std::string &suffix); +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_DEBUG_DUMP_PROTO_H_ diff --git a/mindspore/ccsrc/frontend/operator/ops_front_infer_function.cc b/mindspore/ccsrc/frontend/operator/ops_front_infer_function.cc index 3e3149a081..68912c0b40 100644 --- a/mindspore/ccsrc/frontend/operator/ops_front_infer_function.cc +++ b/mindspore/ccsrc/frontend/operator/ops_front_infer_function.cc @@ -27,6 +27,8 @@ #include "pybind_api/ir/tensor_py.h" #include "frontend/operator/ops.h" #include "abstract/infer_functions.h" +#include "utils/convert_utils_py.h" + namespace mindspore { namespace abstract { enum State { diff --git a/mindspore/ccsrc/frontend/optimizer/cse.cc b/mindspore/ccsrc/frontend/optimizer/cse.cc index 9e287103cf..350e9fa7ed 100644 --- a/mindspore/ccsrc/frontend/optimizer/cse.cc +++ b/mindspore/ccsrc/frontend/optimizer/cse.cc @@ -17,10 +17,14 @@ */ #include "frontend/optimizer/cse.h" + #include #include #include +#include "abstract/abstract_function.h" +#include "utils/flags.h" + namespace mindspore { /* namespace to support opt */ namespace opt { diff --git a/mindspore/ccsrc/frontend/optimizer/cse.h b/mindspore/ccsrc/frontend/optimizer/cse.h index 55058f60e8..b35004c593 100644 --- a/mindspore/ccsrc/frontend/optimizer/cse.h +++ b/mindspore/ccsrc/frontend/optimizer/cse.h @@ -24,23 +24,16 @@ #include #include "ir/anf.h" #include "ir/manager.h" -#include "frontend/optimizer/optimizer.h" namespace mindspore { /* namespace to support opt */ namespace opt { - // Common subexpression elimination. 
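 // After this change CSE keeps only the pure graph-rewriting logic: the
 // functor interface that coupled it to OptimizerPtr (and, through the
 // optimizer's resource, to pybind-dependent pipeline state) moves to
 // CSEPass in the new cse_pass.h below.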
class CSE {
  public:
-  explicit CSE(bool report_changes = true) : report_changes_(report_changes) {}
+  CSE() = default;
   virtual ~CSE() = default;
 
-  bool operator()(const FuncGraphPtr &root, const OptimizerPtr &optimizer) {
-    bool chg = Cse(root, optimizer->resource()->manager());
-    return chg && report_changes_;
-  }
-
   virtual bool CheckReplace(const AnfNodePtr &main, const AnfNodePtr &node, bool check_side_effect = true) const;
 
   virtual bool CheckRandomEffect(const AnfNodePtr &main, const AnfNodePtr &node) const;
 
@@ -51,7 +44,6 @@ class CSE {
   bool BuildOrderGroupAndDoReplace(const FuncGraphManagerPtr manager) const;
   bool DoReplace(const FuncGraphManagerPtr manager, const std::vector<std::size_t> &order_group,
                  std::unordered_map<std::size_t, std::vector<AnfNodePtr>> *groups) const;
-  bool report_changes_;
 };
 
 BasePtr AbsOf(const AnfNodePtr &node);
diff --git a/mindspore/ccsrc/frontend/optimizer/cse_pass.h b/mindspore/ccsrc/frontend/optimizer/cse_pass.h
new file mode 100644
index 0000000000..9c2d655205
--- /dev/null
+++ b/mindspore/ccsrc/frontend/optimizer/cse_pass.h
@@ -0,0 +1,53 @@
+/**
+ * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
+ *
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_FRONTEND_OPTIMIZER_CSE_PASS_H_
+#define MINDSPORE_CCSRC_FRONTEND_OPTIMIZER_CSE_PASS_H_
+
+#include
+#include
+#include
+
+#include "frontend/optimizer/cse.h"
+
+#include "frontend/optimizer/optimizer.h"
+
+namespace mindspore {
+/* namespace to support opt */
+namespace opt {
+
+// Common subexpression elimination.
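+// CSEPass restores the functor interface removed from CSE above: it runs
+// Cse() with the manager taken from the optimizer's resource and reports a
+// change only when report_changes_ is set, so pass pipelines can decide
+// whether another optimization round is needed. Pipelines register it as
+// before, e.g. {"cse", opt::OptPassConfig(opt::CSEPass(false))} in pass.cc.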
+class CSEPass : public CSE { + public: + explicit CSEPass(bool report_changes = true) : CSE(), report_changes_(report_changes) {} + virtual ~CSEPass() = default; + + bool operator()(const FuncGraphPtr &root, const OptimizerPtr &optimizer) { + bool chg = Cse(root, optimizer->resource()->manager()); + return chg && report_changes_; + } + + private: + bool report_changes_; +}; + +BasePtr AbsOf(const AnfNodePtr &node); +} // namespace opt +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_FRONTEND_OPTIMIZER_CSE_PASS_H_ diff --git a/mindspore/ccsrc/frontend/parallel/graph_util/generate_graph.cc b/mindspore/ccsrc/frontend/parallel/graph_util/generate_graph.cc index 034d006932..b4841a1991 100644 --- a/mindspore/ccsrc/frontend/parallel/graph_util/generate_graph.cc +++ b/mindspore/ccsrc/frontend/parallel/graph_util/generate_graph.cc @@ -21,6 +21,7 @@ #include #include "pipeline/jit/parse/python_adapter.h" +#include "utils/convert_utils_py.h" using mindspore::tensor::Tensor; diff --git a/mindspore/ccsrc/pipeline/jit/pass.cc b/mindspore/ccsrc/pipeline/jit/pass.cc index 113545491f..538a8894f5 100644 --- a/mindspore/ccsrc/pipeline/jit/pass.cc +++ b/mindspore/ccsrc/pipeline/jit/pass.cc @@ -28,7 +28,7 @@ #include "pipeline/jit/validator.h" #include "pipeline/jit/remove_value_node_dup.h" #include "frontend/optimizer/optimizer.h" -#include "frontend/optimizer/cse.h" +#include "frontend/optimizer/cse_pass.h" #include "frontend/optimizer/graph_kernel_reuse.h" #include "frontend/optimizer/clean.h" #include "frontend/optimizer/irpass.h" @@ -158,7 +158,7 @@ OptPassGroupMap GetOptPassesA(const opt::irpass::OptimizeIRPassLib &irpass) { {"resolve", resolve_pass}, {"a_after_grad", a_after_grad}, {"renormalize", opt::OptPassConfig::Renormalize()}, - {"cse", opt::OptPassConfig(opt::CSE(false))}, + {"cse", opt::OptPassConfig(opt::CSEPass(false))}, {"a_3", a_3}}); return map_a; @@ -192,7 +192,7 @@ OptPassGroupMap GetOptPassesB(const opt::irpass::OptimizeIRPassLib &irpass) { {"b_1", b_1}, {"b_2", b_2}, {"renormalize", opt::OptPassConfig::Renormalize()}, - {"cse", opt::OptPassConfig(opt::CSE(false))}, + {"cse", opt::OptPassConfig(opt::CSEPass(false))}, }); return map; } @@ -205,7 +205,7 @@ OptPassGroupMap GetOptPassesGraphKernelA(const opt::irpass::OptimizeIRPassLib &i {"graph_kernel_reuse", opt::OptPassConfig(opt::GraphKernelReuse())}, {"interface_fusion", interface_fusion}, {"renormalize", opt::OptPassConfig::Renormalize()}, - {"cse", opt::OptPassConfig(opt::CSE(false))}, + {"cse", opt::OptPassConfig(opt::CSEPass(false))}, }); return map; } diff --git a/mindspore/ccsrc/pipeline/jit/pipeline.cc b/mindspore/ccsrc/pipeline/jit/pipeline.cc index afd59d29ae..1e42d9959b 100644 --- a/mindspore/ccsrc/pipeline/jit/pipeline.cc +++ b/mindspore/ccsrc/pipeline/jit/pipeline.cc @@ -29,9 +29,11 @@ #include "pipeline/jit/parse/data_converter.h" #include "frontend/optimizer/ad/dfunctor.h" #include "debug/anf_ir_dump.h" +#include "debug/dump_proto.h" #include "debug/anf_ir_utils.h" #include "utils/config_manager.h" #include "utils/convert_utils.h" +#include "utils/convert_utils_py.h" #include "utils/context/context_extends.h" #include "vm/segment_runner.h" #include "frontend/parallel/context.h" diff --git a/mindspore/ccsrc/pipeline/jit/pipeline_ge.cc b/mindspore/ccsrc/pipeline/jit/pipeline_ge.cc index a4254ea152..9c1b7f714d 100644 --- a/mindspore/ccsrc/pipeline/jit/pipeline_ge.cc +++ b/mindspore/ccsrc/pipeline/jit/pipeline_ge.cc @@ -30,6 +30,7 @@ #include "transform/graph_ir/graph_runner.h" #include "debug/draw.h" #include 
"abstract/abstract_value.h" +#include "utils/convert_utils_py.h" namespace mindspore { namespace pipeline { diff --git a/mindspore/ccsrc/pipeline/jit/remove_value_node_dup.cc b/mindspore/ccsrc/pipeline/jit/remove_value_node_dup.cc index 4c6ed7e718..afe72e7272 100644 --- a/mindspore/ccsrc/pipeline/jit/remove_value_node_dup.cc +++ b/mindspore/ccsrc/pipeline/jit/remove_value_node_dup.cc @@ -17,11 +17,13 @@ #include "pipeline/jit/remove_value_node_dup.h" #include "ir/anf.h" +#include "ir/func_graph.h" #include "ir/tensor.h" #include "ir/manager.h" #include "frontend/optimizer/cse.h" #include "utils/log_adapter.h" #include "utils/hashing.h" +#include "utils/convert_utils.h" namespace mindspore { namespace pipeline { diff --git a/mindspore/ccsrc/pipeline/jit/static_analysis/prim.cc b/mindspore/ccsrc/pipeline/jit/static_analysis/prim.cc index d7d5aed651..5490221531 100644 --- a/mindspore/ccsrc/pipeline/jit/static_analysis/prim.cc +++ b/mindspore/ccsrc/pipeline/jit/static_analysis/prim.cc @@ -34,6 +34,7 @@ #include "pipeline/jit/resource.h" #include "pipeline/jit/parse/resolve.h" #include "utils/convert_utils.h" +#include "utils/convert_utils_py.h" #include "utils/ms_context.h" #include "pipeline/jit/parse/data_converter.h" #include "abstract/primitive_infer_map.h" diff --git a/mindspore/ccsrc/pipeline/pynative/pynative_execute.cc b/mindspore/ccsrc/pipeline/pynative/pynative_execute.cc index fe1420ad15..0dadfd18c3 100644 --- a/mindspore/ccsrc/pipeline/pynative/pynative_execute.cc +++ b/mindspore/ccsrc/pipeline/pynative/pynative_execute.cc @@ -30,6 +30,7 @@ #include "utils/ms_context.h" #include "utils/context/context_extends.h" #include "utils/config_manager.h" +#include "utils/convert_utils_py.h" #include "frontend/operator/ops.h" #include "frontend/operator/composite/composite.h" #include "frontend/operator/composite/do_signature.h" diff --git a/mindspore/ccsrc/pybind_api/ir/primitive_py.cc b/mindspore/ccsrc/pybind_api/ir/primitive_py.cc index 1d27f1be59..699a3adebd 100644 --- a/mindspore/ccsrc/pybind_api/ir/primitive_py.cc +++ b/mindspore/ccsrc/pybind_api/ir/primitive_py.cc @@ -21,6 +21,7 @@ #include "pipeline/jit/parse/data_converter.h" #include "pybind11/pytypes.h" #include "utils/convert_utils_base.h" +#include "utils/convert_utils_py.h" #include "utils/primitive_utils.h" #include "utils/base_ref_extends.h" #include "utils/ms_context.h" diff --git a/mindspore/ccsrc/utils/context/context_extends.cc b/mindspore/ccsrc/utils/context/context_extends.cc index ff482c11c5..21d1dabbbf 100644 --- a/mindspore/ccsrc/utils/context/context_extends.cc +++ b/mindspore/ccsrc/utils/context/context_extends.cc @@ -21,6 +21,13 @@ #include #include +#include "pybind11/pybind11.h" + +#include "utils/ms_utils.h" +#include "utils/convert_utils_base.h" + +namespace py = pybind11; + namespace mindspore { namespace context { #ifdef ENABLE_GE diff --git a/mindspore/ccsrc/utils/context/context_extends.h b/mindspore/ccsrc/utils/context/context_extends.h index f425042bec..64408e277b 100644 --- a/mindspore/ccsrc/utils/context/context_extends.h +++ b/mindspore/ccsrc/utils/context/context_extends.h @@ -22,7 +22,6 @@ #include #include "utils/ms_context.h" #include "utils/tensorprint_utils.h" -#include "utils/convert_utils.h" #ifndef NO_DLIB #include "tdt/tsd_client.h" diff --git a/mindspore/ccsrc/utils/convert_utils.cc b/mindspore/ccsrc/utils/convert_utils.cc index 1819f12570..c6ff17ad18 100644 --- a/mindspore/ccsrc/utils/convert_utils.cc +++ b/mindspore/ccsrc/utils/convert_utils.cc @@ -24,211 +24,14 @@ #include #include 
-#include "pybind11/pybind11.h" #include "abstract/abstract_value.h" -#include "pipeline/jit/parse/parse.h" -#include "pipeline/jit/parse/parse_base.h" #include "ir/value.h" #include "ir/tensor.h" #include "ir/param_info.h" -#include "utils/base_ref_extends.h" #include "utils/ms_context.h" #include "utils/shape_utils.h" namespace mindspore { -py::object BuiltinsToPyData(const Any &value); -py::object BuiltinsToPyData(const BaseRef &value); -py::object VectorToPyData(const Any &value); -py::object VectorRefToPyData(const VectorRef &value); - -py::object ValuePtrToPyData(const ValuePtr &value) { - if (value == nullptr) { - MS_LOG(EXCEPTION) << "value is null"; - } - py::object ret; - if (value->isa()) { - MS_LOG(DEBUG) << "int8"; - py::int_ v = value->cast()->value(); - ret = v; - } else if (value->isa()) { - MS_LOG(DEBUG) << "int16"; - py::int_ v = value->cast()->value(); - ret = v; - } else if (value->isa()) { - MS_LOG(DEBUG) << "int32"; - py::int_ v = value->cast()->value(); - ret = v; - } else if (value->isa()) { - MS_LOG(DEBUG) << "int64"; - py::int_ v = value->cast()->value(); - ret = v; - } else if (value->isa()) { - MS_LOG(DEBUG) << "uint8"; - py::int_ v = value->cast()->value(); - ret = v; - } else if (value->isa()) { - MS_LOG(DEBUG) << "uint16"; - py::int_ v = value->cast()->value(); - ret = v; - } else if (value->isa()) { - MS_LOG(DEBUG) << "uint32"; - py::int_ v = value->cast()->value(); - ret = v; - } else if (value->isa()) { - MS_LOG(DEBUG) << "uint64"; - py::int_ v = value->cast()->value(); - ret = v; - } else if (value->isa()) { - MS_LOG(DEBUG) << "bool"; - py::bool_ v = value->cast()->value(); - ret = v; - } else if (value->isa()) { - MS_LOG(DEBUG) << "double"; - py::float_ v = value->cast()->value(); - ret = v; - } else if (value->isa()) { - MS_LOG(DEBUG) << "float"; - py::float_ v = value->cast()->value(); - ret = v; - } else if (value->isa()) { - MS_LOG(DEBUG) << "String"; - py::str v = value->cast()->value(); - ret = v; - } else if (value->isa()) { - MS_LOG(DEBUG) << "tensor"; - py::tuple v(1); - v[0] = value->cast(); - ret = v[0]; - } else if (value->isa()) { - MS_LOG(DEBUG) << "MetaTensor"; - py::tuple v(1); - v[0] = value->cast(); - ret = v[0]; - } else if (value->isa()) { - MS_LOG(DEBUG) << "RefKey"; - py::tuple v(1); - v[0] = value->cast(); - ret = v[0]; - } else if (value->isa()) { - MS_LOG(DEBUG) << "tuple"; - auto value_tuple = value->cast()->value(); - py::tuple rets(value_tuple.size()); - - size_t i = 0; - for (auto &v : value_tuple) { - rets[i] = ValuePtrToPyData(v); - i++; - } - ret = rets; - } else if (value->isa()) { - MS_LOG(DEBUG) << "list"; - auto value_list = value->cast()->value(); - py::list rets(value_list.size()); - - size_t i = 0; - for (auto &v : value_list) { - rets[i] = ValuePtrToPyData(v); - i++; - } - ret = rets; - } else if (value->isa()) { - ret = py::ellipsis(); - } else if (value->isa()) { - auto slice = value->cast(); - auto start = ValuePtrToPyData(slice->start()); - auto end = ValuePtrToPyData(slice->stop()); - auto step = ValuePtrToPyData(slice->step()); - ret = parse::python_adapter::CallPyFn(parse::PYTHON_MOD_PARSE_MODULE, parse::PYTHON_PARSE_CLASS_SLICE, start, end, - step); - } else if (value->isa()) { - py::tuple v(1); - v[0] = value->cast(); - ret = v[0]; - } else if (value->isa()) { - ret = py::none(); - } else if (value->isa()) { - ret = py::none(); - } else { - MS_LOG(INFO) << "Unsupported convert value: " << value->ToString() << " to a PyData."; - } - return ret; -} - -py::object AnyToPyData(const Any &value) { - py::object 
ret; - MS_LOG(DEBUG) << "AnyToPyData " << value.GetString(); - if (value.is() || value.is() || value.is() || value.is()) { - ret = BuiltinsToPyData(value); - } else if (value.is()) { - MS_LOG(DEBUG) << "ValuePtr"; - ValuePtr v = value.cast(); - ret = ValuePtrToPyData(v); - } else if (value.is()) { - MS_LOG(DEBUG) << "tensor"; - py::tuple v(1); - v[0] = value.cast(); - ret = v[0]; - } else if (value.is()) { - MS_LOG(DEBUG) << "py obj"; - ret = value.cast(); - } else if (value.is>() || value.is>()) { - ret = VectorToPyData(value); - } else if (value.is>()) { - MS_LOG(DEBUG) << "list_any"; - auto value_list = value.cast>(); - py::list rets = py::list(); - for (auto &v : value_list) { - rets.append(AnyToPyData(v)); - } - ret = rets; - } else if (value.is>()) { - auto value_list = value.cast>(); - py::tuple rets(value_list.size()); - for (size_t i = 0; i < value_list.size(); i++) { - rets[i] = AnyToPyData(value_list[i]); - } - ret = rets; - } else if (value.is()) { - py::tuple v(1); - v[0] = value.cast(); - ret = v[0]; - } else { - MS_LOG(EXCEPTION) << "value is not support type"; - } - return ret; -} - -py::object BaseRefToPyData(const BaseRef &value) { - py::object ret; - MS_LOG(DEBUG) << "BaseRefToPyData " << value.ToString(); - if (utils::isa(value) || utils::isa(value) || utils::isa(value) || utils::isa(value)) { - ret = BuiltinsToPyData(value); - } else if (utils::isa(value)) { - MS_LOG(DEBUG) << "ValuePtr"; - ValuePtr v = utils::cast(value); - ret = ValuePtrToPyData(v); - } else if (utils::isa(value)) { - MS_LOG(DEBUG) << "tensor"; - py::tuple v(1); - v[0] = utils::cast(value); - ret = v[0]; - } else if (utils::isa(value)) { - MS_LOG(DEBUG) << "py obj"; - PyObjectRef py_ref = utils::cast(value); - ret = py_ref.object_; - } else if (utils::isa(value)) { - auto vec_ref = utils::cast(value); - ret = VectorRefToPyData(vec_ref); - } else if (utils::isa(value)) { - py::tuple v(1); - v[0] = utils::cast(value); - ret = v[0]; - } else { - MS_LOG(EXCEPTION) << "value is not support type"; - } - return ret; -} - bool ValueToBool(const ValuePtr &v, bool *value) { MS_EXCEPTION_IF_NULL(v); if (v->isa()) { @@ -315,185 +118,6 @@ bool BaseRefToBool(const BaseRef &v, bool *value) { return true; } -py::object BuiltinsToPyData(const Any &value) { - if (value.is()) { - MS_LOG(DEBUG) << "int"; - py::int_ ret = value.cast(); - return std::move(ret); - } else if (value.is()) { - MS_LOG(DEBUG) << "float"; - py::float_ ret = value.cast(); - return std::move(ret); - } else if (value.is()) { - MS_LOG(DEBUG) << "double"; - py::float_ ret = value.cast(); - return std::move(ret); - } else { - MS_LOG(DEBUG) << "bool"; - py::bool_ ret = value.cast(); - return std::move(ret); - } -} - -py::object BuiltinsToPyData(const BaseRef &value) { - if (utils::isa(value)) { - MS_LOG(DEBUG) << "int"; - py::int_ ret = utils::cast(value); - return std::move(ret); - } else if (utils::isa(value)) { - MS_LOG(DEBUG) << "float"; - py::float_ ret = utils::cast(value); - return std::move(ret); - } else if (utils::isa(value)) { - MS_LOG(DEBUG) << "double"; - py::float_ ret = utils::cast(value); - return std::move(ret); - } else { - MS_LOG(DEBUG) << "bool"; - py::bool_ ret = utils::cast(value); - return std::move(ret); - } -} - -py::object VectorToPyData(const Any &value) { - py::object ret; - if (value.is>()) { - MS_LOG(DEBUG) << "vector_tensor"; - std::vector outputs; - outputs = value.cast>(); - py::tuple tensor_tuple(outputs.size()); - for (std::size_t i = 0; i < outputs.size(); ++i) { - tensor_tuple[i] = *outputs[i]; - } - ret = 
tensor_tuple; - } else { - MS_LOG(DEBUG) << "vector_any"; - auto value_list = value.cast>(); - py::tuple any_tuple = py::tuple(value_list.size()); - size_t i = 0; - for (auto &v : value_list) { - any_tuple[i] = AnyToPyData(v); - i++; - } - ret = any_tuple; - } - return ret; -} - -py::object VectorRefToPyData(const VectorRef &value_list) { - py::object ret; - MS_LOG(DEBUG) << "vector_ref"; - size_t value_size = value_list.size(); - auto ref_tuple = py::tuple(value_size); - for (size_t i = 0; i < value_size; i++) { - ref_tuple[i] = BaseRefToPyData(value_list[i]); - } - ret = ref_tuple; - return ret; -} - -AbstractBasePtr PyListDtype2AbstractTensor(const py::object &shape_obj, const py::object &type_obj, - const py::object &min_shape, const py::object &max_shape) { - if ((py::isinstance(shape_obj) || py::isinstance(shape_obj)) && py::isinstance(type_obj)) { - auto ret_vec = shape_obj.cast(); - auto ret_dtype = type_obj.cast(); - MS_EXCEPTION_IF_NULL(ret_dtype); - // if the size of shape list is empty, return an scalar abstract - if (ret_vec.empty() && (!ret_dtype->isa())) { - abstract::AbstractScalarPtr abs_scalar = std::make_shared(kAnyValue, ret_dtype); - return abs_scalar; - } - AbstractBasePtr tensor = nullptr; - ShapeVector min_shape_vec; - ShapeVector max_shape_vec; - if (!min_shape.is_none()) { - min_shape_vec = min_shape.cast(); - } - if (!max_shape.is_none()) { - max_shape_vec = max_shape.cast(); - } - auto ret_shape = std::make_shared(ret_vec, min_shape_vec, max_shape_vec); - if (ret_dtype->isa()) { - auto tensor_type = type_obj.cast(); - MS_EXCEPTION_IF_NULL(tensor_type); - auto element = std::make_shared(kAnyValue, tensor_type->element()); - tensor = std::make_shared(element, ret_shape); - } else { - auto element = std::make_shared(kAnyValue, ret_dtype); - tensor = std::make_shared(element, ret_shape); - } - return tensor; - } else if (py::isinstance(shape_obj) && py::isinstance(type_obj)) { - py::tuple shape_tuple = shape_obj.cast(); - py::tuple typeid_tuple = type_obj.cast(); - AbstractBasePtrList ptr_list; - for (size_t it = 0; it < shape_tuple.size(); ++it) { - auto tensor_it = PyListDtype2AbstractTensor(shape_tuple[it], typeid_tuple[it]); - ptr_list.push_back(tensor_it); - } - auto tuple = std::make_shared(ptr_list); - return tuple; - } else if (shape_obj.is_none() && type_obj.is_none()) { - // AbstractNone indicates there is no output for this CNode node. - auto abstract_none = std::make_shared(); - return abstract_none; - } else { - // When sparse enabled, the undetermined might be raised and eliminated in opt passes - auto context = MsContext::GetInstance(); - MS_EXCEPTION_IF_NULL(context); - bool enable_sparse = context->enable_sparse(); - if (enable_sparse) { - return std::make_shared(); - } - MS_LOG(EXCEPTION) << "Python evaluator return invalid shape or type. " << (std::string)py::str(type_obj); - } -} -bool IsGraphOutputValueNodeOrParameter(const AnfNodePtr &output, const py::tuple &args, - const std::shared_ptr &ret_val) { - if (output->isa()) { - MS_LOG(INFO) << "Graph's output is a constant. No need to execute."; - ValuePtr value = GetValueNode(output); - *ret_val = ValuePtrToPyData(value); - return true; - } - - // Adapter will transform values in __init__() and construct() to parameters, this could cause - // inputs (a.k.a args in current function) size less than parameters'. - if (output->isa()) { - MS_LOG(INFO) << "Graph's output is a parameter. If all params are inputs, no need to execute."; - // Find the right parameter as ret_val. 
- auto func_graph = output->func_graph(); - MS_EXCEPTION_IF_NULL(func_graph); - auto params = func_graph->parameters(); - if ((args.size() + func_graph->hyper_param_count()) != params.size()) { - MS_LOG(EXCEPTION) << "Input size " << args.size() << " add Parameter count " << func_graph->hyper_param_count() - << " not equal to graph input size " << params.size() << ", let graph to be executed."; - } - - auto it = std::find(params.begin(), params.end(), output); - if (it == params.end()) { - MS_EXCEPTION(UnknownError) << "When graph output is Parameter, it should be found in graph parameters"; - } - size_t index = it - params.cbegin(); - if (index >= args.size() + func_graph->hyper_param_count()) { - MS_EXCEPTION(UnknownError) << "Index " << index << " equal or larger than args size " << args.size() - << " add Parameter count " << func_graph->hyper_param_count() << "."; - } - if (index < args.size()) { - *ret_val = args[index]; - } else { - auto param = dyn_cast(params[index]); - MS_EXCEPTION_IF_NULL(param); - if (!param->has_default()) { - MS_LOG(EXCEPTION) << "Can not determine value of Parameter " << index << " (" << param->name() << ")"; - } - auto tensor = param->default_param(); - *ret_val = py::cast(tensor); - } - return true; - } - return false; -} namespace { // Isomorphism bool SameNode(const AnfNodePtr &node1, const AnfNodePtr &node2, FuncGraphPairMapEquiv *equiv_func_graph, diff --git a/mindspore/ccsrc/utils/convert_utils.h b/mindspore/ccsrc/utils/convert_utils.h index 5597ae4d5e..1628d78a4c 100644 --- a/mindspore/ccsrc/utils/convert_utils.h +++ b/mindspore/ccsrc/utils/convert_utils.h @@ -25,14 +25,12 @@ #include #include -#include "pybind11/pybind11.h" #include "utils/convert_utils_base.h" #include "utils/any.h" #include "base/base_ref.h" #include "base/base.h" #include "ir/anf.h" - -namespace py = pybind11; +#include "ir/func_graph.h" namespace mindspore { namespace tensor { @@ -40,19 +38,9 @@ class Tensor; using TensorPtr = std::shared_ptr; } // namespace tensor -py::object AnyToPyData(const Any &value); -py::object BaseRefToPyData(const BaseRef &value); bool BaseRefToBool(const BaseRef &in, bool *out); bool BaseRefToInt(const ValuePtr &v, int *value); bool ValueToBool(const ValuePtr &in, bool *out); -py::object ValuePtrToPyData(const ValuePtr &value); - -AbstractBasePtr PyListDtype2AbstractTensor(const py::object &shape_obj, const py::object &type_obj, - const py::object &min_shape = py::none(), - const py::object &max_shape = py::none()); - -bool IsGraphOutputValueNodeOrParameter(const AnfNodePtr &output, const py::tuple &args, - const std::shared_ptr &ret_val); // Isomorphism struct PairHasher { diff --git a/mindspore/ccsrc/utils/convert_utils_py.cc b/mindspore/ccsrc/utils/convert_utils_py.cc new file mode 100644 index 0000000000..5fd7b2b8a9 --- /dev/null +++ b/mindspore/ccsrc/utils/convert_utils_py.cc @@ -0,0 +1,409 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "utils/convert_utils_py.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "abstract/abstract_value.h" +#include "pipeline/jit/parse/parse.h" +#include "pipeline/jit/parse/parse_base.h" +#include "ir/value.h" +#include "ir/tensor.h" +#include "ir/param_info.h" +#include "pybind_api/ir/base_ref_py.h" +#include "utils/ms_context.h" + +namespace mindspore { +py::object BuiltinsToPyData(const Any &value); +py::object BuiltinsToPyData(const BaseRef &value); +py::object VectorToPyData(const Any &value); +py::object VectorRefToPyData(const VectorRef &value); + +py::object ValuePtrToPyData(const ValuePtr &value) { + if (value == nullptr) { + MS_LOG(EXCEPTION) << "value is null"; + } + py::object ret; + if (value->isa()) { + MS_LOG(DEBUG) << "int8"; + py::int_ v = value->cast()->value(); + ret = v; + } else if (value->isa()) { + MS_LOG(DEBUG) << "int16"; + py::int_ v = value->cast()->value(); + ret = v; + } else if (value->isa()) { + MS_LOG(DEBUG) << "int32"; + py::int_ v = value->cast()->value(); + ret = v; + } else if (value->isa()) { + MS_LOG(DEBUG) << "int64"; + py::int_ v = value->cast()->value(); + ret = v; + } else if (value->isa()) { + MS_LOG(DEBUG) << "uint8"; + py::int_ v = value->cast()->value(); + ret = v; + } else if (value->isa()) { + MS_LOG(DEBUG) << "uint16"; + py::int_ v = value->cast()->value(); + ret = v; + } else if (value->isa()) { + MS_LOG(DEBUG) << "uint32"; + py::int_ v = value->cast()->value(); + ret = v; + } else if (value->isa()) { + MS_LOG(DEBUG) << "uint64"; + py::int_ v = value->cast()->value(); + ret = v; + } else if (value->isa()) { + MS_LOG(DEBUG) << "bool"; + py::bool_ v = value->cast()->value(); + ret = v; + } else if (value->isa()) { + MS_LOG(DEBUG) << "double"; + py::float_ v = value->cast()->value(); + ret = v; + } else if (value->isa()) { + MS_LOG(DEBUG) << "float"; + py::float_ v = value->cast()->value(); + ret = v; + } else if (value->isa()) { + MS_LOG(DEBUG) << "String"; + py::str v = value->cast()->value(); + ret = v; + } else if (value->isa()) { + MS_LOG(DEBUG) << "tensor"; + py::tuple v(1); + v[0] = value->cast(); + ret = v[0]; + } else if (value->isa()) { + MS_LOG(DEBUG) << "MetaTensor"; + py::tuple v(1); + v[0] = value->cast(); + ret = v[0]; + } else if (value->isa()) { + MS_LOG(DEBUG) << "RefKey"; + py::tuple v(1); + v[0] = value->cast(); + ret = v[0]; + } else if (value->isa()) { + MS_LOG(DEBUG) << "tuple"; + auto value_tuple = value->cast()->value(); + py::tuple rets(value_tuple.size()); + + size_t i = 0; + for (auto &v : value_tuple) { + rets[i] = ValuePtrToPyData(v); + i++; + } + ret = rets; + } else if (value->isa()) { + MS_LOG(DEBUG) << "list"; + auto value_list = value->cast()->value(); + py::list rets(value_list.size()); + + size_t i = 0; + for (auto &v : value_list) { + rets[i] = ValuePtrToPyData(v); + i++; + } + ret = rets; + } else if (value->isa()) { + ret = py::ellipsis(); + } else if (value->isa()) { + auto slice = value->cast(); + auto start = ValuePtrToPyData(slice->start()); + auto end = ValuePtrToPyData(slice->stop()); + auto step = ValuePtrToPyData(slice->step()); + ret = parse::python_adapter::CallPyFn(parse::PYTHON_MOD_PARSE_MODULE, parse::PYTHON_PARSE_CLASS_SLICE, start, end, + step); + } else if (value->isa()) { + py::tuple v(1); + v[0] = value->cast(); + ret = v[0]; + } else if (value->isa()) { + ret = py::none(); + } else if (value->isa()) { + ret = py::none(); + } else { + MS_LOG(INFO) << "Unsupported convert value: " << value->ToString() << " to a PyData."; + } + return 
ret; +} + +py::object AnyToPyData(const Any &value) { + py::object ret; + MS_LOG(DEBUG) << "AnyToPyData " << value.GetString(); + if (value.is() || value.is() || value.is() || value.is()) { + ret = BuiltinsToPyData(value); + } else if (value.is()) { + MS_LOG(DEBUG) << "ValuePtr"; + ValuePtr v = value.cast(); + ret = ValuePtrToPyData(v); + } else if (value.is()) { + MS_LOG(DEBUG) << "tensor"; + py::tuple v(1); + v[0] = value.cast(); + ret = v[0]; + } else if (value.is()) { + MS_LOG(DEBUG) << "py obj"; + ret = value.cast(); + } else if (value.is>() || value.is>()) { + ret = VectorToPyData(value); + } else if (value.is>()) { + MS_LOG(DEBUG) << "list_any"; + auto value_list = value.cast>(); + py::list rets = py::list(); + for (auto &v : value_list) { + rets.append(AnyToPyData(v)); + } + ret = rets; + } else if (value.is>()) { + auto value_list = value.cast>(); + py::tuple rets(value_list.size()); + for (size_t i = 0; i < value_list.size(); i++) { + rets[i] = AnyToPyData(value_list[i]); + } + ret = rets; + } else if (value.is()) { + py::tuple v(1); + v[0] = value.cast(); + ret = v[0]; + } else { + MS_LOG(EXCEPTION) << "value is not support type"; + } + return ret; +} + +py::object BaseRefToPyData(const BaseRef &value) { + py::object ret; + MS_LOG(DEBUG) << "BaseRefToPyData " << value.ToString(); + if (utils::isa(value) || utils::isa(value) || utils::isa(value) || utils::isa(value)) { + ret = BuiltinsToPyData(value); + } else if (utils::isa(value)) { + MS_LOG(DEBUG) << "ValuePtr"; + ValuePtr v = utils::cast(value); + ret = ValuePtrToPyData(v); + } else if (utils::isa(value)) { + MS_LOG(DEBUG) << "tensor"; + py::tuple v(1); + v[0] = utils::cast(value); + ret = v[0]; + } else if (utils::isa(value)) { + MS_LOG(DEBUG) << "py obj"; + PyObjectRef py_ref = utils::cast(value); + ret = py_ref.object_; + } else if (utils::isa(value)) { + auto vec_ref = utils::cast(value); + ret = VectorRefToPyData(vec_ref); + } else if (utils::isa(value)) { + py::tuple v(1); + v[0] = utils::cast(value); + ret = v[0]; + } else { + MS_LOG(EXCEPTION) << "value is not support type"; + } + return ret; +} + +py::object BuiltinsToPyData(const Any &value) { + if (value.is()) { + MS_LOG(DEBUG) << "int"; + py::int_ ret = value.cast(); + return std::move(ret); + } else if (value.is()) { + MS_LOG(DEBUG) << "float"; + py::float_ ret = value.cast(); + return std::move(ret); + } else if (value.is()) { + MS_LOG(DEBUG) << "double"; + py::float_ ret = value.cast(); + return std::move(ret); + } else { + MS_LOG(DEBUG) << "bool"; + py::bool_ ret = value.cast(); + return std::move(ret); + } +} + +py::object BuiltinsToPyData(const BaseRef &value) { + if (utils::isa(value)) { + MS_LOG(DEBUG) << "int"; + py::int_ ret = utils::cast(value); + return std::move(ret); + } else if (utils::isa(value)) { + MS_LOG(DEBUG) << "float"; + py::float_ ret = utils::cast(value); + return std::move(ret); + } else if (utils::isa(value)) { + MS_LOG(DEBUG) << "double"; + py::float_ ret = utils::cast(value); + return std::move(ret); + } else { + MS_LOG(DEBUG) << "bool"; + py::bool_ ret = utils::cast(value); + return std::move(ret); + } +} + +py::object VectorToPyData(const Any &value) { + py::object ret; + if (value.is>()) { + MS_LOG(DEBUG) << "vector_tensor"; + std::vector outputs; + outputs = value.cast>(); + py::tuple tensor_tuple(outputs.size()); + for (std::size_t i = 0; i < outputs.size(); ++i) { + tensor_tuple[i] = *outputs[i]; + } + ret = tensor_tuple; + } else { + MS_LOG(DEBUG) << "vector_any"; + auto value_list = value.cast>(); + py::tuple any_tuple = 
py::tuple(value_list.size()); + size_t i = 0; + for (auto &v : value_list) { + any_tuple[i] = AnyToPyData(v); + i++; + } + ret = any_tuple; + } + return ret; +} + +py::object VectorRefToPyData(const VectorRef &value_list) { + py::object ret; + MS_LOG(DEBUG) << "vector_ref"; + size_t value_size = value_list.size(); + auto ref_tuple = py::tuple(value_size); + for (size_t i = 0; i < value_size; i++) { + ref_tuple[i] = BaseRefToPyData(value_list[i]); + } + ret = ref_tuple; + return ret; +} + +AbstractBasePtr PyListDtype2AbstractTensor(const py::object &shape_obj, const py::object &type_obj, + const py::object &min_shape, const py::object &max_shape) { + if ((py::isinstance(shape_obj) || py::isinstance(shape_obj)) && py::isinstance(type_obj)) { + auto ret_vec = shape_obj.cast(); + auto ret_dtype = type_obj.cast(); + MS_EXCEPTION_IF_NULL(ret_dtype); + // if the size of shape list is empty, return an scalar abstract + if (ret_vec.empty() && (!ret_dtype->isa())) { + abstract::AbstractScalarPtr abs_scalar = std::make_shared(kAnyValue, ret_dtype); + return abs_scalar; + } + AbstractBasePtr tensor = nullptr; + ShapeVector min_shape_vec; + ShapeVector max_shape_vec; + if (!min_shape.is_none()) { + min_shape_vec = min_shape.cast(); + } + if (!max_shape.is_none()) { + max_shape_vec = max_shape.cast(); + } + auto ret_shape = std::make_shared(ret_vec, min_shape_vec, max_shape_vec); + if (ret_dtype->isa()) { + auto tensor_type = type_obj.cast(); + MS_EXCEPTION_IF_NULL(tensor_type); + auto element = std::make_shared(kAnyValue, tensor_type->element()); + tensor = std::make_shared(element, ret_shape); + } else { + auto element = std::make_shared(kAnyValue, ret_dtype); + tensor = std::make_shared(element, ret_shape); + } + return tensor; + } else if (py::isinstance(shape_obj) && py::isinstance(type_obj)) { + py::tuple shape_tuple = shape_obj.cast(); + py::tuple typeid_tuple = type_obj.cast(); + AbstractBasePtrList ptr_list; + for (size_t it = 0; it < shape_tuple.size(); ++it) { + auto tensor_it = PyListDtype2AbstractTensor(shape_tuple[it], typeid_tuple[it]); + ptr_list.push_back(tensor_it); + } + auto tuple = std::make_shared(ptr_list); + return tuple; + } else if (shape_obj.is_none() && type_obj.is_none()) { + // AbstractNone indicates there is no output for this CNode node. + auto abstract_none = std::make_shared(); + return abstract_none; + } else { + // When sparse enabled, the undetermined might be raised and eliminated in opt passes + auto context = MsContext::GetInstance(); + MS_EXCEPTION_IF_NULL(context); + bool enable_sparse = context->enable_sparse(); + if (enable_sparse) { + return std::make_shared(); + } + MS_LOG(EXCEPTION) << "Python evaluator return invalid shape or type. " << (std::string)py::str(type_obj); + } +} +bool IsGraphOutputValueNodeOrParameter(const AnfNodePtr &output, const py::tuple &args, + const std::shared_ptr &ret_val) { + if (output->isa()) { + MS_LOG(INFO) << "Graph's output is a constant. No need to execute."; + ValuePtr value = GetValueNode(output); + *ret_val = ValuePtrToPyData(value); + return true; + } + + // Adapter will transform values in __init__() and construct() to parameters, this could cause + // inputs (a.k.a args in current function) size less than parameters'. + if (output->isa()) { + MS_LOG(INFO) << "Graph's output is a parameter. If all params are inputs, no need to execute."; + // Find the right parameter as ret_val. 
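+    // params holds the caller-visible inputs first, with hyper-parameters
+    // appended by the adapter; an index below args.size() is therefore
+    // answered from args, a larger one from the Parameter's default value.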
+ auto func_graph = output->func_graph(); + MS_EXCEPTION_IF_NULL(func_graph); + auto params = func_graph->parameters(); + if ((args.size() + func_graph->hyper_param_count()) != params.size()) { + MS_LOG(EXCEPTION) << "Input size " << args.size() << " add Parameter count " << func_graph->hyper_param_count() + << " not equal to graph input size " << params.size() << ", let graph to be executed."; + } + + auto it = std::find(params.begin(), params.end(), output); + if (it == params.end()) { + MS_EXCEPTION(UnknownError) << "When graph output is Parameter, it should be found in graph parameters"; + } + size_t index = it - params.cbegin(); + if (index >= args.size() + func_graph->hyper_param_count()) { + MS_EXCEPTION(UnknownError) << "Index " << index << " equal or larger than args size " << args.size() + << " add Parameter count " << func_graph->hyper_param_count() << "."; + } + if (index < args.size()) { + *ret_val = args[index]; + } else { + auto param = dyn_cast(params[index]); + MS_EXCEPTION_IF_NULL(param); + if (!param->has_default()) { + MS_LOG(EXCEPTION) << "Can not determine value of Parameter " << index << " (" << param->name() << ")"; + } + auto tensor = param->default_param(); + *ret_val = py::cast(tensor); + } + return true; + } + return false; +} +} // namespace mindspore diff --git a/mindspore/ccsrc/utils/convert_utils_py.h b/mindspore/ccsrc/utils/convert_utils_py.h new file mode 100644 index 0000000000..069154ea4a --- /dev/null +++ b/mindspore/ccsrc/utils/convert_utils_py.h @@ -0,0 +1,43 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_UTILS_CONVERT_UTILS_PY_H_ +#define MINDSPORE_CCSRC_UTILS_CONVERT_UTILS_PY_H_ + +#include + +#include "pybind11/pybind11.h" +#include "utils/convert_utils_base.h" +#include "utils/any.h" +#include "utils/base_ref_extends.h" +#include "ir/anf.h" + +namespace py = pybind11; + +namespace mindspore { +py::object AnyToPyData(const Any &value); +py::object BaseRefToPyData(const BaseRef &value); +py::object ValuePtrToPyData(const ValuePtr &value); + +bool IsGraphOutputValueNodeOrParameter(const AnfNodePtr &output, const py::tuple &args, + const std::shared_ptr &ret_val); + +AbstractBasePtr PyListDtype2AbstractTensor(const py::object &shape_obj, const py::object &type_obj, + const py::object &min_shape = py::none(), + const py::object &max_shape = py::none()); +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_UTILS_CONVERT_UTILS_PY_H_ diff --git a/mindspore/ccsrc/utils/primitive_utils.cc b/mindspore/ccsrc/utils/primitive_utils.cc index 8b6b06bfcb..658b2f8a23 100644 --- a/mindspore/ccsrc/utils/primitive_utils.cc +++ b/mindspore/ccsrc/utils/primitive_utils.cc @@ -22,6 +22,7 @@ #include "utils/log_adapter.h" #include "utils/ms_utils.h" #include "utils/base_ref_extends.h" +#include "utils/convert_utils_py.h" namespace mindspore { py::function GetBpropFunctionByObj(py::object obj) { diff --git a/mindspore/ccsrc/vm/vmimpl.h b/mindspore/ccsrc/vm/vmimpl.h index 2cc94bc062..12fc9bc916 100644 --- a/mindspore/ccsrc/vm/vmimpl.h +++ b/mindspore/ccsrc/vm/vmimpl.h @@ -24,7 +24,6 @@ #include #include -#include "utils/base_ref_extends.h" #include "ir/anf.h" #include "ir/manager.h" #include "ir/tensor.h" diff --git a/tests/ut/cpp/optimizer/optimizer_test.cc b/tests/ut/cpp/optimizer/optimizer_test.cc index c5c99531e4..6e9d04f6cf 100644 --- a/tests/ut/cpp/optimizer/optimizer_test.cc +++ b/tests/ut/cpp/optimizer/optimizer_test.cc @@ -21,7 +21,7 @@ #include "ir/anf.h" #include "frontend/operator/ops.h" -#include "frontend/optimizer/cse.h" +#include "frontend/optimizer/cse_pass.h" #include "frontend/optimizer/optimizer.h" #include "frontend/optimizer/irpass.h" #include "debug/draw.h" @@ -53,7 +53,7 @@ TEST_F(TestOptOptimizer, test_step_opt) { irpass.inline_, }}, {"grad", {irpass.expand_jprim_}}, - {"cse", OptPassConfig(CSE(false))}}, + {"cse", OptPassConfig(CSEPass(false))}}, true); EXPECT_TRUE(optimizer.get() != nullptr); diff --git a/tests/ut/cpp/parallel/step_parallel_test.cc b/tests/ut/cpp/parallel/step_parallel_test.cc index 80b8f6be0c..0c1ff2e84f 100644 --- a/tests/ut/cpp/parallel/step_parallel_test.cc +++ b/tests/ut/cpp/parallel/step_parallel_test.cc @@ -20,6 +20,7 @@ #include "debug/draw.h" #include "frontend/operator/ops.h" #include "pipeline/jit/static_analysis/static_analysis.h" +#include "utils/convert_utils_py.h" namespace mindspore { namespace parallel { diff --git a/tests/ut/cpp/stub/anf_ir/dump_proto_stub.cc b/tests/ut/cpp/stub/anf_ir/dump_proto_stub.cc index 45b2f422ea..cadedbaeae 100644 --- a/tests/ut/cpp/stub/anf_ir/dump_proto_stub.cc +++ b/tests/ut/cpp/stub/anf_ir/dump_proto_stub.cc @@ -13,10 +13,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#include "debug/anf_ir_utils.h" +#include "debug/dump_proto.h" namespace mindspore { +void DumpIRProto(const FuncGraphPtr &func_graph, const std::string &suffix) { return; } + std::string GetFuncGraphProtoString(const FuncGraphPtr &func_graph) { return ""; } std::string GetOnnxProtoString(const FuncGraphPtr &func_graph) { return ""; } diff --git a/tests/ut/cpp/vm/segment_runner_test.cc b/tests/ut/cpp/vm/segment_runner_test.cc index 22d3e6857d..fb4a13d181 100644 --- a/tests/ut/cpp/vm/segment_runner_test.cc +++ b/tests/ut/cpp/vm/segment_runner_test.cc @@ -29,6 +29,7 @@ #include "vm/transform.h" #include "ir/tensor.h" #include "utils/convert_utils.h" +#include "utils/convert_utils_py.h" #include "utils/log_adapter.h" namespace mindspore {
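
Taken together, the split leaves a clean boundary: pybind-free code keeps
including utils/convert_utils.h and debug/dump_proto.h, while anything that
builds py::object values now goes through the new utils/convert_utils_py.h.
A minimal sketch of a caller on the Python-facing side, assuming a MindSpore
source build where the headers above are on the include path (RunAndConvert
is an illustrative name, not part of this patch):

    // Only this Python-boundary translation unit needs pybind11; callers
    // that stay on the BaseRef/ValuePtr side include convert_utils.h alone.
    #include "base/base_ref.h"           // BaseRef / VectorRef
    #include "utils/convert_utils_py.h"  // BaseRefToPyData, ValuePtrToPyData

    namespace mindspore {
    // Convert graph-executor outputs (a VectorRef of BaseRef values) into a
    // Python tuple for return across the pybind11 boundary.
    py::object RunAndConvert(const VectorRef &outputs) {
      return BaseRefToPyData(outputs);  // dispatches to VectorRefToPyData
    }
    }  // namespace mindspore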