@@ -666,6 +666,5 @@ REGISTER_FRONTENT_PRIMITIVE_EVAL_IMPL(J, prim::kPrimJ, InferImplJ);
REGISTER_FRONTENT_PRIMITIVE_EVAL_IMPL(BroadcastGradientArgs, prim::kPrimBroadcastGradientArgs,
InferImplBroadcastGradientArgs);
REGISTER_PRIMITIVE_EVAL_IMPL(Assign, prim::kPrimAssign, InferImplAssign);
} // namespace abstract
} // namespace mindspore
@@ -30,7 +30,6 @@
namespace mindspore {
/* namespace to support opt */
namespace opt {
// Common subexpression elimination.
class CSEPass : public CSE {
public:
@@ -27,7 +27,6 @@
namespace mindspore {
namespace opt {
bool CNodeHasTupleInput(const CNodePtr &cnode);
bool FuncGraphHasTupleInput(const FuncGraphPtr &fg);
std::vector<AnfNodePtr> TransformTupleArgument(const FuncGraphPtr &fg, const AnfNodePtr &node,
@@ -102,7 +101,6 @@ class GraphTupleParamTransform {
}
std::unordered_map<FuncGraphPtr, FuncGraphPtr> cache_;
};
} // namespace opt
} // namespace mindspore
#endif // MINDSPORE_CCSRC_FRONTEND_OPTIMIZER_GRAPH_TRANSFORM_H
@@ -145,7 +145,7 @@ AnfNodePtr MatchResult::get_node(const PatternPtr &pattern) {
}
void MatchResult::merge(const MatchResultPtr &other_result) {
-auto other_result_map = other_result->_result();
+auto other_result_map = other_result->result();
// add/update entries in other_result
for (auto &iter : other_result_map) {
match_result_[iter.first] = iter.second;
@@ -259,7 +259,7 @@ class MatchResult {
MatchResult() {}
~MatchResult() = default;
void add_entry(PatternPtr pattern, AnfNodePtr node) { match_result_[pattern] = node; }
-PatternNodeMap &_result() { return match_result_; }
+PatternNodeMap &result() { return match_result_; }
AnfNodePtr get_node(const PatternPtr &pattern);
void merge(const MatchResultPtr &other_result);
void clear() { match_result_.clear(); }
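The two hunks above rename MatchResult's map accessor from _result() to result() and update the call site in merge(). A minimal, self-contained sketch of the resulting interface (Pattern and AnfNode below are simplified stand-ins, not the real MindSpore classes):

    #include <memory>
    #include <unordered_map>

    struct Pattern {};
    struct AnfNode {};
    using PatternPtr = std::shared_ptr<Pattern>;
    using AnfNodePtr = std::shared_ptr<AnfNode>;
    using PatternNodeMap = std::unordered_map<PatternPtr, AnfNodePtr>;

    class MatchResult {
     public:
      void add_entry(PatternPtr pattern, AnfNodePtr node) { match_result_[pattern] = node; }
      PatternNodeMap &result() { return match_result_; }  // renamed from _result()
      void merge(const std::shared_ptr<MatchResult> &other_result) {
        // add/update entries from other_result, mirroring the updated call site above
        for (auto &iter : other_result->result()) {
          match_result_[iter.first] = iter.second;
        }
      }

     private:
      PatternNodeMap match_result_;
    };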
@@ -243,7 +243,6 @@ void Reset(PatternPtr pattern) {
}
return;
}
} // namespace internal
AnfNodePtr PythonPass::Run(const FuncGraphPtr &func_graph, const FuncGraphPtr &top_graph, const AnfNodePtr &node,
@@ -150,7 +150,6 @@ FuncGraphPtr ParsePythonCode(const py::object &obj,
const std::string &python_mod_get_parse_method = PYTHON_MOD_GET_PARSE_METHOD);
// add wrap for cell top graph.
FuncGraphPtr MakeTopGraph(const py::object &cell, const ValuePtr &cell_ptr);
} // namespace parse
} // namespace mindspore
@@ -28,7 +28,6 @@
namespace mindspore {
namespace pipeline {
class ResourceBase {
public:
ResourceBase() { manager_ = MakeManager(); }
@@ -59,7 +58,6 @@ class ResourceBase {
};
using ResourceBasePtr = std::shared_ptr<pipeline::ResourceBase>;
} // namespace pipeline
} // namespace mindspore
@@ -32,13 +32,11 @@ namespace py = pybind11;
// mindspore namespace is the top level namespace of Mindsporeession project.
// Other namespace should be a sub namespace of mindspore namespace in the ME project.
namespace mindspore {
// Cell python wrapper and adapter class.
class CellPy {
public:
static void AddAttr(CellPtr cell, const std::string &name, const py::object &obj);
};
} // namespace mindspore
#endif // MINDSPORE_CCSRC_UTILS_CELL_PY_H_
@@ -19,6 +19,7 @@
#include <vector>
#include <sstream>
#include <string>
+#include <utility>
#include "pybind_api/api_register.h"
#include "abstract/abstract_value.h"
@@ -30,7 +30,6 @@
namespace mindspore {
namespace lite {
const char WHITESPACE[] = "\t\n\v\f\r ";
const int FLAG_PREFIX_LEN = 2;
@@ -64,6 +64,5 @@ class Cell : public Named {
};
using CellPtr = std::shared_ptr<Cell>;
} // namespace mindspore
#endif // MINDSPORE_CCSRC_IR_CELL_H_
@@ -206,5 +206,4 @@ std::ostream &operator<<(std::ostream &os, const std::shared_ptr<Problem> proble
const TypePtr kTensorTypeFP16 = std::make_shared<TensorType>(std::make_shared<Float>(16));
const TypePtr kTensorTypeFP32 = std::make_shared<TensorType>(std::make_shared<Float>(32));
} // namespace mindspore
@@ -264,7 +264,6 @@ extern const TypePtr kKeyword;
extern const TypePtr kTensorType;
extern const TypePtr kTensorTypeFP16;
extern const TypePtr kTensorTypeFP32;
} // namespace mindspore
#endif // MINDSPORE_CORE_IR_DTYPE_H_
@@ -21,7 +21,6 @@
#include "utils/log_adapter.h"
namespace mindspore {
TypePtr UndeterminedType::DeepCopy() const {
MS_EXCEPTION_IF_NULL(element_type_);
if (IsGeneric()) {
@@ -190,5 +189,4 @@ bool SparseTensorType::operator==(const Type &other) const {
}
return *element_type_ == *other_elem_type;
}
} // namespace mindspore
@@ -34,7 +34,6 @@
#include "ir/dtype/type.h"
namespace mindspore {
class UndeterminedType : public Object {
public:
UndeterminedType() : Object(kObjectTypeUndeterminedType) {}
@@ -126,7 +125,6 @@ class SparseTensorType : public Object {
TypePtr element_type_;
};
using SparseTensorTypePtr = std::shared_ptr<SparseTensorType>;
} // namespace mindspore
#endif // MINDSPORE_CORE_IR_DTYPE_TENSORTYPE_H_
@@ -641,6 +641,7 @@ class PConstant : public PBase<PConstant<T> > {
}
int ret = 0;
char *source_data = reinterpret_cast<char *>(GetPointerToTensorData(x));
+MS_EXCEPTION_IF_NULL(source_data);
if (x_tensor_ptr->DataSize() == 1) {
for (int i = 0; i < new_tensor_ptr->ElementsNum(); i++) {
ret = memcpy_s(data + i * GetTypeByte(tensor_type_ptr), GetTypeByte(tensor_type_ptr), source_data,
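The added MS_EXCEPTION_IF_NULL guards the source buffer before it feeds the memcpy_s broadcast loop. A rough, self-contained illustration of that guard pattern (helper names and types here are simplified assumptions, not the real PConstant internals):

    #include <cstddef>
    #include <cstring>
    #include <stdexcept>
    #include <string>

    // Stand-in for MS_EXCEPTION_IF_NULL: fail fast instead of copying from a null pointer.
    inline void ThrowIfNull(const void *ptr, const std::string &what) {
      if (ptr == nullptr) {
        throw std::runtime_error("null pointer: " + what);
      }
    }

    // Broadcast a single source element across the destination buffer.
    void BroadcastScalar(const char *source_data, char *data, size_t elem_size, int elements_num) {
      ThrowIfNull(source_data, "source_data");
      for (int i = 0; i < elements_num; ++i) {
        // memcpy_s in the original; plain memcpy keeps this sketch portable
        std::memcpy(data + i * elem_size, source_data, elem_size);
      }
    }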
@@ -115,7 +115,7 @@ class Primitive : public Named {
void set_const_input_indexes(const std::vector<size_t> &const_input_indexes) {
const_input_indexes_ = const_input_indexes;
}
-std::vector<size_t> &get_const_input_indexes() { return const_input_indexes_; }
+const std::vector<size_t> &get_const_input_indexes() { return const_input_indexes_; }
std::string id() const { return id_; }
protected:
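Returning a const reference keeps the accessor copy-free while stopping callers from mutating const_input_indexes_ through the getter; writes now have to go through set_const_input_indexes(). A hedged sketch of the resulting usage (PrimitiveLike is a stand-in, not the real Primitive class):

    #include <cstddef>
    #include <vector>

    class PrimitiveLike {
     public:
      void set_const_input_indexes(const std::vector<size_t> &const_input_indexes) {
        const_input_indexes_ = const_input_indexes;
      }
      // Read-only view of the index list, matching the changed return type above.
      const std::vector<size_t> &get_const_input_indexes() { return const_input_indexes_; }

     private:
      std::vector<size_t> const_input_indexes_;
    };

    // Example use: iterate without copying; mutation through the getter no longer compiles.
    // for (size_t idx : prim.get_const_input_indexes()) { /* ... */ }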