From dde5a1bb4822912467bf9bc75036f8ed71d0ee5a Mon Sep 17 00:00:00 2001
From: Wei Luning
Date: Thu, 20 Aug 2020 15:15:40 +0800
Subject: [PATCH] update doc for Parameter & fix codex

---
 mindspore/ccsrc/debug/data_dump_parser.cc          |  6 ++++--
 .../optimizer/irpass/switch_layer_defer_inline.h   |  3 +++
 .../ccsrc/pipeline/jit/remove_value_node_dup.cc    |  1 -
 .../ccsrc/pipeline/jit/remove_value_node_dup.h     |  1 -
 mindspore/ccsrc/utils/convert_utils.cc             | 15 ++++++++-------
 mindspore/common/parameter.py                      |  9 ++++-----
 mindspore/core/ir/pattern_matcher.h                |  2 --
 7 files changed, 19 insertions(+), 18 deletions(-)

diff --git a/mindspore/ccsrc/debug/data_dump_parser.cc b/mindspore/ccsrc/debug/data_dump_parser.cc
index 85f8961583..0f5845ea92 100644
--- a/mindspore/ccsrc/debug/data_dump_parser.cc
+++ b/mindspore/ccsrc/debug/data_dump_parser.cc
@@ -216,8 +216,10 @@ std::string DataDumpParser::GetOpOverflowBinPath(uint32_t graph_id, uint32_t dev
   std::string bin_path = "/var/log/npu/ide_daemon/dump";
 
   const char *dump_data_path = std::getenv("DATA_DUMP_PATH");
-  bin_path.append(dump_data_path);
-  bin_path.append("_");
+  if (dump_data_path != nullptr) {
+    bin_path.append(dump_data_path);
+    bin_path.append("_");
+  }
   bin_path.append(std::to_string(device_id));
   bin_path.append("/");
   bin_path.append(net_name_);
diff --git a/mindspore/ccsrc/frontend/optimizer/irpass/switch_layer_defer_inline.h b/mindspore/ccsrc/frontend/optimizer/irpass/switch_layer_defer_inline.h
index 01472c2fd9..f355a54b86 100644
--- a/mindspore/ccsrc/frontend/optimizer/irpass/switch_layer_defer_inline.h
+++ b/mindspore/ccsrc/frontend/optimizer/irpass/switch_layer_defer_inline.h
@@ -36,6 +36,9 @@ class SwitchLayerDeferInline : public AnfVisitor {
     auto tuple = dyn_cast<abstract::AbstractTuple>(cnode->inputs()[2]->abstract());
     for (auto elem : tuple->elements()) {
       auto abstract = dyn_cast<abstract::FuncGraphAbstractClosure>(elem);
+      if (abstract == nullptr) {
+        return nullptr;
+      }
       *(abstract->func_graph()->switch_layer_input()) = true;
     }
     return nullptr;
diff --git a/mindspore/ccsrc/pipeline/jit/remove_value_node_dup.cc b/mindspore/ccsrc/pipeline/jit/remove_value_node_dup.cc
index 2d390c46a2..4c6ed7e718 100644
--- a/mindspore/ccsrc/pipeline/jit/remove_value_node_dup.cc
+++ b/mindspore/ccsrc/pipeline/jit/remove_value_node_dup.cc
@@ -173,6 +173,5 @@ bool MergeDuplicateGraphs(const FuncGraphManagerPtr manager) {
   }
   return true;
 }
-
 }  // namespace pipeline
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/pipeline/jit/remove_value_node_dup.h b/mindspore/ccsrc/pipeline/jit/remove_value_node_dup.h
index 39fcd4472b..dd82f5d701 100644
--- a/mindspore/ccsrc/pipeline/jit/remove_value_node_dup.h
+++ b/mindspore/ccsrc/pipeline/jit/remove_value_node_dup.h
@@ -31,7 +31,6 @@ void TryToDoReplace(FuncGraphManager *manager, const AnfNodePtr &node, HashCache
 size_t HashOfGraph(const FuncGraphPtr &fg);
 bool IsCNodeGraph(const AnfNodePtr &node);
 bool MergeDuplicateGraphs(const FuncGraphManagerPtr manager);
-
 }  // namespace pipeline
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/utils/convert_utils.cc b/mindspore/ccsrc/utils/convert_utils.cc
index 1e477fe6c2..0b41330d58 100644
--- a/mindspore/ccsrc/utils/convert_utils.cc
+++ b/mindspore/ccsrc/utils/convert_utils.cc
@@ -473,12 +473,12 @@ bool IsGraphOutputValueNodeOrParameter(const AnfNodePtr &output, const py::tuple
   }
   return false;
 }
-
+namespace {
 // Isomorphism
-static bool SameNode(const AnfNodePtr &node1, const AnfNodePtr &node2, FuncGraphPairMapEquiv *equiv_func_graph,
-                     NodeMapEquiv *const equiv_node);
-static bool SameNodeShallow(const AnfNodePtr &node1, const AnfNodePtr &node2, FuncGraphPairMapEquiv *equiv_func_graph,
-                            NodeMapEquiv *const equiv_node) {
+bool SameNode(const AnfNodePtr &node1, const AnfNodePtr &node2, FuncGraphPairMapEquiv *equiv_func_graph,
+              NodeMapEquiv *const equiv_node);
+bool SameNodeShallow(const AnfNodePtr &node1, const AnfNodePtr &node2, FuncGraphPairMapEquiv *equiv_func_graph,
+                     NodeMapEquiv *const equiv_node) {
   if (equiv_node == nullptr) {
     MS_LOG(ERROR) << "Invalid equiv_node";
     return false;
@@ -534,8 +534,8 @@ bool SameNode(const AnfNodePtr &node1, const AnfNodePtr &node2, FuncGraphPairMap
   return SameNodeShallow(node1, node2, equiv_func_graph, equiv_node);
 }
 
-static bool SameSubgraph(AnfNodePtr root1, AnfNodePtr root2, FuncGraphPairMapEquiv *equiv_func_graph,
-                         NodeMapEquiv *const equiv_node) {
+bool SameSubgraph(AnfNodePtr root1, AnfNodePtr root2, FuncGraphPairMapEquiv *equiv_func_graph,
+                  NodeMapEquiv *const equiv_node) {
   std::unordered_set<AnfNodePtr> done;
   std::stack<std::pair<AnfNodePtr, AnfNodePtr>> todo;
 
@@ -576,6 +576,7 @@ static bool SameSubgraph(AnfNodePtr root1, AnfNodePtr root2, FuncGraphPairMapEqu
   }
   return true;
 }
+}  // namespace
 
 bool Isomorphic(FuncGraphPtr fg1, FuncGraphPtr fg2, FuncGraphPairMapEquiv *equiv_func_graph,
                 NodeMapEquiv *const equiv_node) {
diff --git a/mindspore/common/parameter.py b/mindspore/common/parameter.py
index 18ae4bfd27..dfe03a75e8 100644
--- a/mindspore/common/parameter.py
+++ b/mindspore/common/parameter.py
@@ -40,19 +40,18 @@ class Parameter(MetaTensor):
     After initialized `Parameter` is a subtype of `Tensor`.
 
     In auto_parallel mode of "semi_auto_parallel" and "auto_parallel", if init `Parameter` by
-    a `Initializer`, the type of Parameter will be a `MetaTensor` not a `Tensor`. `MetaTensor`
-    only save the shape type info of a tensor with no memory usage. The shape can be change while
+    an `Initializer`, the type of Parameter will be `MetaTensor` not `Tensor`. `MetaTensor`
+    only saves the shape and type info of a tensor with no memory usage. The shape can be changed while
     compile for auto-parallel. Call `init_data` will return a Tensor Parameter with initialized data.
 
     Note:
         Each parameter of Cell is represented by Parameter class.
 
     Args:
-        default_input (Union[Tensor, Initializer]): Parameter data, when `default_input` is` Initializer`,
-            the data stored by Parameter is `MetaTensor`, otherwise it is `Tensor`.
+        default_input (Union[Tensor, Initializer, Number]): Parameter data, used to initialize the parameter.
         name (str): Name of the child parameter.
         requires_grad (bool): True if the parameter requires gradient. Default: True.
-        layerwise_parallel (bool): A kind of model parallel mode. When layerwise_parallel is true in paralle mode,
+        layerwise_parallel (bool): A kind of model parallel mode. When layerwise_parallel is true in parallel mode,
             broadcast and gradients communication would not be applied to parameters. Default: False.
 
     Example:
diff --git a/mindspore/core/ir/pattern_matcher.h b/mindspore/core/ir/pattern_matcher.h
index 2ba5730d45..3c5c3122c9 100644
--- a/mindspore/core/ir/pattern_matcher.h
+++ b/mindspore/core/ir/pattern_matcher.h
@@ -580,7 +580,6 @@ class PConstant : public PBase<PConstant<T> > {
       return nullptr;
     }
    auto value = node->cast<ValueNodePtr>()->value();
-
     if (!value->isa<tensor::Tensor>()) {
       return nullptr;
     }
@@ -747,7 +746,6 @@ class PConstant : public PBase<PConstant<T> > {
     std::vector<int> tensor_out_shape = tensor_3_abstract->shape()->shape();
     int data_out_size = std::accumulate(tensor_out_shape.begin(), tensor_out_shape.end(), 1,
                                         std::multiplies<int>());
-
     if ((tensor_ptr_1->DataSize() > 1) && (tensor_ptr_1->DataSize() != data_out_size)) {
       return nullptr;
     }
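
Note on the null checks above: std::getenv returns a null pointer when the
environment variable is unset, and std::string::append(const char *) on a
null pointer is undefined behavior; likewise, dereferencing the result of a
failed dyn_cast crashes. A minimal standalone sketch of the guarded pattern
follows (illustration only, not MindSpore code; the main() wrapper is added
here for demonstration):

    #include <cstdlib>
    #include <iostream>
    #include <string>

    int main() {
      std::string bin_path = "/var/log/npu/ide_daemon/dump";
      // std::getenv returns nullptr when DATA_DUMP_PATH is not set, so the
      // result must be checked before it is appended to the path string.
      const char *dump_data_path = std::getenv("DATA_DUMP_PATH");
      if (dump_data_path != nullptr) {
        bin_path.append(dump_data_path);
        bin_path.append("_");
      }
      std::cout << bin_path << std::endl;
      return 0;
    }

The switch_layer_defer_inline.h and pattern_matcher.h hunks apply the same
defensive pattern: check the pointer returned by a cast before using it.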