
update doc for Parameter & fix codex

tags/v0.7.0-beta
Wei Luning 5 years ago
parent commit dde5a1bb48
7 changed files with 19 additions and 18 deletions
  1. +4 -2  mindspore/ccsrc/debug/data_dump_parser.cc
  2. +3 -0  mindspore/ccsrc/frontend/optimizer/irpass/switch_layer_defer_inline.h
  3. +0 -1  mindspore/ccsrc/pipeline/jit/remove_value_node_dup.cc
  4. +0 -1  mindspore/ccsrc/pipeline/jit/remove_value_node_dup.h
  5. +8 -7  mindspore/ccsrc/utils/convert_utils.cc
  6. +4 -5  mindspore/common/parameter.py
  7. +0 -2  mindspore/core/ir/pattern_matcher.h

+4 -2  mindspore/ccsrc/debug/data_dump_parser.cc

@@ -216,8 +216,10 @@ std::string DataDumpParser::GetOpOverflowBinPath(uint32_t graph_id, uint32_t dev
   std::string bin_path = "/var/log/npu/ide_daemon/dump";
 
   const char *dump_data_path = std::getenv("DATA_DUMP_PATH");
-  bin_path.append(dump_data_path);
-  bin_path.append("_");
+  if (dump_data_path != nullptr) {
+    bin_path.append(dump_data_path);
+    bin_path.append("_");
+  }
   bin_path.append(std::to_string(device_id));
   bin_path.append("/");
   bin_path.append(net_name_);
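
The fix above guards the result of std::getenv, which returns a null pointer when DATA_DUMP_PATH is unset; passing that null pointer to std::string::append is undefined behavior and typically crashes. A minimal standalone sketch of the same pattern (the variable names are taken from the hunk, the rest is illustrative):

#include <cstdlib>
#include <iostream>
#include <string>

int main() {
  std::string bin_path = "/var/log/npu/ide_daemon/dump";

  // std::getenv returns nullptr when the variable is unset;
  // appending a null char* to std::string is undefined behavior,
  // so the result must be checked before use.
  const char *dump_data_path = std::getenv("DATA_DUMP_PATH");
  if (dump_data_path != nullptr) {
    bin_path.append(dump_data_path);
    bin_path.append("_");
  }

  std::cout << bin_path << '\n';
  return 0;
}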


+3 -0  mindspore/ccsrc/frontend/optimizer/irpass/switch_layer_defer_inline.h

@@ -36,6 +36,9 @@ class SwitchLayerDeferInline : public AnfVisitor {
     auto tuple = dyn_cast<abstract::AbstractTuple>(cnode->inputs()[2]->abstract());
     for (auto elem : tuple->elements()) {
       auto abstract = dyn_cast<abstract::FuncGraphAbstractClosure>(elem);
+      if (abstract == nullptr) {
+        return nullptr;
+      }
       *(abstract->func_graph()->switch_layer_input()) = true;
     }
     return nullptr;
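
The added null check matters because dyn_cast yields a null pointer whenever the element is not actually a FuncGraphAbstractClosure, and the next line dereferences the result. A minimal sketch of the same guard using std::dynamic_pointer_cast, the standard-library analogue of dyn_cast (the class names below are illustrative, not MindSpore types):

#include <iostream>
#include <memory>

struct Abstract { virtual ~Abstract() = default; };
struct Closure : Abstract {
  void Mark() { std::cout << "marked\n"; }
};

void Visit(const std::shared_ptr<Abstract> &elem) {
  // Like dyn_cast, dynamic_pointer_cast returns nullptr when the
  // runtime type does not match, so bail out before dereferencing.
  auto closure = std::dynamic_pointer_cast<Closure>(elem);
  if (closure == nullptr) {
    return;
  }
  closure->Mark();
}

int main() {
  Visit(std::make_shared<Closure>());   // prints "marked"
  Visit(std::make_shared<Abstract>());  // skipped safely
  return 0;
}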


+0 -1  mindspore/ccsrc/pipeline/jit/remove_value_node_dup.cc

@@ -173,6 +173,5 @@ bool MergeDuplicateGraphs(const FuncGraphManagerPtr manager) {
   }
   return true;
 }
-
 }  // namespace pipeline
 }  // namespace mindspore

+0 -1  mindspore/ccsrc/pipeline/jit/remove_value_node_dup.h

@@ -31,7 +31,6 @@ void TryToDoReplace(FuncGraphManager *manager, const AnfNodePtr &node, HashCache
 size_t HashOfGraph(const FuncGraphPtr &fg);
 bool IsCNodeGraph(const AnfNodePtr &node);
 bool MergeDuplicateGraphs(const FuncGraphManagerPtr manager);
-
 }  // namespace pipeline
 }  // namespace mindspore



+8 -7  mindspore/ccsrc/utils/convert_utils.cc

@@ -473,12 +473,12 @@ bool IsGraphOutputValueNodeOrParameter(const AnfNodePtr &output, const py::tuple
   }
   return false;
 }
+namespace {
 // Isomorphism
-static bool SameNode(const AnfNodePtr &node1, const AnfNodePtr &node2, FuncGraphPairMapEquiv *equiv_func_graph,
-                     NodeMapEquiv *const equiv_node);
-static bool SameNodeShallow(const AnfNodePtr &node1, const AnfNodePtr &node2, FuncGraphPairMapEquiv *equiv_func_graph,
-                            NodeMapEquiv *const equiv_node) {
+bool SameNode(const AnfNodePtr &node1, const AnfNodePtr &node2, FuncGraphPairMapEquiv *equiv_func_graph,
+              NodeMapEquiv *const equiv_node);
+bool SameNodeShallow(const AnfNodePtr &node1, const AnfNodePtr &node2, FuncGraphPairMapEquiv *equiv_func_graph,
+                     NodeMapEquiv *const equiv_node) {
   if (equiv_node == nullptr) {
     MS_LOG(ERROR) << "Invalid equiv_node";
     return false;
@@ -534,8 +534,8 @@ bool SameNode(const AnfNodePtr &node1, const AnfNodePtr &node2, FuncGraphPairMap
   return SameNodeShallow(node1, node2, equiv_func_graph, equiv_node);
 }
 
-static bool SameSubgraph(AnfNodePtr root1, AnfNodePtr root2, FuncGraphPairMapEquiv *equiv_func_graph,
-                         NodeMapEquiv *const equiv_node) {
+bool SameSubgraph(AnfNodePtr root1, AnfNodePtr root2, FuncGraphPairMapEquiv *equiv_func_graph,
+                  NodeMapEquiv *const equiv_node) {
   std::unordered_set<AnfNodePtr> done;
   std::stack<std::pair<AnfNodePtr, AnfNodePtr>> todo;
 
@@ -576,6 +576,7 @@ static bool SameSubgraph(AnfNodePtr root1, AnfNodePtr root2, FuncGraphPairMapEqu
   }
   return true;
 }
+}  // namespace
 
 bool Isomorphic(FuncGraphPtr fg1, FuncGraphPtr fg2, FuncGraphPairMapEquiv *equiv_func_graph,
                 NodeMapEquiv *const equiv_node) {
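
This hunk swaps file-local static functions for a single unnamed namespace, which gives the same internal linkage and is the form modern C++ style guides generally prefer for translation-unit-local helpers. A minimal sketch of the idiom (illustrative names, not the functions from convert_utils.cc):

#include <iostream>

namespace {
// Everything inside an unnamed namespace has internal linkage:
// it is visible only within this translation unit, exactly as if
// each definition were marked `static`.
bool SameValue(int a, int b) { return a == b; }
}  // namespace

// Externally visible function that uses the file-local helper.
bool Compare(int a, int b) { return SameValue(a, b); }

int main() {
  std::cout << std::boolalpha << Compare(1, 1) << '\n';  // true
  return 0;
}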


+4 -5  mindspore/common/parameter.py

@@ -40,19 +40,18 @@ class Parameter(MetaTensor):
     After initialized `Parameter` is a subtype of `Tensor`.
 
     In auto_parallel mode of "semi_auto_parallel" and "auto_parallel", if init `Parameter` by
-    a `Initializer`, the type of Parameter will be a `MetaTensor` not a `Tensor`. `MetaTensor`
-    only save the shape type info of a tensor with no memory usage. The shape can be change while
+    an `Initializer`, the type of Parameter will be `MetaTensor` not `Tensor`. `MetaTensor`
+    only saves the shape and type info of a tensor with no memory usage. The shape can be changed while
     compile for auto-parallel. Call `init_data` will return a Tensor Parameter with initialized data.
 
     Note:
         Each parameter of Cell is represented by Parameter class.
 
     Args:
-        default_input (Union[Tensor, Initializer]): Parameter data, when `default_input` is` Initializer`,
-            the data stored by Parameter is `MetaTensor`, otherwise it is `Tensor`.
+        default_input (Union[Tensor, Initializer, Number]): Parameter data, to be set initialized.
         name (str): Name of the child parameter.
         requires_grad (bool): True if the parameter requires gradient. Default: True.
-        layerwise_parallel (bool): A kind of model parallel mode. When layerwise_parallel is true in paralle mode,
+        layerwise_parallel (bool): A kind of model parallel mode. When layerwise_parallel is true in parallel mode,
             broadcast and gradients communication would not be applied to parameters. Default: False.
 
     Example:


+0 -2  mindspore/core/ir/pattern_matcher.h

@@ -580,7 +580,6 @@ class PConstant : public PBase<PConstant<T> > {
     return nullptr;
   }
   auto value = node->cast<ValueNodePtr>()->value();
-
   if (!value->isa<tensor::Tensor>()) {
     return nullptr;
   }
@@ -747,7 +746,6 @@ class PConstant : public PBase<PConstant<T> > {

   std::vector<int> tensor_out_shape = tensor_3_abstract->shape()->shape();
   int data_out_size = std::accumulate(tensor_out_shape.begin(), tensor_out_shape.end(), 1, std::multiplies<int>());
-
   if ((tensor_ptr_1->DataSize() > 1) && (tensor_ptr_1->DataSize() != data_out_size)) {
     return nullptr;
   }

