Browse Source

Modify the limit on the axis attribute of reduce ops

tags/v1.5.0-rc1
simson 4 years ago
parent
commit
5b34859541
5 changed files with 22 additions and 6 deletions
  1. +15
    -0
      mindspore/ccsrc/backend/optimizer/ascend/format_type/change_axis_of_reduce_kernel.cc
  2. +2
    -2
      mindspore/ccsrc/backend/session/kernel_graph.cc
  3. +1
    -1
      mindspore/ccsrc/backend/session/kernel_graph.h
  4. +1
    -0
      mindspore/ccsrc/utils/utils.h
  5. +3
    -3
      mindspore/ops/operations/math_ops.py

+ 15
- 0
mindspore/ccsrc/backend/optimizer/ascend/format_type/change_axis_of_reduce_kernel.cc View File

@@ -53,6 +53,15 @@ void SafeCheckFunction(const CNodePtr &cnode, const std::vector<int64_t> &reduce
}
}

// For a dynamic-shape reduce node, move the reduction dimensions from the
// "axis" attribute to the "axes" attribute expected by the dynamic kernel:
// copy the value under kAttrAxis to kAttrAxes, then drop kAttrAxis.
// Raises (via MS_EXCEPTION_IF_NULL) when the node or its primitive is null.
void DynamicAttrUpdate(const AnfNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  const auto prim = AnfAlgo::GetCNodePrimitive(node);
  MS_EXCEPTION_IF_NULL(prim);
  AnfAlgo::SetNodeAttr(kAttrAxes, prim->GetAttr(kAttrAxis), node);
  AnfAlgo::EraseNodeAttr(kAttrAxis, node);
}

void ConvertReduceAttrFraczAnd6HD(const CNodePtr &cnode) {
auto axis = kernel::GetReduceAttrAxis(cnode);
std::vector<int64_t> convert_axis;
@@ -95,9 +104,15 @@ const AnfNodePtr ChangeAxisOfReduceKernel::Process(const FuncGraphPtr &, const A
}
auto convert_map = kReduceConvertMap.find(AnfAlgo::GetInputFormat(node, 0));
if (convert_map == kReduceConvertMap.end()) {
if (AnfAlgo::IsDynamicShape(node)) {
DynamicAttrUpdate(node);
}
return nullptr;
}
convert_map->second(node->cast<CNodePtr>());
if (AnfAlgo::IsDynamicShape(node)) {
DynamicAttrUpdate(node);
}
return nullptr;
}
} // namespace opt


+ 2
- 2
mindspore/ccsrc/backend/session/kernel_graph.cc View File

@@ -470,7 +470,7 @@ void KernelGraph::CreateKernelInfoFromNewParameter(const CNodePtr &cnode) {
}
}

void KernelGraph::ResetAssignInputFeaatureMapFlag(const CNodePtr &cnode) const {
void KernelGraph::ResetAssignInputFeatureMapFlag(const CNodePtr &cnode) const {
if (kOpAssignKernelNameList.find(AnfAlgo::GetCNodeName(cnode)) == kOpAssignKernelNameList.end()) {
MS_LOG(EXCEPTION) << "Only supported to change the node [Assign , AssignSub, AssignAdd] node's input feature map "
"flag but got the node :"
@@ -493,7 +493,7 @@ void KernelGraph::SetKernelInfoForNode(const AnfNodePtr &node) const {
node->set_kernel_info(kernel_info);
if (node->isa<CNode>()) {
if (kOpAssignKernelNameList.find(AnfAlgo::GetCNodeName(node)) != kOpAssignKernelNameList.end()) {
ResetAssignInputFeaatureMapFlag(node->cast<CNodePtr>());
ResetAssignInputFeatureMapFlag(node->cast<CNodePtr>());
}
#if defined(__APPLE__)
std::vector<int> feature_map_input_indexs;


+ 1
- 1
mindspore/ccsrc/backend/session/kernel_graph.h View File

@@ -111,7 +111,7 @@ class KernelGraph : public FuncGraph {
CNodePtr NewCNodeWithInfos(const std::vector<AnfNodePtr> &inputs, const CNodePtr &ori_cnode = nullptr);
void CreateKernelInfoFromNewParameter(const CNodePtr &cnode);
CNodePtr NewCNode(const CNodePtr &cnode);
void ResetAssignInputFeaatureMapFlag(const CNodePtr &cnode) const;
void ResetAssignInputFeatureMapFlag(const CNodePtr &cnode) const;
ParameterPtr NewParameter(const ParameterPtr &parameter = nullptr);
ParameterPtr NewParameter(const abstract::AbstractBasePtr &abstract);
ValueNodePtr NewValueNode(const AbstractBasePtr &abstract, const ValuePtr &value);


+ 1
- 0
mindspore/ccsrc/utils/utils.h View File

@@ -335,6 +335,7 @@ constexpr auto kAttrDataShape = "data_shape";
constexpr auto kAttrFormat = "format";
constexpr auto kAttrReshapeType = "reshape_type";
constexpr auto kAttrAxis = "axis";
constexpr auto kAttrAxes = "axes";
constexpr auto kAttrKeepDims = "keep_dims";
constexpr auto kAttrShapeGamma = "shape_gamma";
constexpr auto kAttrPerm = "perm";


+ 3
- 3
mindspore/ops/operations/math_ops.py View File

@@ -371,6 +371,8 @@ class _Reduce(PrimitiveWithInfer):
input_shp = input_x['shape']
args = {'input_x': input_x['dtype']}
validator.check_tensors_dtypes_same_and_valid(args, valid_dtype, self.name)
if not isinstance(axis, mstype.tensor_type) and axis_v is None:
raise ValueError(f"For {self.name}, axis must be const.")
out_shape = _infer_shape_reduce(input_shp, axis_v, self.keep_dims, self.name)
if -1 in input_shp:
if axis_v is None:
@@ -423,8 +425,6 @@ class _Reduce(PrimitiveWithInfer):
value = np_reduce_func(value, axis_v, keepdims=self.keep_dims)
value = np.array(value)
value = Tensor(value)
if (-1 in input_shp or axis_v is None) and context.get_context("device_target") == "Ascend":
self.init_prim_io_names(inputs=['x', 'axes'], outputs=['y'])
return {'shape': out_shape,
'min_shape': output_min_shape,
'max_shape': output_max_shape,
@@ -742,7 +742,7 @@ class ReduceMax(_Reduce):
Raises:
TypeError: If `keep_dims` is not a bool.
TypeError: If `x` is not a Tensor.
TypeError: If `axis` is not one of the following: int, tuple or list.
ValueError: If `axis` is not one of the following: int, tuple or list.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``


Loading…
Cancel
Save