Browse Source

rename dynamicreshape to reshape

tags/v1.6.0
lingyunli63 4 years ago
parent
commit
89e8b90a8d
11 changed files with 158 additions and 151 deletions
  1. +6
    -0
      mindspore/ccsrc/backend/kernel_compiler/host/dynamic_broadcast_gradient_args_kernel.cc
  2. +87
    -40
      mindspore/ccsrc/backend/kernel_compiler/host/reshape_kernel.cc
  3. +59
    -0
      mindspore/ccsrc/backend/kernel_compiler/host/reshape_kernel.h
  4. +0
    -2
      mindspore/ccsrc/backend/optimizer/ascend/ascend_backend_optimization.cc
  5. +0
    -67
      mindspore/ccsrc/backend/optimizer/ascend/mindir/dynamic_reshape_unify_mindir.cc
  6. +0
    -34
      mindspore/ccsrc/backend/optimizer/ascend/mindir/dynamic_reshape_unify_mindir.h
  7. +3
    -0
      mindspore/ccsrc/backend/optimizer/common/helper.cc
  8. +2
    -2
      mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc
  9. +0
    -1
      mindspore/ccsrc/utils/utils.h
  10. +0
    -3
      mindspore/core/abstract/primitive_infer_map.cc
  11. +1
    -2
      mindspore/core/base/core_ops.h

+ 6
- 0
mindspore/ccsrc/backend/kernel_compiler/host/dynamic_broadcast_gradient_args_kernel.cc View File

@@ -173,6 +173,12 @@ size_t SetOutputValue(const CNodePtr &cnode, const std::vector<std::vector<int64
*(data_ptr + i) = output[i];
}
auto runtime_instance = device::KernelRuntimeManager::Instance().GetCurrentKernelRuntime();
MS_EXCEPTION_IF_NULL(runtime_instance);
auto ret = runtime_instance->SyncStream();
if (!ret) {
MS_LOG(EXCEPTION) << "Sync stream error!";
}
if (!out_addr->SyncHostToDevice(out_shape, LongToSize(tensor_for_sync->data().nbytes()), tensor_for_sync->data_type(),
tensor_for_sync->data_c(), tensor_for_sync->device_info().host_format_)) {
MS_LOG(EXCEPTION) << "Output Value SyncHostToDevice failed.";


mindspore/ccsrc/backend/kernel_compiler/host/dynamic_reshape_kernel.cc → mindspore/ccsrc/backend/kernel_compiler/host/reshape_kernel.cc View File

@@ -1,5 +1,5 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,16 @@
* limitations under the License.
*/
#include "backend/kernel_compiler/host/dynamic_reshape_kernel.h"
#include "backend/kernel_compiler/host/reshape_kernel.h"
#include <algorithm>
#include <functional>
#include "backend/session/anf_runtime_algorithm.h"
#include "abstract/utils.h"
#include "runtime/device/kernel_runtime_manager.h"
#include "utils/check_convert_utils.h"
#include "utils/trace_base.h"
#include "runtime/mem.h"
namespace mindspore {
namespace kernel {
@@ -62,27 +66,51 @@ std::vector<int64_t> GetInputValue(const CNodePtr &cnode, size_t index) {
}
return input_shape;
}
} // namespace
void DynamicReshapeKernel::Execute() {
MS_LOG(INFO) << "Execute host ReshapeKernel Start";
auto cnode = cnode_ptr_.lock();
MS_EXCEPTION_IF_NULL(cnode);
int64_t GetArrProd(const CNodePtr &cnode) {
auto shape_x = AnfAlgo::GetPrevNodeOutputInferShape(cnode, 0);
auto arr_prod = std::accumulate(shape_x.begin(), shape_x.end(), static_cast<int64_t>(1), std::multiplies<int64_t>());
return arr_prod;
}
std::vector<int64_t> GetOutputShapes(const CNodePtr &cnode) {
std::vector<int64_t> output_shapes;
auto input_num = AnfAlgo::GetInputTensorNum(cnode);
if (input_num != kInputNum) {
MS_LOG(EXCEPTION) << "Invalid input num, should be " << kInputNum << ", but got " << input_num
<< trace::DumpSourceLines(cnode);
MS_LOG(DEBUG) << "Reshape has one input";
auto prim = AnfAlgo::GetCNodePrimitive(cnode);
ValuePtr sh = prim->GetAttr("shape");
if (sh == nullptr) {
auto un_output_shapes = AnfAlgo::GetOutputInferShape(cnode, 0);
(void)std::transform(std::begin(un_output_shapes), std::end(un_output_shapes), std::back_inserter(output_shapes),
[](const uint64_t &i) -> int64_t { return static_cast<int64_t>(i); });
} else if (sh->isa<ValueTuple>()) {
auto reshape_value_tuple = sh->cast<ValueTuplePtr>();
MS_EXCEPTION_IF_NULL(reshape_value_tuple);
auto reshape_tuple = reshape_value_tuple->value();
(void)std::transform(std::begin(reshape_tuple), std::end(reshape_tuple), std::back_inserter(output_shapes),
[](const ValuePtr &e) -> int64_t {
if (e->isa<UInt64Imm>()) {
return (int64_t)GetValue<uint64_t>(e);
} else {
return GetValue<int64_t>(e);
}
});
} else if (sh->isa<tensor::Tensor>()) {
auto tensor_value = sh->cast<tensor::TensorPtr>();
output_shapes = CheckAndConvertUtils::CheckTensorIntValue("shape", sh, "Reshape");
} else {
MS_EXCEPTION(ValueError) << "shape must be a tuple or constant Tensor";
}
} else {
MS_LOG(DEBUG) << "Reshape has dynamic shape";
output_shapes = GetInputValue(cnode, 1);
}
auto address_x = AnfAlgo::GetPrevNodeMutableOutputAddr(cnode, 0);
MS_EXCEPTION_IF_NULL(address_x);
auto type_x = AnfAlgo::GetOutputInferDataType(cnode, 0);
auto shape_x = AnfAlgo::GetPrevNodeOutputInferShape(cnode, 0);
std::vector<int64_t> output_shapes = GetInputValue(cnode, 1);
auto arr_prod = GetArrProd(cnode);
int64_t dim_prod = 1;
int64_t neg_index = -1;
auto arr_prod = std::accumulate(shape_x.begin(), shape_x.end(), static_cast<int64_t>(1), std::multiplies<int64_t>());
for (size_t i = 0; i < output_shapes.size(); ++i) {
if (output_shapes[i] == -1) {
neg_index = SizeToLong(i);
@@ -93,47 +121,66 @@ void DynamicReshapeKernel::Execute() {
if (neg_index != -1) {
output_shapes[LongToSize(neg_index)] = arr_prod / dim_prod;
}
return output_shapes;
}
} // namespace
size_t input_size_byte = LongToSize(arr_prod) * abstract::TypeIdSize(type_x);
auto output_addr = AnfAlgo::GetOutputAddr(cnode, 0);
void ReshapeKernel::Execute() {
MS_LOG(INFO) << "Execute host ReshapeKernel Start";
auto cnode = cnode_ptr_.lock();
MS_EXCEPTION_IF_NULL(cnode);
auto address_x = AnfAlgo::GetPrevNodeMutableOutputAddr(cnode, 0);
MS_EXCEPTION_IF_NULL(address_x);
std::vector<int64_t> output_shapes = GetOutputShapes(cnode);
auto type_x = AnfAlgo::GetOutputInferDataType(cnode, 0);
size_t input_size_byte = LongToSize(GetArrProd(cnode)) * abstract::TypeIdSize(type_x);
// At execute reshape is noOpNode as all shapes are known so set skipNoOpNode false
auto output_addr = AnfAlgo::GetOutputAddr(cnode, 0, false);
MS_EXCEPTION_IF_NULL(output_addr);
if (address_x->DeviceType() == device::DeviceAddressType::kCPU) {
auto ret =
memcpy_s(const_cast<void *>(output_addr->GetPtr()), output_addr->GetSize(), address_x->GetPtr(), input_size_byte);
if (ret != EOK) {
MS_LOG(EXCEPTION) << "Execute DynamicReshapeKernel memcpy_s failed";
MS_LOG(EXCEPTION) << "Execute ReshapeKernel memcpy_s failed";
}
} else {
if (!output_addr->AsyncDeviceToDevice(output_shapes, input_size_byte, address_x->type_id(), address_x->GetPtr(),
address_x->format())) {
MS_LOG(EXCEPTION) << "Host Reshape sync device to device failed.";
}
MS_LOG(INFO) << "Execute host ReshapeKernel End";
}
}
device::DynamicKernelPtr DynamicReshapeKernelMod::GenDynamicKernel(const CNodePtr &cnode_ptr, void *stream_ptr) {
return std::make_shared<DynamicReshapeKernel>(stream_ptr, cnode_ptr);
MS_LOG(INFO) << "Execute host ReshapeKernel End";
}
bool DynamicReshapeKernelMod::Launch(const std::vector<AddressPtr> &, const std::vector<AddressPtr> &,
const std::vector<AddressPtr> &, void *stream_ptr) {
auto node = anf_node_.lock();
MS_EXCEPTION_IF_NULL(node);
if (!node->isa<CNode>()) {
MS_LOG(EXCEPTION) << "anfnode is not a cnode";
}
auto cnode = node->cast<CNodePtr>();
void ReshapeKernel::Execute(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &outputs) {
MS_LOG(INFO) << "Execute host ReshapeKernel Start";
auto cnode = cnode_ptr_.lock();
MS_EXCEPTION_IF_NULL(cnode);
stream_ = stream_ptr;
auto reshape_kernel = std::make_shared<DynamicReshapeKernel>(stream_ptr, cnode);
try {
reshape_kernel->Execute();
} catch (const std::exception &e) {
MS_LOG(ERROR) << "DynamicReshapeKernel Launch failed. node: " << cnode->fullname_with_scope()
<< ", Error message is " << e.what();
return false;
if (inputs.empty() || outputs.empty()) {
MS_LOG(EXCEPTION) << "Inputs or outputs address of Reshape kernel is empty";
}
auto address_x = inputs[0]->addr;
MS_EXCEPTION_IF_NULL(address_x);
auto output_addr = outputs[0]->addr;
MS_EXCEPTION_IF_NULL(output_addr);
auto type_x = AnfAlgo::GetOutputInferDataType(cnode, 0);
size_t input_size_byte = LongToSize(GetArrProd(cnode)) * abstract::TypeIdSize(type_x);
auto status =
rtMemcpyAsync(output_addr, outputs[0]->size, address_x, input_size_byte, RT_MEMCPY_DEVICE_TO_DEVICE, stream_);
if (status != RT_ERROR_NONE) {
MS_LOG(ERROR) << "Call rtMemcpyAsync failed, ret = 0x" << status;
}
return true;
MS_LOG(INFO) << "Execute host ReshapeKernel End";
}
device::DynamicKernelPtr ReshapeKernelMod::GenDynamicKernel(const CNodePtr &cnode_ptr, void *stream_ptr) {
return std::make_shared<ReshapeKernel>(stream_ptr, cnode_ptr);
}
} // namespace kernel
} // namespace mindspore

+ 59
- 0
mindspore/ccsrc/backend/kernel_compiler/host/reshape_kernel.h View File

@@ -0,0 +1,59 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_HOST_RESHAPE_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_HOST_RESHAPE_KERNEL_H_
#include <vector>
#include <memory>
#include <string>
#include "runtime/device/ascend/executor/host_dynamic_kernel.h"
#include "backend/kernel_compiler/host/host_kernel_mod.h"
using HostDynamicKernel = mindspore::device::ascend::HostDynamicKernel;
namespace mindspore {
namespace kernel {
// Host-side dynamic kernel for the Reshape op. Reshape never rearranges the
// underlying data, so execution is a plain memory copy from the input address
// to the output address under the new inferred shape.
class ReshapeKernel : public HostDynamicKernel {
public:
ReshapeKernel(void *stream, const CNodePtr &cnode_ptr) : HostDynamicKernel(stream, cnode_ptr) {}
~ReshapeKernel() override = default;
// Dynamic-kernel entry point: resolves the output shape from the node
// (attr "shape", inferred shape, or the shape input) and copies the data
// device-to-device (or via memcpy_s on CPU).
void Execute() override;
// Overload used by ReshapeKernelMod::Launch: copies inputs[0] to outputs[0]
// with rtMemcpyAsync on the kernel's stream.
void Execute(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &outputs);
};
// KernelMod wrapper that lazily creates and caches a ReshapeKernel and
// forwards Launch calls to it.
class ReshapeKernelMod : public HostKernelMod {
 public:
  ReshapeKernelMod() = default;
  ~ReshapeKernelMod() override = default;
  // Factory for the underlying host dynamic kernel (defined in reshape_kernel.cc).
  device::DynamicKernelPtr GenDynamicKernel(const CNodePtr &cnode_ptr, void *stream_ptr) override;
  // Launches the reshape copy. The kernel is created on first use and reused
  // for subsequent launches. workspace is unused: Reshape needs no scratch memory.
  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
    if (kernel_ == nullptr) {
      auto node = anf_node_.lock();
      MS_EXCEPTION_IF_NULL(node);
      // cast<CNodePtr>() returns nullptr for non-CNode anf nodes; guard it
      // before handing it to GenDynamicKernel.
      auto cnode = node->cast<CNodePtr>();
      MS_EXCEPTION_IF_NULL(cnode);
      kernel_ = std::dynamic_pointer_cast<ReshapeKernel>(GenDynamicKernel(cnode, stream_ptr));
      // dynamic_pointer_cast yields nullptr if the factory ever returns a
      // different kernel type; fail loudly instead of dereferencing it.
      MS_EXCEPTION_IF_NULL(kernel_);
      kernel_->Initialize();
    }
    kernel_->Execute(inputs, outputs);
    return true;
  }
  // Skip HostKernelMod's update logic; the base Ascend behavior suffices here.
  void UpdateOp() override { AscendKernelMod::UpdateOp(); }

 private:
  std::shared_ptr<ReshapeKernel> kernel_;
};
MS_HOST_REG_KERNEL(Reshape, ReshapeKernelMod);
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_HOST_RESHAPE_KERNEL_H_

+ 0
- 2
mindspore/ccsrc/backend/optimizer/ascend/ascend_backend_optimization.cc View File

@@ -150,7 +150,6 @@
#include "backend/optimizer/ascend/mindir/update_input_names_strided_slice_grad.h"
#include "backend/optimizer/ascend/mindir/avg_pool_grad_unify_mindir.h"
#include "backend/optimizer/ascend/mindir/bn_grad_unify_mindir.h"
#include "backend/optimizer/ascend/mindir/dynamic_reshape_unify_mindir.h"
#include "backend/optimizer/ascend/mindir/all_to_all_unify_mindir.h"
#include "backend/optimizer/ascend/mindir/neighbor_exchange_v2_unify_mindir.h"
#include "backend/optimizer/ascend/dynamic_shape/convert_dynamic_op.h"
@@ -605,7 +604,6 @@ void AscendUnifyMindIR(const std::shared_ptr<session::KernelGraph> &graph) {
unify_mindir_pm->AddPass(std::make_shared<opt::DropoutUnifyMindIR1>());
unify_mindir_pm->AddPass(std::make_shared<opt::DropoutGradUnifyMindIR>());
unify_mindir_pm->AddPass(std::make_shared<opt::BatchNormGradUnifyMindIR>());
unify_mindir_pm->AddPass(std::make_shared<opt::DynamicReshapeUnifyMindIR>());
unify_mindir_pm->AddPass(std::make_shared<opt::NeighborExchangeUnifyMindIR>());
unify_mindir_pm->AddPass(std::make_shared<opt::NeighborExchangeV2UnifyMindIR>());
unify_mindir_pm->AddPass(std::make_shared<opt::NeighborExchangeV2GradUnifyMindIR>());


+ 0
- 67
mindspore/ccsrc/backend/optimizer/ascend/mindir/dynamic_reshape_unify_mindir.cc View File

@@ -1,67 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/optimizer/ascend/mindir/dynamic_reshape_unify_mindir.h"
#include <vector>
#include <memory>
#include "backend/session/anf_runtime_algorithm.h"

namespace mindspore {
namespace opt {
namespace {
size_t kDynamicReshapeInputNum = 2;

// Builds a DynamicReshape CNode mirroring the given Reshape node: same data
// and shape inputs, same scope, inferred output type/shape and node attrs.
AnfNodePtr CreateDynamicReshape(const FuncGraphPtr &graph, const CNodePtr &reshape_node) {
MS_EXCEPTION_IF_NULL(graph);
MS_EXCEPTION_IF_NULL(reshape_node);
const auto &reshape_node_inputs = reshape_node->inputs();
// Reshape must carry exactly the data input and the shape input.
CheckCNodeInputSize(reshape_node, kDynamicReshapeInputNum);
std::vector<AnfNodePtr> dynamic_reshape_inputs = {NewValueNode(std::make_shared<Primitive>(kDynamicReshapeOpName)),
reshape_node_inputs[kDim1], reshape_node_inputs[kDim2]};
auto dynamic_reshape_node = graph->NewCNode(dynamic_reshape_inputs);
MS_EXCEPTION_IF_NULL(dynamic_reshape_node);
dynamic_reshape_node->set_scope(reshape_node->scope());
// Copy the inferred output type/shape so downstream passes see an
// equivalent abstract on the replacement node.
auto types = {AnfAlgo::GetOutputInferDataType(reshape_node, 0)};
auto shapes = {AnfAlgo::GetOutputDetailShape(reshape_node, 0)};
AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, dynamic_reshape_node.get());
AnfAlgo::CopyNodeAttrs(reshape_node, dynamic_reshape_node);
return dynamic_reshape_node;
}
} // namespace

// Pattern: any Reshape primitive followed by an arbitrary input sequence.
const BaseRef DynamicReshapeUnifyMindIR::DefinePattern() const {
VarPtr Xs = std::make_shared<SeqVar>();
auto prim = std::make_shared<Primitive>(kReshapeOpName);
return VectorRef({prim, Xs});
}

// Rewrites a matched Reshape into a DynamicReshape when its shape input is
// not a constant ValueNode; returns nullptr (no rewrite) otherwise.
const AnfNodePtr DynamicReshapeUnifyMindIR::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node,
const EquivPtr &) const {
MS_EXCEPTION_IF_NULL(node);
MS_EXCEPTION_IF_NULL(func_graph);

auto cnode = node->cast<CNodePtr>();
MS_EXCEPTION_IF_NULL(cnode);
// inputs() = {primitive, data, shape}, so a rewritable node has at least
// kDynamicReshapeInputNum + 1 entries.
if (cnode->size() < kDynamicReshapeInputNum + 1) {
return nullptr;
}
auto shp_input = cnode->input(kDynamicReshapeInputNum);
// A constant shape means the static Reshape kernel handles it as-is.
if (shp_input->isa<ValueNode>()) {
return nullptr;
}
return CreateDynamicReshape(func_graph, cnode);
}
} // namespace opt
} // namespace mindspore

+ 0
- 34
mindspore/ccsrc/backend/optimizer/ascend/mindir/dynamic_reshape_unify_mindir.h View File

@@ -1,34 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_MINDIR_DYNAMIC_RESHAPE_UNIFY_MINDIR_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_MINDIR_DYNAMIC_RESHAPE_UNIFY_MINDIR_H_

#include "backend/optimizer/common/optimizer.h"
#include "backend/optimizer/common/helper.h"

namespace mindspore {
namespace opt {
// MindIR unification pass converting Reshape nodes whose shape input is not
// a constant into DynamicReshape nodes (see the matching .cc for the rewrite).
class DynamicReshapeUnifyMindIR : public PatternProcessPass {
public:
explicit DynamicReshapeUnifyMindIR(bool multigraph = true)
: PatternProcessPass("dynamic_reshape_unify_mindir", multigraph) {}
~DynamicReshapeUnifyMindIR() override = default;
const BaseRef DefinePattern() const override;
const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override;
};
} // namespace opt
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_MINDIR_DYNAMIC_RESHAPE_UNIFY_MINDIR_H_

+ 3
- 0
mindspore/ccsrc/backend/optimizer/common/helper.cc View File

@@ -330,6 +330,9 @@ bool IsNopNode(const AnfNodePtr &node) {
if (nop_nodes.find(AnfAlgo::GetCNodeName(cnode)) == nop_nodes.end() && !is_nop_node) {
return false;
}
if (AnfAlgo::GetCNodeName(cnode) == prim::kPrimReshape->name() && AnfAlgo::IsNodeDynamicShape(cnode)) {
return false;
}
return true;
}



+ 2
- 2
mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc View File

@@ -1022,7 +1022,7 @@ const DeviceAddress *AnfRuntimeAlgorithm::GetOutputAddr(const AnfNodePtr &node,
if (opt::IsNopNode(node) && (skip_nop_node || IsNeedSkipNopOpAddr(node))) {
auto cnode = node->cast<CNodePtr>();
MS_EXCEPTION_IF_NULL(cnode);
if (cnode->size() == kNopNodeInputSize) {
if (cnode->size() == kNopNodeInputSize || AnfAlgo::GetCNodeName(cnode) == "Reshape") {
return AnfRuntimeAlgorithm::GetPrevNodeOutputAddr(cnode, 0);
} else {
MS_LOG(EXCEPTION) << node->DebugString() << "Invalid nop node." << trace::DumpSourceLines(node);
@@ -2229,7 +2229,7 @@ void AnfRuntimeAlgorithm::GetAllFatherRealNode(const AnfNodePtr &anf_node, std::
}

bool AnfRuntimeAlgorithm::IsHostKernel(const CNodePtr &kernel_node) {
const std::set<std::string> host_kernel = {prim::kPrimDynamicShape->name(), prim::kPrimDynamicReshape->name(),
const std::set<std::string> host_kernel = {prim::kPrimDynamicShape->name(), prim::kPrimReshape->name(),
prim::kPrimDynamicBroadcastGradientArgs->name()};
auto op_name = AnfAlgo::GetCNodeName(kernel_node);
if (host_kernel.find(op_name) == host_kernel.end()) {


+ 0
- 1
mindspore/ccsrc/utils/utils.h View File

@@ -96,7 +96,6 @@ constexpr auto kUnsortedSegmentProdOpName = "UnsortedSegmentProd";
constexpr auto kUnsortedSegmentMinOpName = "UnsortedSegmentMin";
constexpr auto kFlattenGradOpName = "FlattenGrad";
constexpr auto kExpandDimsOpName = "ExpandDims";
constexpr auto kDynamicReshapeOpName = "DynamicReshape";
constexpr auto kReshapeOpName = "Reshape";
constexpr auto kTransposeOpName = "Transpose";
constexpr auto kTransposeNODOpName = "TransposeNOD";


+ 0
- 3
mindspore/core/abstract/primitive_infer_map.cc View File

@@ -66,7 +66,6 @@ std::set<int64_t> GetDependsFormMap(const CNodePtr &cnode) {
static const auto &kSlice = prim::kPrimSlice->name();
static const auto &kSliceGrad = prim::kPrimSliceGrad->name();
static const auto &kReshape = prim::kPrimReshape->name();
static const auto &kDynamicReshape = prim::kPrimDynamicReshape->name();
// Common dynamic shape depends.
static const PrimShapeDependMap dynamic_shape_depends{{kUnsortedSegmentSum, ShapeSet{2}},
{kUnsortedSegmentMin, ShapeSet{2}},
@@ -82,7 +81,6 @@ std::set<int64_t> GetDependsFormMap(const CNodePtr &cnode) {
{kStridedSliceGrad, ShapeSet{1, 2, 3, 4}},
{kTile, ShapeSet{1}},
{kReshape, ShapeSet{1}},
{kDynamicReshape, ShapeSet{1}},
{kSlice, ShapeSet{1, 2}},
{kSliceGrad, ShapeSet{2, 3}},
{kDynamicBroadcastTo, ShapeSet{1}}};
@@ -158,7 +156,6 @@ PrimitiveEvalImplMap &GetPrimitiveToEvalImplMap() {
{prim::kPrimDynamicStitch, R{InferImplDynamicStitch, nullptr, true}},
{prim::kPrimPadAndShift, R{InferImplPadAndShift, nullptr, true}},
{prim::kPrimDynamicShape, R{InferImplDynamicShape, nullptr, true}},
{prim::kPrimDynamicReshape, R{InferImplReshape, nullptr, true}},
{prim::kPrimMapUniform, R{InferImplMapUniform, nullptr, true}},
{prim::kPrimSplit, R{InferImplSplit, nullptr, true}},
{prim::kPrimSequenceMask, R{InferImplSequenceMask, nullptr, true}},


+ 1
- 2
mindspore/core/base/core_ops.h View File

@@ -104,7 +104,7 @@ constexpr auto kDynamicBroadcastGradientArgs = "DynamicBroadcastGradientArgs";
constexpr auto kTranspose = "Transpose";
constexpr auto kSplitV = "SplitV";
constexpr auto kDynamicBroadcastTo = "DynamicBroadcastTo";
constexpr auto kDynamicReshape = "DynamicReshape";
constexpr auto kReshape = "Reshape";

// NN
constexpr auto kCTCLoss = "CTCLoss";
@@ -228,7 +228,6 @@ inline const PrimitivePtr kPrimUnsortedSegmentSum = std::make_shared<Primitive>(
inline const PrimitivePtr kPrimUnsortedSegmentMin = std::make_shared<Primitive>("UnsortedSegmentMin");
inline const PrimitivePtr kPrimConcatOffset = std::make_shared<Primitive>("ConcatOffset");
inline const PrimitivePtr kPrimReshape = std::make_shared<Primitive>("Reshape");
inline const PrimitivePtr kPrimDynamicReshape = std::make_shared<Primitive>(kDynamicReshape);
inline const PrimitivePtr kPrimSubAndFilter = std::make_shared<Primitive>("SubAndFilter");
inline const PrimitivePtr kPrimMapCacheIdx = std::make_shared<Primitive>("MapCacheIdx");
inline const PrimitivePtr kPrimUpdateCache = std::make_shared<Primitive>("UpdateCache");


Loading…
Cancel
Save