diff --git a/mindspore/lite/src/runtime/kernel/npu/convolution_npu.cc b/mindspore/lite/src/runtime/kernel/npu/convolution_npu.cc
index cf82a6ef74..d62447aa43 100644
--- a/mindspore/lite/src/runtime/kernel/npu/convolution_npu.cc
+++ b/mindspore/lite/src/runtime/kernel/npu/convolution_npu.cc
@@ -25,6 +25,10 @@ using mindspore::schema::PrimitiveType_Conv2DFusion;
 namespace mindspore::kernel {
 int ConvolutionNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs,
                                     const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter) {
+  if (conv_param_->stride_h_ > inputs[0]->Height() || conv_param_->stride_w_ > inputs[0]->Width()) {
+    MS_LOG(ERROR) << "Npu convolution does not support stride greater than input size.";
+    return RET_ERROR;
+  }
   return RET_OK;
 }
 
@@ -108,10 +112,14 @@ kernel::LiteKernel *NpuConvKernelCreator(const std::vector<lite::Tensor *> &inputs,
                                          const lite::InnerContext *ctx, const kernel::KernelKey &desc) {
   MS_ASSERT(op_parameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_Conv2DFusion);
+  if (inputs[0]->Size() > NPU_MEMORY_MAX) {
+    MS_LOG(ERROR) << "Npu does not support input tensor size greater than 200MB";
+    free(op_parameter);
+    return nullptr;
+  }
   auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter);
   kernel::NPUKernel *kernel = nullptr;
-
   if (conv_param->group_ == 1) {
     kernel = new (std::nothrow) kernel::ConvolutionNPUKernel(op_parameter, inputs, outputs, ctx);
   } else if (conv_param->group_ == conv_param->input_channel_ && conv_param->group_ == conv_param->output_channel_) {
     kernel = new (std::nothrow) kernel::ConvolutionDepthwiseNPUKernel(op_parameter, inputs, outputs, ctx);
diff --git a/mindspore/lite/src/runtime/kernel/npu/npu_kernel.h b/mindspore/lite/src/runtime/kernel/npu/npu_kernel.h
index 77ec44ebae..6b9d626905 100644
--- a/mindspore/lite/src/runtime/kernel/npu/npu_kernel.h
+++ b/mindspore/lite/src/runtime/kernel/npu/npu_kernel.h
@@ -27,6 +27,7 @@ using mindspore::kernel::LiteKernel;
 using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_OK;
 namespace mindspore::kernel {
+#define NPU_MEMORY_MAX 200 * 1024 * 1024
 class NPUKernel : public LiteKernel {
  public:
   NPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
@@ -62,6 +63,11 @@ kernel::LiteKernel *NPUKernelCreator(const std::vector<lite::Tensor *> &inputs,
     free(op_parameter);
     return nullptr;
   }
+  if (inputs[0]->Size() > NPU_MEMORY_MAX) {
+    MS_LOG(ERROR) << "Npu does not support input tensor size greater than 200MB";
+    free(op_parameter);
+    return nullptr;
+  }
   auto *kernel = new (std::nothrow) T(op_parameter, inputs, outputs, ctx);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "kernel " << op_parameter->name_ << "is nullptr.";