Browse Source

[MSLITE][Develop] fix bugs in NPU ops: reshape, pooling

tags/v1.2.0-rc1
yangruoqi713 5 years ago
parent
commit
964471ea6e
51 changed files with 85 additions and 69 deletions
  1. +2
    -0
      mindspore/lite/nnacl/reshape_parameter.h
  2. +12
    -0
      mindspore/lite/src/ops/populate/pooling_populate.cc
  3. +8
    -0
      mindspore/lite/src/ops/populate/reshape_populate.cc
  4. +10
    -10
      mindspore/lite/src/ops/reshape.cc
  5. +2
    -0
      mindspore/lite/src/ops/reshape.h
  6. +2
    -3
      mindspore/lite/src/runtime/kernel/npu/activation_npu.cc
  7. +0
    -2
      mindspore/lite/src/runtime/kernel/npu/activation_npu.h
  8. +0
    -1
      mindspore/lite/src/runtime/kernel/npu/arithmetic_npu.cc
  9. +1
    -1
      mindspore/lite/src/runtime/kernel/npu/arithmetic_npu.h
  10. +0
    -1
      mindspore/lite/src/runtime/kernel/npu/arithmetic_self_npu.cc
  11. +1
    -1
      mindspore/lite/src/runtime/kernel/npu/arithmetic_self_npu.h
  12. +0
    -1
      mindspore/lite/src/runtime/kernel/npu/batchnorm_npu.h
  13. +1
    -2
      mindspore/lite/src/runtime/kernel/npu/cast_npu.h
  14. +1
    -2
      mindspore/lite/src/runtime/kernel/npu/concat_npu.h
  15. +1
    -2
      mindspore/lite/src/runtime/kernel/npu/convolution_base_npu.h
  16. +0
    -2
      mindspore/lite/src/runtime/kernel/npu/convolution_depthwise_npu.cc
  17. +0
    -2
      mindspore/lite/src/runtime/kernel/npu/convolution_depthwise_npu.h
  18. +0
    -1
      mindspore/lite/src/runtime/kernel/npu/convolution_npu.cc
  19. +0
    -2
      mindspore/lite/src/runtime/kernel/npu/deconvolution_npu.cc
  20. +0
    -1
      mindspore/lite/src/runtime/kernel/npu/eltwise_npu.cc
  21. +1
    -1
      mindspore/lite/src/runtime/kernel/npu/eltwise_npu.h
  22. +1
    -1
      mindspore/lite/src/runtime/kernel/npu/fullconnection_npu.h
  23. +0
    -1
      mindspore/lite/src/runtime/kernel/npu/gather_npu.cc
  24. +1
    -1
      mindspore/lite/src/runtime/kernel/npu/gather_npu.h
  25. +1
    -1
      mindspore/lite/src/runtime/kernel/npu/instance_norm_npu.h
  26. +1
    -1
      mindspore/lite/src/runtime/kernel/npu/matmul_npu.h
  27. +1
    -1
      mindspore/lite/src/runtime/kernel/npu/npu_kernel.h
  28. +1
    -1
      mindspore/lite/src/runtime/kernel/npu/pad_npu.h
  29. +4
    -0
      mindspore/lite/src/runtime/kernel/npu/pooling_npu.cc
  30. +0
    -2
      mindspore/lite/src/runtime/kernel/npu/reduce_npu.cc
  31. +1
    -1
      mindspore/lite/src/runtime/kernel/npu/reduce_npu.h
  32. +16
    -3
      mindspore/lite/src/runtime/kernel/npu/reshape_npu.cc
  33. +6
    -3
      mindspore/lite/src/runtime/kernel/npu/reshape_npu.h
  34. +0
    -1
      mindspore/lite/src/runtime/kernel/npu/resize_npu.cc
  35. +1
    -1
      mindspore/lite/src/runtime/kernel/npu/resize_npu.h
  36. +1
    -1
      mindspore/lite/src/runtime/kernel/npu/scale_npu.h
  37. +0
    -1
      mindspore/lite/src/runtime/kernel/npu/shape_npu.cc
  38. +1
    -1
      mindspore/lite/src/runtime/kernel/npu/shape_npu.h
  39. +0
    -1
      mindspore/lite/src/runtime/kernel/npu/slice_npu.cc
  40. +1
    -1
      mindspore/lite/src/runtime/kernel/npu/slice_npu.h
  41. +0
    -1
      mindspore/lite/src/runtime/kernel/npu/softmax_npu.cc
  42. +1
    -1
      mindspore/lite/src/runtime/kernel/npu/softmax_npu.h
  43. +1
    -1
      mindspore/lite/src/runtime/kernel/npu/split_npu.h
  44. +0
    -1
      mindspore/lite/src/runtime/kernel/npu/squeeze_npu.cc
  45. +1
    -1
      mindspore/lite/src/runtime/kernel/npu/squeeze_npu.h
  46. +0
    -1
      mindspore/lite/src/runtime/kernel/npu/strided_slice_npu.cc
  47. +1
    -1
      mindspore/lite/src/runtime/kernel/npu/strided_slice_npu.h
  48. +0
    -1
      mindspore/lite/src/runtime/kernel/npu/transpose_npu.cc
  49. +1
    -1
      mindspore/lite/src/runtime/kernel/npu/transpose_npu.h
  50. +0
    -1
      mindspore/lite/src/runtime/kernel/npu/unsqueeze_npu.cc
  51. +1
    -1
      mindspore/lite/src/runtime/kernel/npu/unsqueeze_npu.h

+ 2
- 0
mindspore/lite/nnacl/reshape_parameter.h View File

@@ -29,6 +29,8 @@ typedef struct ReshapeQuantArg {
typedef struct ReshapeParameter {
// primitive parameter
OpParameter op_parameter_;
int shape_dim_;
int shape_[MAX_SHAPE_SIZE];

// other parameter
ReshapeQuantArg quant_para_;


+ 12
- 0
mindspore/lite/src/ops/populate/pooling_populate.cc View File

@@ -43,6 +43,18 @@ OpParameter *PopulatePoolingParameter(const mindspore::lite::PrimitiveC *primiti
pooling_param->stride_w_ = pooling_primitive->GetStrideW();
pooling_param->stride_h_ = pooling_primitive->GetStrideH();
pooling_param->avg_mode_ = pooling_primitive->GetAvgMode();
auto pad_mode = pooling_primitive->GetPadMode();
switch (pad_mode) {
case schema::PadMode_SAME_UPPER:
pooling_param->pad_mode_ = Pad_Same;
break;
case schema::PadMode_VALID:
pooling_param->pad_mode_ = Pad_Valid;
break;
default:
pooling_param->pad_mode_ = Pad_No;
break;
}

auto is_global = pooling_primitive->GetGlobal();
pooling_param->global_ = is_global;


+ 8
- 0
mindspore/lite/src/ops/populate/reshape_populate.cc View File

@@ -19,6 +19,7 @@
#include "src/common/log_adapter.h"
#include "src/tensor.h"
#include "nnacl/reshape_parameter.h"
#include "src/ops/reshape.h"

namespace mindspore {
namespace lite {
@@ -31,6 +32,13 @@ OpParameter *PopulateReshapeParameter(const mindspore::lite::PrimitiveC *primiti
}
memset(reshape_param, 0, sizeof(ReshapeParameter));
reshape_param->op_parameter_.type_ = primitive->Type();
auto reshape_lite_primitive = (lite::Reshape *)primitive;
auto shape = reshape_lite_primitive->GetShape();
reshape_param->shape_dim_ = shape.size();
int i = 0;
for (auto iter = shape.begin(); iter != shape.end(); iter++) {
reshape_param->shape_[i++] = *iter;
}
return reinterpret_cast<OpParameter *>(reshape_param);
}



+ 10
- 10
mindspore/lite/src/ops/reshape.cc View File

@@ -176,13 +176,13 @@ int Reshape::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> out
return RET_INFER_INVALID;
}

std::vector<int> out_shape;
out_shape_.clear();
if (inputs_.size() == kDoubleNum) {
auto shape_tensor = inputs_.at(1);
if (shape_tensor->IsConst()) {
if (shape_tensor->data_c() == nullptr || (shape_tensor->shape().size() == 1 && shape_tensor->shape()[0] == 0)) {
MS_LOG(DEBUG) << "reshape to a scalar.";
output->set_shape(out_shape);
output->set_shape(out_shape_);
return RET_OK;
}
}
@@ -194,23 +194,23 @@ int Reshape::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> out
switch (shape_tensor->data_type()) {
case kNumberTypeInt8: {
auto data = reinterpret_cast<int8_t *>(shape_tensor->MutableData());
CalShape<int8_t>(data, inputs_, &out_shape, shape_size);
CalShape<int8_t>(data, inputs_, &out_shape_, shape_size);
} break;
case kNumberTypeInt32: {
auto data = reinterpret_cast<int32_t *>(shape_tensor->MutableData());
CalShape<int32_t>(data, inputs_, &out_shape, shape_size);
CalShape<int32_t>(data, inputs_, &out_shape_, shape_size);
} break;
case kNumberTypeInt64: {
auto data = reinterpret_cast<int64_t *>(shape_tensor->MutableData());
CalShape<int64_t>(data, inputs_, &out_shape, shape_size);
CalShape<int64_t>(data, inputs_, &out_shape_, shape_size);
} break;
case kNumberTypeFloat: {
auto data = reinterpret_cast<float *>(shape_tensor->MutableData());
CalShape<float>(data, inputs_, &out_shape, shape_size);
CalShape<float>(data, inputs_, &out_shape_, shape_size);
} break;
case kNumberTypeUInt32: {
auto data = reinterpret_cast<uint32_t *>(shape_tensor->MutableData());
CalShape<uint32_t>(data, inputs_, &out_shape, shape_size);
CalShape<uint32_t>(data, inputs_, &out_shape_, shape_size);
} break;
default: {
MS_LOG(ERROR) << "Reshape weight tensor has unsupported dataType: " << shape_tensor->data_type();
@@ -219,18 +219,18 @@ int Reshape::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> out
}
} else if (inputs_.size() == kSingleNum) {
for (size_t i = 0; i < GetShape().size(); ++i) {
out_shape.push_back(GetShape().at(i));
out_shape_.push_back(GetShape().at(i));
}
} else {
MS_LOG(ERROR) << "inputs tensor size invalid.";
return RET_INFER_ERR;
}
auto ret = CalNewShape(inputs_.front(), &out_shape);
auto ret = CalNewShape(inputs_.front(), &out_shape_);
if (ret != RET_OK) {
MS_LOG(ERROR) << "CalNewShape error";
return ret;
}
output->set_shape(out_shape);
output->set_shape(out_shape_);
return RET_OK;
}
} // namespace lite


+ 2
- 0
mindspore/lite/src/ops/reshape.h View File

@@ -42,9 +42,11 @@ class Reshape : public PrimitiveC {
int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
int GetFormat() const;
std::vector<int64_t> GetShape() const;
std::vector<int> GetOutputShape() { return out_shape_; }

private:
int CalNewShape(const lite::Tensor *in_tensor, std::vector<int> *out_shape) const;
std::vector<int> out_shape_;
};
} // namespace lite
} // namespace mindspore


+ 2
- 3
mindspore/lite/src/runtime/kernel/npu/activation_npu.cc View File

@@ -15,7 +15,6 @@
*/

#include "src/runtime/kernel/npu/activation_npu.h"
#include "include/graph/op/all_ops.h"
#include "src/kernel_registry.h"

using mindspore::kernel::KERNEL_ARCH::kNPU;
@@ -28,7 +27,7 @@ int ActivationNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs,
if (act_param_->type_ != schema::ActivationType_RELU && act_param_->type_ != schema::ActivationType_RELU6 &&
act_param_->type_ != schema::ActivationType_SIGMOID && act_param_->type_ != schema::ActivationType_TANH &&
act_param_->type_ != schema::ActivationType_HSIGMOID && act_param_->type_ != schema::ActivationType_LEAKY_RELU) {
MS_LOG(ERROR) << "Unsupport activation type for activation op " << name_ << "when running npu";
MS_LOG(ERROR) << "Unsupported activation type for activation op " << name_ << " when running npu";
return RET_ERROR;
}
return RET_OK;
@@ -64,7 +63,7 @@ int ActivationNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs,
act_->set_attr_mode(14);
break;
default:
MS_LOG(ERROR) << "Unsupport activation type for activation op " << name_ << "when running npu";
MS_LOG(ERROR) << "Unsupported activation type for activation op " << name_ << " when running npu";
return RET_ERROR;
}
return RET_OK;


+ 0
- 2
mindspore/lite/src/runtime/kernel/npu/activation_npu.h View File

@@ -18,10 +18,8 @@

#include <vector>
#include "include/graph/op/all_ops.h"
#include "include/graph/compatible/all_ops.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "nnacl/fp32/activation_fp32.h"

namespace mindspore::kernel {
class ActivationNPUKernel : public NPUKernel {
public:


+ 0
- 1
mindspore/lite/src/runtime/kernel/npu/arithmetic_npu.cc View File

@@ -16,7 +16,6 @@

#include "src/runtime/kernel/npu/arithmetic_npu.h"
#include <string>
#include "include/graph/op/all_ops.h"
#include "src/kernel_registry.h"

using mindspore::kernel::KERNEL_ARCH::kNPU;


+ 1
- 1
mindspore/lite/src/runtime/kernel/npu/arithmetic_npu.h View File

@@ -17,9 +17,9 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARITHMETIC_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARITHMETIC_NPU_H_
#include <vector>
#include "include/graph/op/all_ops.h"
#include "nnacl/arithmetic.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
namespace mindspore::kernel {
class ArithmeticNPUKernel : public NPUKernel {
public:


+ 0
- 1
mindspore/lite/src/runtime/kernel/npu/arithmetic_self_npu.cc View File

@@ -16,7 +16,6 @@

#include "src/runtime/kernel/npu/arithmetic_self_npu.h"
#include <string>
#include "include/graph/op/all_ops.h"
#include "src/kernel_registry.h"

using mindspore::kernel::KERNEL_ARCH::kNPU;


+ 1
- 1
mindspore/lite/src/runtime/kernel/npu/arithmetic_self_npu.h View File

@@ -17,8 +17,8 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARITHMETICSELF_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARITHMETICSELF_NPU_H_
#include <vector>
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/math_defs.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
namespace mindspore::kernel {
class ArithmeticSelfNPUKernel : public NPUKernel {
public:


+ 0
- 1
mindspore/lite/src/runtime/kernel/npu/batchnorm_npu.h View File

@@ -17,7 +17,6 @@
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_BATCHNORM_NPU_H_

#include <vector>
#include "include/graph/op/all_ops.h"
#include "include/graph/compatible/all_ops.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "nnacl/batchnorm_parameter.h"


+ 1
- 2
mindspore/lite/src/runtime/kernel/npu/cast_npu.h View File

@@ -17,9 +17,8 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_CAST_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_CAST_NPU_H_
#include <vector>
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
namespace mindspore::kernel {
class CastNPUKernel : public NPUKernel {
public:


+ 1
- 2
mindspore/lite/src/runtime/kernel/npu/concat_npu.h View File

@@ -17,10 +17,9 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_CONCAT_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_CONCAT_NPU_H_
#include <vector>
#include "include/graph/op/all_ops.h"
#include "nnacl/concat_parameter.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"

namespace mindspore::kernel {
class ConcatNPUKernel : public NPUKernel {
public:


+ 1
- 2
mindspore/lite/src/runtime/kernel/npu/convolution_base_npu.h View File

@@ -18,10 +18,9 @@

#include <vector>
#include <memory>
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "nnacl/conv_parameter.h"

namespace mindspore::kernel {
class ConvolutionBaseNPUKernel : public NPUKernel {
public:


+ 0
- 2
mindspore/lite/src/runtime/kernel/npu/convolution_depthwise_npu.cc View File

@@ -16,8 +16,6 @@

#include "src/runtime/kernel/npu/convolution_depthwise_npu.h"
#include "src/kernel_registry.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"

using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_DepthwiseConv2D;


+ 0
- 2
mindspore/lite/src/runtime/kernel/npu/convolution_depthwise_npu.h View File

@@ -18,9 +18,7 @@

#include <vector>
#include "include/graph/op/all_ops.h"
#include "include/graph/compatible/all_ops.h"
#include "src/runtime/kernel/npu/convolution_base_npu.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "nnacl/conv_parameter.h"

namespace mindspore::kernel {


+ 0
- 1
mindspore/lite/src/runtime/kernel/npu/convolution_npu.cc View File

@@ -15,7 +15,6 @@
*/

#include "src/runtime/kernel/npu/convolution_npu.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"

using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;


+ 0
- 2
mindspore/lite/src/runtime/kernel/npu/deconvolution_npu.cc View File

@@ -15,8 +15,6 @@
*/

#include "src/runtime/kernel/npu/deconvolution_npu.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"

using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_DeConv2D;


+ 0
- 1
mindspore/lite/src/runtime/kernel/npu/eltwise_npu.cc View File

@@ -15,7 +15,6 @@
*/

#include "src/runtime/kernel/npu/eltwise_npu.h"
#include "include/graph/op/all_ops.h"
#include "src/kernel_registry.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"



+ 1
- 1
mindspore/lite/src/runtime/kernel/npu/eltwise_npu.h View File

@@ -17,9 +17,9 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ELTWISE_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ELTWISE_NPU_H_
#include <vector>
#include "include/graph/op/all_ops.h"
#include "src/ops/eltwise.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
namespace mindspore::kernel {
class EltwiseNPUKernel : public NPUKernel {
public:


+ 1
- 1
mindspore/lite/src/runtime/kernel/npu/fullconnection_npu.h View File

@@ -17,8 +17,8 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_FULLCONNECTION_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_FULLCONNECTION_NPU_H_
#include <vector>
#include "src/runtime/kernel/npu/convolution_base_npu.h"
#include "include/graph/op/all_ops.h"
#include "src/runtime/kernel/npu/convolution_base_npu.h"
#include "nnacl/matmul_parameter.h"
namespace mindspore::kernel {
class FullconnectionNPUKernel : public ConvolutionBaseNPUKernel {


+ 0
- 1
mindspore/lite/src/runtime/kernel/npu/gather_npu.cc View File

@@ -16,7 +16,6 @@

#include "src/runtime/kernel/npu/gather_npu.h"
#include "src/kernel_registry.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_Gather;


+ 1
- 1
mindspore/lite/src/runtime/kernel/npu/gather_npu.h View File

@@ -17,8 +17,8 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_GATHER_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_GATHER_NPU_H_
#include <vector>
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "nnacl/gather_parameter.h"
namespace mindspore::kernel {
class GatherNPUKernel : public NPUKernel {


+ 1
- 1
mindspore/lite/src/runtime/kernel/npu/instance_norm_npu.h View File

@@ -17,9 +17,9 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_INSTANCE_NORM_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_INSTANCE_NORM_NPU_H_
#include <vector>
#include "include/graph/op/all_ops.h"
#include "nnacl/instance_norm_parameter.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
namespace mindspore::kernel {
class InstanceNormNPUKernel : public NPUKernel {
public:


+ 1
- 1
mindspore/lite/src/runtime/kernel/npu/matmul_npu.h View File

@@ -17,10 +17,10 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_MATMUL_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_MATMUL_NPU_H_
#include <vector>
#include "include/graph/op/all_ops.h"
#include "nnacl/matmul_parameter.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "nnacl/softmax_parameter.h"
#include "include/graph/op/all_ops.h"
namespace mindspore::kernel {
class MatMulNPUKernel : public NPUKernel {
public:


+ 1
- 1
mindspore/lite/src/runtime/kernel/npu/npu_kernel.h View File

@@ -18,9 +18,9 @@
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_KERNEL_NPU_H_

#include <vector>
#include "include/graph/graph.h"
#include "src/lite_kernel.h"
#include "include/errorcode.h"
#include "include/graph/graph.h"
#include "src/kernel_registry.h"

using mindspore::kernel::LiteKernel;


+ 1
- 1
mindspore/lite/src/runtime/kernel/npu/pad_npu.h View File

@@ -17,10 +17,10 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_PAD_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_PAD_NPU_H_
#include <vector>
#include "include/graph/op/all_ops.h"
#include "nnacl/pad_parameter.h"
#include "src/ops/pad.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
namespace mindspore::kernel {
class PadNPUKernel : public NPUKernel {
public:


+ 4
- 0
mindspore/lite/src/runtime/kernel/npu/pooling_npu.cc View File

@@ -24,6 +24,10 @@ using mindspore::schema::PrimitiveType_Pooling;
namespace mindspore::kernel {
int PoolingNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
OpParameter *opParameter) {
if (pooling_param_->pad_l_ > pooling_param_->stride_w_ || pooling_param_->pad_u_ > pooling_param_->stride_h_) {
MS_LOG(ERROR) << "Npu pooling does not support pad > stride.";
return RET_ERROR;
}
return RET_OK;
}



+ 0
- 2
mindspore/lite/src/runtime/kernel/npu/reduce_npu.cc View File

@@ -17,8 +17,6 @@
#include "src/runtime/kernel/npu/reduce_npu.h"
#include <memory>
#include "src/kernel_registry.h"
#include "include/graph/op/all_ops.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_Reduce;


+ 1
- 1
mindspore/lite/src/runtime/kernel/npu/reduce_npu.h View File

@@ -17,9 +17,9 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_REDUCE_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_REDUCE_NPU_H_
#include <vector>
#include "include/graph/op/all_ops.h"
#include "nnacl/reduce_parameter.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
namespace mindspore::kernel {
class ReduceNPUKernel : public NPUKernel {
public:


+ 16
- 3
mindspore/lite/src/runtime/kernel/npu/reshape_npu.cc View File

@@ -15,9 +15,8 @@
*/

#include "src/runtime/kernel/npu/reshape_npu.h"
#include <memory>
#include "src/kernel_registry.h"
#include "include/graph/op/all_ops.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_Reshape;
@@ -25,6 +24,10 @@ using mindspore::schema::PrimitiveType_Reshape;
namespace mindspore::kernel {
int ReshapeNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
OpParameter *opParameter) {
if (reshape_param_->shape_dim_ == 0) {
MS_LOG(ERROR) << "Npu reshape op only supports const shape.";
return RET_ERROR;
}
return RET_OK;
}

@@ -37,7 +40,17 @@ int ReshapeNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs,
return RET_ERROR;
}
op_->set_input_x(*npu_inputs[0]);
op_->set_input_shape(*npu_inputs[1]);

auto shape_op = new (std::nothrow) hiai::op::Const(name_ + "_shape");
std::vector<int> shape;
for (int i = 0; i < reshape_param_->shape_dim_; i++) {
shape.push_back(reshape_param_->shape_[i]);
}
ge::TensorDesc shape_tensor_desc(ge::Shape({reshape_param_->shape_dim_}), ge::FORMAT_NCHW, ge::DT_INT32);
ge::TensorPtr ai_shape_tensor = std::make_shared<hiai::Tensor>(shape_tensor_desc);
ai_shape_tensor->SetData(reinterpret_cast<uint8_t *>(shape.data()), reshape_param_->shape_dim_ * sizeof(int32_t));
shape_op->set_attr_value(ai_shape_tensor);
op_->set_input_shape(*shape_op);
return RET_OK;
}



+ 6
- 3
mindspore/lite/src/runtime/kernel/npu/reshape_npu.h View File

@@ -17,16 +17,18 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_RESHAPE_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_RESHAPE_NPU_H_
#include <vector>
#include "nnacl/conv_parameter.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
#include "nnacl/reshape_parameter.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
namespace mindspore::kernel {
class ReshapeNPUKernel : public NPUKernel {
public:
ReshapeNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: NPUKernel(parameter, inputs, outputs, ctx, primitive) {}
: NPUKernel(parameter, inputs, outputs, ctx, primitive) {
reshape_param_ = reinterpret_cast<ReshapeParameter *>(parameter);
}
~ReshapeNPUKernel() override;

int IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
@@ -37,6 +39,7 @@ class ReshapeNPUKernel : public NPUKernel {

private:
hiai::op::Reshape *op_ = nullptr;
ReshapeParameter *reshape_param_;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_RESHAPE_NPU_H_

+ 0
- 1
mindspore/lite/src/runtime/kernel/npu/resize_npu.cc View File

@@ -16,7 +16,6 @@

#include "src/runtime/kernel/npu/resize_npu.h"
#include <memory>
#include "include/graph/op/all_ops.h"
#include "src/kernel_registry.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"



+ 1
- 1
mindspore/lite/src/runtime/kernel/npu/resize_npu.h View File

@@ -17,10 +17,10 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_RESIZE_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_RESIZE_NPU_H_
#include <vector>
#include "include/graph/op/all_ops.h"
#include "nnacl/resize_parameter.h"
#include "src/ops/resize.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
namespace mindspore::kernel {
class ResizeNPUKernel : public NPUKernel {
public:


+ 1
- 1
mindspore/lite/src/runtime/kernel/npu/scale_npu.h View File

@@ -17,9 +17,9 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SCALE_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SCALE_NPU_H_
#include <vector>
#include "include/graph/op/nn_defs.h"
#include "nnacl/scale.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/nn_defs.h"
namespace mindspore::kernel {
class ScaleNPUKernel : public NPUKernel {
public:


+ 0
- 1
mindspore/lite/src/runtime/kernel/npu/shape_npu.cc View File

@@ -16,7 +16,6 @@

#include "src/runtime/kernel/npu/shape_npu.h"
#include "src/kernel_registry.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_Shape;


+ 1
- 1
mindspore/lite/src/runtime/kernel/npu/shape_npu.h View File

@@ -17,8 +17,8 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SHAPE_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SHAPE_NPU_H_
#include <vector>
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
namespace mindspore::kernel {
class ShapeNPUKernel : public NPUKernel {
public:


+ 0
- 1
mindspore/lite/src/runtime/kernel/npu/slice_npu.cc View File

@@ -16,7 +16,6 @@

#include "src/runtime/kernel/npu/slice_npu.h"
#include "src/kernel_registry.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_Slice;


+ 1
- 1
mindspore/lite/src/runtime/kernel/npu/slice_npu.h View File

@@ -17,9 +17,9 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SLICE_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SLICE_NPU_H_
#include <vector>
#include "include/graph/op/all_ops.h"
#include "src/ops/slice.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
namespace mindspore::kernel {
class SliceNPUKernel : public NPUKernel {
public:


+ 0
- 1
mindspore/lite/src/runtime/kernel/npu/softmax_npu.cc View File

@@ -16,7 +16,6 @@

#include "src/runtime/kernel/npu/softmax_npu.h"
#include "src/kernel_registry.h"

using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_SoftMax;


+ 1
- 1
mindspore/lite/src/runtime/kernel/npu/softmax_npu.h View File

@@ -17,9 +17,9 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SOFTMAX_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SOFTMAX_NPU_H_
#include <vector>
#include "include/graph/op/nn_defs.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "nnacl/softmax_parameter.h"
#include "include/graph/op/nn_defs.h"
namespace mindspore::kernel {
class SoftmaxNPUKernel : public NPUKernel {
public:


+ 1
- 1
mindspore/lite/src/runtime/kernel/npu/split_npu.h View File

@@ -17,9 +17,9 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SPLIT_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SPLIT_NPU_H_
#include <vector>
#include "include/graph/op/all_ops.h"
#include "src/ops/split.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
namespace mindspore::kernel {
class SplitNPUKernel : public NPUKernel {
public:


+ 0
- 1
mindspore/lite/src/runtime/kernel/npu/squeeze_npu.cc View File

@@ -16,7 +16,6 @@

#include "src/runtime/kernel/npu/squeeze_npu.h"
#include "src/kernel_registry.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_Squeeze;


+ 1
- 1
mindspore/lite/src/runtime/kernel/npu/squeeze_npu.h View File

@@ -17,9 +17,9 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SQUEEZE_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SQUEEZE_NPU_H_
#include <vector>
#include "include/graph/op/all_ops.h"
#include "src/ops/squeeze.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
namespace mindspore::kernel {
class SqueezeNPUKernel : public NPUKernel {
public:


+ 0
- 1
mindspore/lite/src/runtime/kernel/npu/strided_slice_npu.cc View File

@@ -16,7 +16,6 @@

#include "src/runtime/kernel/npu/strided_slice_npu.h"
#include "src/kernel_registry.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_StridedSlice;


+ 1
- 1
mindspore/lite/src/runtime/kernel/npu/strided_slice_npu.h View File

@@ -17,10 +17,10 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_STRIDEDSLICE_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_STRIDEDSLICE_NPU_H_
#include <vector>
#include "include/graph/op/all_ops.h"
#include "src/ops/strided_slice.h"
#include "nnacl/strided_slice_parameter.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
namespace mindspore::kernel {
class StridedSliceNPUKernel : public NPUKernel {
public:


+ 0
- 1
mindspore/lite/src/runtime/kernel/npu/transpose_npu.cc View File

@@ -16,7 +16,6 @@

#include "src/runtime/kernel/npu/transpose_npu.h"
#include "src/kernel_registry.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_Nchw2Nhwc;


+ 1
- 1
mindspore/lite/src/runtime/kernel/npu/transpose_npu.h View File

@@ -17,9 +17,9 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_TRANSPOSE_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_TRANSPOSE_NPU_H_
#include <vector>
#include "include/graph/op/all_ops.h"
#include "nnacl/transpose.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
namespace mindspore::kernel {
class TransposeNPUKernel : public NPUKernel {
public:


+ 0
- 1
mindspore/lite/src/runtime/kernel/npu/unsqueeze_npu.cc View File

@@ -17,7 +17,6 @@
#include "src/runtime/kernel/npu/unsqueeze_npu.h"
#include <memory>
#include "src/kernel_registry.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_Unsqueeze;


+ 1
- 1
mindspore/lite/src/runtime/kernel/npu/unsqueeze_npu.h View File

@@ -17,9 +17,9 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_UNSQUEEZE_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_UNSQUEEZE_NPU_H_
#include <vector>
#include "include/graph/op/all_ops.h"
#include "src/ops/unsqueeze.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
namespace mindspore::kernel {
class UnsqueezeNPUKernel : public NPUKernel {
public:


Loading…
Cancel
Save