Browse Source

Self-check for the ONNX/Caffe parser

feature/build-system-rewrite
qinzheng 4 years ago
parent
commit
420830d95e
11 changed files with 35 additions and 26 deletions
  1. +1
    -0
      mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32/reduce_fp32.h
  2. +3
    -0
      mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32/unique_fp32.c
  3. +11
    -8
      mindspore/lite/src/runtime/kernel/arm/fp32/ragged_range_fp32.cc
  4. +3
    -3
      mindspore/lite/src/runtime/kernel/arm/fp32/relative_position_attention_fp32.cc
  5. +0
    -1
      mindspore/lite/src/runtime/kernel/arm/fp32/relative_position_attention_fp32.h
  6. +4
    -4
      mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling_fp32.cc
  7. +4
    -3
      mindspore/lite/src/runtime/kernel/arm/fp32/scale_fp32.cc
  8. +0
    -1
      mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch_fp32.h
  9. +4
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/transpose_fp32.cc
  10. +2
    -2
      mindspore/lite/src/runtime/kernel/arm/fp32/uniform_real_fp32.cc
  11. +3
    -4
      mindspore/lite/src/runtime/kernel/arm/fp32/where_fp32.cc

+ 1
- 0
mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32/reduce_fp32.h View File

@@ -16,6 +16,7 @@

#ifndef MINDSPORE_NNACL_FP32_REDUCE_H_
#define MINDSPORE_NNACL_FP32_REDUCE_H_
#include <stddef.h>
#include "nnacl/op_base.h"
#include "nnacl/reduce_parameter.h"



+ 3
- 0
mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32/unique_fp32.c View File

@@ -17,6 +17,9 @@
#include "nnacl/fp32/unique_fp32.h"

int Find(const float *array, int len, float target) {
if (array == NULL) {
return -1;
}
for (int i = 0; i < len; ++i) {
if (array[i] == target) {
return i;


+ 11
- 8
mindspore/lite/src/runtime/kernel/arm/fp32/ragged_range_fp32.cc View File

@@ -37,16 +37,19 @@ int RaggedRangeCPUKernel::Prepare() {
int RaggedRangeCPUKernel::ReSize() { return RET_OK; }

int RaggedRangeCPUKernel::Run() {
if (in_tensors_[0]->data_type() == kNumberTypeFloat32) {
RaggedRangeFp32(static_cast<float *>(in_tensors_.at(0)->data()), static_cast<float *>(in_tensors_.at(1)->data()),
static_cast<float *>(in_tensors_.at(2)->data()), static_cast<int *>(out_tensors_.at(0)->data()),
static_cast<float *>(out_tensors_.at(1)->data()),
if (in_tensors_[FIRST_INPUT]->data_type() == kNumberTypeFloat32) {
RaggedRangeFp32(static_cast<float *>(in_tensors_.at(FIRST_INPUT)->data()),
static_cast<float *>(in_tensors_.at(SECOND_INPUT)->data()),
static_cast<float *>(in_tensors_.at(THIRD_INPUT)->data()),
static_cast<int *>(out_tensors_.at(FIRST_INPUT)->data()),
static_cast<float *>(out_tensors_.at(SECOND_INPUT)->data()),
reinterpret_cast<RaggedRangeParameter *>(op_parameter_));
} else {
RaggedRangeInt(static_cast<int *>(in_tensors_.at(0)->data()), static_cast<int *>(in_tensors_.at(1)->data()),
static_cast<int *>(in_tensors_.at(2)->data()), static_cast<int *>(out_tensors_.at(0)->data()),
static_cast<int *>(out_tensors_.at(1)->data()),
reinterpret_cast<RaggedRangeParameter *>(op_parameter_));
RaggedRangeInt(
static_cast<int *>(in_tensors_.at(FIRST_INPUT)->data()), static_cast<int *>(in_tensors_.at(SECOND_INPUT)->data()),
static_cast<int *>(in_tensors_.at(THIRD_INPUT)->data()), static_cast<int *>(out_tensors_.at(FIRST_INPUT)->data()),
static_cast<int *>(out_tensors_.at(SECOND_INPUT)->data()),
reinterpret_cast<RaggedRangeParameter *>(op_parameter_));
}
return RET_OK;
}


+ 3
- 3
mindspore/lite/src/runtime/kernel/arm/fp32/relative_position_attention_fp32.cc View File

@@ -37,7 +37,7 @@ constexpr int k3DimsLeftMatrixDeepIndex = 2;
constexpr int kRightMatrixDeepIndex = 0;
constexpr int kRelativePositionHasBiasInputSize = 15;

bool AttentionActivationTensorCheck(lite::Tensor *tensor) {
bool AttentionActivationTensorCheck(const lite::Tensor *tensor) {
if (tensor == nullptr || tensor->data_type() != kNumberTypeFloat32 ||
tensor->shape().size() != kActivationTensorShapeSize ||
tensor->shape().at(kTensorShapeBatchIndex) != kActivationTensorBatch) {
@@ -99,7 +99,7 @@ int RelativePositionAttentionCPUKernel::CheckInputs() {

namespace {
constexpr int kWeightTensorShapeSize = 2;
bool AttentionWeightTensorCheck(lite::Tensor *tensor) {
bool AttentionWeightTensorCheck(const lite::Tensor *tensor) {
if (tensor == nullptr || !tensor->IsConst() || tensor->data_type() != kNumberTypeFloat32 ||
tensor->shape().size() != kWeightTensorShapeSize) {
return false;
@@ -152,7 +152,7 @@ int RelativePositionAttentionCPUKernel::CheckWeights() {
}

namespace {
bool AttentionBiasTensorCheck(lite::Tensor *tensor) {
bool AttentionBiasTensorCheck(const lite::Tensor *tensor) {
if (tensor == nullptr || !tensor->IsConst() || tensor->data_type() != kNumberTypeFloat32 ||
tensor->shape().size() != 1) {
return false;


+ 0
- 1
mindspore/lite/src/runtime/kernel/arm/fp32/relative_position_attention_fp32.h View File

@@ -39,7 +39,6 @@ class RelativePositionAttentionCPUKernel : public InnerKernel {
int ReSize() override;
int Run() override;

private:
// check inputs
int CheckInputs();
int CheckWeights();


+ 4
- 4
mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling_fp32.cc View File

@@ -48,12 +48,12 @@ int ROIPoolingCPUKernel::ReSize() {
auto in_shape = in_tensors_.front()->shape();
auto out_shape = out_tensors_.front()->shape();
int ndims = static_cast<int>(in_shape.size());
if (ndims < C4NUM) {
MS_LOG(ERROR) << "ROIPooling in_shape.size() error ,shape dim greater than or equal to 4!";
if (ndims != C4NUM) {
MS_LOG(ERROR) << "ROIPooling in_shape.size() error ,shape must be equal to 4!";
return RET_ERROR;
}
if (out_shape.size() < C4NUM) {
MS_LOG(ERROR) << "ROIPooling out_shape.size() error ,shape dim greater than or equal to 4!";
if (out_shape.size() != C4NUM) {
MS_LOG(ERROR) << "ROIPooling out_shape.size() error ,shape must be equal to 4!";
return RET_ERROR;
}
param_->ndim_ = ndims;


+ 4
- 3
mindspore/lite/src/runtime/kernel/arm/fp32/scale_fp32.cc View File

@@ -53,7 +53,7 @@ int ScaleCPUKernel::InitScaleOffset() {
MS_LOG(ERROR) << "Malloc buffer failed.";
return RET_ERROR;
}
memcpy(scale_, scale_tensor->data(), scale_tensor->ElementsNum() * sizeof(float));
(void)memcpy(scale_, scale_tensor->data(), scale_tensor->ElementsNum() * sizeof(float));
} else {
scale_param_->const_scale_ = false;
scale_ = nullptr;
@@ -67,7 +67,7 @@ int ScaleCPUKernel::InitScaleOffset() {
return RET_ERROR;
}
memset(offset_, 0, scale_tensor->ElementsNum() * sizeof(float));
} else if (in_tensors_.size() == 3 && reinterpret_cast<float *>(in_tensors_.at(2)->data()) != nullptr) {
} else if (in_tensors_.size() == C3NUM && reinterpret_cast<float *>(in_tensors_.at(THIRD_INPUT)->data()) != nullptr) {
scale_param_->const_offset_ = true;
auto offset_tensor = in_tensors_.at(2);
MS_CHECK_TRUE_RET(scale_tensor->ElementsNum() == offset_tensor->ElementsNum(), RET_ERROR);
@@ -76,7 +76,7 @@ int ScaleCPUKernel::InitScaleOffset() {
MS_LOG(ERROR) << "Malloc data failed";
return RET_ERROR;
}
memcpy(offset_, offset_tensor->data(), offset_tensor->ElementsNum() * sizeof(float));
(void)memcpy(offset_, offset_tensor->data(), offset_tensor->ElementsNum() * sizeof(float));
} else {
scale_param_->const_offset_ = false;
offset_ = nullptr;
@@ -179,6 +179,7 @@ int ScaleRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
int ScaleCPUKernel::Run() {
auto in_tensor = in_tensors_.front();
input_ptr_ = reinterpret_cast<float *>(in_tensor->data());
CHECK_NULL_RETURN(input_ptr_);
if (!scale_param_->const_scale_) {
auto scale_tensor = in_tensors_.at(1);
scale_ = reinterpret_cast<float *>(scale_tensor->data());


+ 0
- 1
mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch_fp32.h View File

@@ -36,7 +36,6 @@ class SpaceToBatchCPUKernel : public InnerKernel {
int Run() override;
void ProcessInput();

public:
int DoRun(int task_id);

protected:


+ 4
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/transpose_fp32.cc View File

@@ -62,6 +62,10 @@ int TransposeCPUKernel::ReSize() {
} else {
MS_ASSERT(in_tensors_.size() == C2NUM);
auto perm_tensor = in_tensors_.at(1);
if (perm_tensor->data_type() != kNumberTypeInt32) {
MS_LOG(ERROR) << "Unsupported type id: " << perm_tensor->data_type() << " of perm tensor.";
return RET_ERROR;
}
perm_data = reinterpret_cast<int *>(perm_tensor->data());
MSLITE_CHECK_PTR(perm_data);
}


+ 2
- 2
mindspore/lite/src/runtime/kernel/arm/fp32/uniform_real_fp32.cc View File

@@ -14,8 +14,8 @@
* limitations under the License.
*/
#include "src/runtime/kernel/arm/fp32/uniform_real_fp32.h"
#include <stdlib.h>
#include <string.h>
#include <cstdlib>
#include <cstring>
#include <vector>
#include "src/kernel_registry.h"
#include "include/errorcode.h"


+ 3
- 4
mindspore/lite/src/runtime/kernel/arm/fp32/where_fp32.cc View File

@@ -98,7 +98,6 @@ int WhereCPUKernel::RunWithSingleInput() {
int data_num_int = where_param_->condition_num_ * where_param_->rank_;
MS_CHECK_TRUE_RET(data_num_int >= 0, RET_ERROR);
size_t data_num = static_cast<size_t>(data_num_int);
MS_CHECK_FALSE_MSG(SIZE_MUL_OVERFLOW(sizeof(int32_t), data_num), RET_ERROR, "mul overflow");
size_t data_size = data_num * sizeof(int32_t);
auto data = ms_context_->allocator->Malloc(data_size);
if (data == nullptr) {
@@ -113,13 +112,13 @@ int WhereCPUKernel::RunWithSingleInput() {
bool condition = false;
switch (input_data_type) {
case kNumberTypeInt32:
condition = int32_condition_[index];
condition = static_cast<bool>(int32_condition_[index]);
break;
case kNumberTypeFloat32:
condition = fp32_condition_[index];
condition = static_cast<bool>(fp32_condition_[index]);
break;
case kNumberTypeBool:
condition = condition_[index];
condition = static_cast<bool>(condition_[index]);
break;
default:
MS_LOG(ERROR) << "Unsupported data type: " << input_data_type << " of where cpu kernel.";


Loading…
Cancel
Save