Browse Source

!9125 [MS][Lite][Cpu] add new tensorlist ops

From: @lzkcode
Reviewed-by: 
Signed-off-by:
tags/v1.1.0
mindspore-ci-bot Gitee 5 years ago
parent
commit
06bc0bd9e1
16 changed files with 1207 additions and 0 deletions
  1. +30
    -0
      mindspore/lite/nnacl/tensorlist_parameter.h
  2. +3
    -0
      mindspore/lite/schema/ops.fbs
  3. +42
    -0
      mindspore/lite/src/ops/populate/tensorlistfromtensor_populate.cc
  4. +41
    -0
      mindspore/lite/src/ops/populate/tensorlistgetitem_populate.cc
  5. +41
    -0
      mindspore/lite/src/ops/populate/tensorlistreserve_populate.cc
  6. +41
    -0
      mindspore/lite/src/ops/populate/tensorliststack_populate.cc
  7. +314
    -0
      mindspore/lite/src/ops/tensor_list.cc
  8. +75
    -0
      mindspore/lite/src/ops/tensor_list.h
  9. +124
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/TensorListFromTensor.cc
  10. +54
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/TensorListFromTensor.h
  11. +101
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/TensorListGetItem.cc
  12. +45
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/TensorListGetItem.h
  13. +82
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/TensorListReserve.cc
  14. +44
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/TensorListReserve.h
  15. +122
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/TensorListStack.cc
  16. +48
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/TensorListStack.h

+ 30
- 0
mindspore/lite/nnacl/tensorlist_parameter.h View File

@@ -0,0 +1,30 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_NNACL_TENSORLIST_PARAMETER_H_
#define MINDSPORE_LITE_NNACL_TENSORLIST_PARAMETER_H_

#include "nnacl/op_base.h"
#include "ir/dtype/type_id.h"

// C-style parameter block shared by all TensorList ops; filled in by the
// src/ops/populate/tensorlist*_populate.cc translators and consumed by the
// arm/fp32 TensorList kernels.
// NOTE(review): this nnacl header uses the C++ type mindspore::TypeId, so it is
// only includable from C++ translation units -- confirm that is intended.
typedef struct TensorListParameter {
  OpParameter op_parameter_;         // common header; type_ identifies the primitive
  mindspore::TypeId shape_type_;     // TensorListFromTensor.shapeType (set for FromTensor only)
  mindspore::TypeId element_dtype_;  // the primitive's elementDType field
  int num_element_;                  // TensorListStack.numElements (set for Stack only)
} TensorListParameter;

#endif  // MINDSPORE_LITE_NNACL_TENSORLIST_PARAMETER_H_

+ 3
- 0
mindspore/lite/schema/ops.fbs View File

@@ -1172,6 +1172,8 @@ table Partial {
}

table TensorListFromTensor {
  elementDType : int;  // TypeId of the tensors stored in the produced tensorlist
  shapeType : int;     // TypeId used for the element-shape tensor
}

table TensorListStack {
@@ -1184,6 +1186,7 @@ table TensorListGetItem {
}

table TensorListSetItem {
  elementDType : int;  // TypeId of the element being written into the tensorlist
}

table TensorListReserve {


+ 42
- 0
mindspore/lite/src/ops/populate/tensorlistfromtensor_populate.cc View File

@@ -0,0 +1,42 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "nnacl/tensorlist_parameter.h"
#include "src/ops/primitive_c.h"
#include "src/ops/populate/populate_register.h"
#include "src/ops/tensor_list.h"

namespace mindspore {
namespace lite {
OpParameter *PopulateTensorListFromTensorParameter(const mindspore::lite::PrimitiveC *primitive) {
  // Translates the TensorListFromTensor primitive into the C-style
  // TensorListParameter consumed by the nnacl kernel.
  // CONSISTENCY: local renamed from `TensorList_param` to lower_snake_case to
  // match the sibling populate files (getItem_param / reserve_param / stack_param).
  TensorListParameter *tensor_list_param =
    reinterpret_cast<TensorListParameter *>(malloc(sizeof(TensorListParameter)));
  if (tensor_list_param == nullptr) {
    MS_LOG(ERROR) << "malloc TensorListParameter failed.";
    return nullptr;
  }
  memset(tensor_list_param, 0, sizeof(TensorListParameter));
  tensor_list_param->op_parameter_.type_ = primitive->Type();
  auto tensor_list =
    reinterpret_cast<mindspore::lite::TensorListFromTensor *>(const_cast<mindspore::lite::PrimitiveC *>(primitive));
  tensor_list_param->shape_type_ = tensor_list->GetShapeType();
  tensor_list_param->element_dtype_ = tensor_list->GetElementDType();
  return reinterpret_cast<OpParameter *>(tensor_list_param);
}
Registry TensorListFromTensorParameterRegistry(schema::PrimitiveType_TensorListFromTensor,
                                               PopulateTensorListFromTensorParameter);

} // namespace lite
} // namespace mindspore

+ 41
- 0
mindspore/lite/src/ops/populate/tensorlistgetitem_populate.cc View File

@@ -0,0 +1,41 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "src/ops/tensor_list.h"
#include "src/ops/primitive_c.h"
#include "src/ops/populate/populate_register.h"
#include "nnacl/tensorlist_parameter.h"

namespace mindspore {
namespace lite {
OpParameter *PopulateTensorListGetItemParameter(const mindspore::lite::PrimitiveC *primitive) {
  // Build the nnacl TensorListParameter for the TensorListGetItem op.
  auto *param = reinterpret_cast<TensorListParameter *>(malloc(sizeof(TensorListParameter)));
  if (param == nullptr) {
    MS_LOG(ERROR) << "malloc TensorListParameter failed.";
    return nullptr;
  }
  memset(param, 0, sizeof(TensorListParameter));
  auto *src_prim =
    reinterpret_cast<mindspore::lite::TensorListGetItem *>(const_cast<mindspore::lite::PrimitiveC *>(primitive));
  param->op_parameter_.type_ = primitive->Type();
  param->element_dtype_ = src_prim->GetElementDType();
  return reinterpret_cast<OpParameter *>(param);
}
Registry TensorListGetItemParameterRegistry(schema::PrimitiveType_TensorListGetItem,
                                            PopulateTensorListGetItemParameter);

} // namespace lite
} // namespace mindspore

+ 41
- 0
mindspore/lite/src/ops/populate/tensorlistreserve_populate.cc View File

@@ -0,0 +1,41 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "src/ops/tensor_list.h"
#include "src/ops/primitive_c.h"
#include "src/ops/populate/populate_register.h"
#include "nnacl/tensorlist_parameter.h"

namespace mindspore {
namespace lite {
OpParameter *PopulateTensorListReserveParameter(const mindspore::lite::PrimitiveC *primitive) {
  // Build the nnacl TensorListParameter for the TensorListReserve op.
  auto *param = reinterpret_cast<TensorListParameter *>(malloc(sizeof(TensorListParameter)));
  if (param == nullptr) {
    MS_LOG(ERROR) << "malloc TensorListParameter failed.";
    return nullptr;
  }
  memset(param, 0, sizeof(TensorListParameter));
  auto *src_prim =
    reinterpret_cast<mindspore::lite::TensorListReserve *>(const_cast<mindspore::lite::PrimitiveC *>(primitive));
  param->op_parameter_.type_ = primitive->Type();
  param->element_dtype_ = src_prim->GetElementDType();
  return reinterpret_cast<OpParameter *>(param);
}
Registry TensorListReserveParameterRegistry(schema::PrimitiveType_TensorListReserve,
                                            PopulateTensorListReserveParameter);

} // namespace lite
} // namespace mindspore

+ 41
- 0
mindspore/lite/src/ops/populate/tensorliststack_populate.cc View File

@@ -0,0 +1,41 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "src/ops/tensor_list.h"
#include "src/ops/primitive_c.h"
#include "src/ops/populate/populate_register.h"
#include "nnacl/tensorlist_parameter.h"

namespace mindspore {
namespace lite {
OpParameter *PopulateTensorListStackParameter(const mindspore::lite::PrimitiveC *primitive) {
  // Build the nnacl TensorListParameter for the TensorListStack op.
  auto *param = reinterpret_cast<TensorListParameter *>(malloc(sizeof(TensorListParameter)));
  if (param == nullptr) {
    MS_LOG(ERROR) << "malloc TensorListParameter failed.";
    return nullptr;
  }
  memset(param, 0, sizeof(TensorListParameter));
  auto *src_prim =
    reinterpret_cast<mindspore::lite::TensorListStack *>(const_cast<mindspore::lite::PrimitiveC *>(primitive));
  param->op_parameter_.type_ = primitive->Type();
  param->element_dtype_ = src_prim->GetElementDType();
  param->num_element_ = src_prim->GetNumElements();
  return reinterpret_cast<OpParameter *>(param);
}
Registry TensorListStackParameterRegistry(schema::PrimitiveType_TensorListStack, PopulateTensorListStackParameter);

} // namespace lite
} // namespace mindspore

+ 314
- 0
mindspore/lite/src/ops/tensor_list.cc View File

@@ -0,0 +1,314 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include "src/ops/tensor_list.h"

#ifndef PRIMITIVE_WRITEABLE
#include "src/ops/ops_register.h"
#endif

using mindspore::schema::Format_NC;

namespace mindspore {
namespace lite {

int TensorListFromTensor::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) {
  // inputs0: tensor
  // inputs1: element_shape
  // outputs0: vector<tensor>.size() dtype
  // outputs1: element_shape
  // outputs2-n: vector<tensor>
  // ROBUSTNESS: guard vector sizes before indexing inputs_[1] / outputs_[0..1].
  if (inputs_.size() < 2 || outputs_.size() < 2) {
    MS_LOG(ERROR) << "inputs_.size():" << inputs_.size() << " and outputs_.size():" << outputs_.size()
                  << " must both be at least 2";
    return RET_ERROR;
  }
  auto input = inputs_[0];
  MS_ASSERT(input != nullptr);
  std::vector<int> in_shape = input->shape();
  // ROBUSTNESS: in_shape[0] below is undefined behaviour on an empty shape.
  if (in_shape.empty()) {
    MS_LOG(ERROR) << "inputs_[0] shape must have at least one dimension";
    return RET_ERROR;
  }
  int dim0 = in_shape[0];
  if (dim0 <= 0) {
    MS_LOG(ERROR) << "inputs_[0] dim0:" << dim0 << " must greater than 0";
    return RET_ERROR;
  }
  // Each produced element keeps the trailing dims of the source tensor.
  std::vector<int> out_shape(in_shape.begin() + 1, in_shape.end());
  int out_vec_size = static_cast<int>(outputs_.size()) - 2;
  if (out_vec_size != dim0) {
    MS_LOG(ERROR) << "outputs_.size() - 2:" << out_vec_size << "must be equal to dim0:" << dim0;
    return RET_ERROR;
  }
  for (int i = 0; i < dim0; ++i) {
    auto output = outputs_[i + 2];
    MS_ASSERT(output != nullptr);
    output->set_data_type(input->data_type());
    output->set_shape(out_shape);
  }

  auto output = outputs_[0];  // vector<tensor>.size(), tensorlist.dtype
  MS_ASSERT(output != nullptr);
  output->set_data_type(kNumberTypeInt);
  output->set_shape(std::vector<int>(1, 2));  // one element.value = 2

  output = outputs_[1];  // element_shape tensor
  MS_ASSERT(output != nullptr);
  output->set_data_type(inputs_[1]->data_type());
  output->set_format(inputs_[1]->format());
  output->set_shape(inputs_[1]->shape());

  return RET_OK;
}

bool TensorListGetItem::IsFullyDefined(const std::vector<int> &shape) const {
  // A shape is fully defined when no dimension is a negative placeholder.
  for (const int dim : shape) {
    if (dim < 0) {
      return false;
    }
  }
  return true;
}

int TensorListGetItem::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) {
  // Infers the shape/dtype of the single output tensor of TensorListGetItem.
  // Flattened tensorlist layout (derived from the indexing below):
  //   inputs_[0]                : two ints {list_size, list_dtype}
  //   inputs_[2 .. size+1]      : the element tensors
  //   inputs_[size+2]           : the index scalar
  //   inputs_[size+3]           : the element-shape tensor
  int in_vec_size = inputs_.size();
  auto input0 = inputs_[0];
  MS_ASSERT(input0 != nullptr);
  auto in0_ptr = reinterpret_cast<int *>(input0->data_c());
  if (in_vec_size != in0_ptr[0] + 4) {
    MS_LOG(ERROR) << "inputs_.size():" << in_vec_size << " must be equal to:" << in0_ptr[0] + 4;
    return RET_ERROR;
  }
  auto get_index = inputs_[in0_ptr[0] + 2];
  MS_ASSERT(get_index != nullptr);
  index_ = reinterpret_cast<int *>(get_index->data_c())[0];
  // NOTE(review): this admits index_ == in0_ptr[0], which makes inputs_[index_ + 2]
  // below address the index tensor itself, not an element -- confirm whether the
  // upper bound should be exclusive.
  if (index_ < 0 || index_ > in0_ptr[0]) {
    MS_LOG(ERROR) << "index_:" << index_ << "must in [0, " << in0_ptr[0] << "]";
    return RET_ERROR;
  }
  auto input_index = inputs_[index_ + 2];
  MS_ASSERT(input_index != nullptr);
  auto output = outputs_.front();
  MS_ASSERT(output != nullptr);
  if (input_index->data_type() != kTypeUnknown) {
    // Selected element already has concrete data: mirror its metadata.
    output->set_format(input_index->format());
    output->set_data_type(input_index->data_type());
    output->set_shape(input_index->shape());
  } else {
    // Placeholder element: reconstruct the shape from the element-shape tensor,
    // filling any -1 dims from sibling elements that do have concrete shapes.
    auto ele_shape_tensor = inputs_[in0_ptr[0] + 3];
    MS_ASSERT(ele_shape_tensor != nullptr);
    auto ele_shape_type = ele_shape_tensor->data_type();
    if (ele_shape_type != kNumberTypeInt) {
      MS_LOG(ERROR) << "ele_shape_tensor.data_type():" << ele_shape_type
                    << " must be \"kNumberTypeInt\":" << kNumberTypeInt;
      return RET_ERROR;
    }
    auto shape_ptr = reinterpret_cast<int *>(ele_shape_tensor->data_c());
    for (int i = 0; i < ele_shape_tensor->ElementsNum(); ++i) {
      element_shape_.push_back(shape_ptr[i]);
    }
    if (!IsFullyDefined(element_shape_)) {
      // Borrow dimensions from any defined sibling element.
      // NOTE(review): assumes every sibling's rank is <= element_shape_.size();
      // a longer tmp would index element_shape_ out of range -- confirm upstream
      // guarantees matching ranks.
      for (int i = 0; i < in0_ptr[0]; ++i) {
        auto input = inputs_[i + 2];
        if (input->data_type() != kTypeUnknown) {
          std::vector<int> tmp = input->shape();
          for (size_t j = 0; j < tmp.size(); ++j) {
            element_shape_[j] = element_shape_[j] >= 0 ? element_shape_[j] : tmp[j];
          }
        }
      }
    }
    if (!IsFullyDefined(element_shape_)) {
      MS_LOG(ERROR) << "ele_shape_tensor Is Not FullyDefined!";
      return RET_ERROR;
    }
    element_dtype_ = GetElementDType();
    output->set_data_type(element_dtype_);
    output->set_shape(element_shape_);
  }
  return RET_OK;
}
#ifdef PRIMITIVE_WRITEABLE
// Writeable build: attributes are read from the mutable flatbuffer object API
// (value.AsXxx()), used by the converter tools.
TypeId TensorListFromTensor::GetElementDType() const {
  return (TypeId)(this->primitive_->value.AsTensorListFromTensor()->elementDType);
}

TypeId TensorListFromTensor::GetShapeType() const {
  return (TypeId)(this->primitive_->value.AsTensorListFromTensor()->shapeType);
}

TypeId TensorListGetItem::GetElementDType() const {
  return (TypeId)(this->primitive_->value.AsTensorListGetItem()->elementDType);
}
TypeId TensorListReserve::GetElementDType() const {
  return (TypeId)(this->primitive_->value.AsTensorListReserve()->elementDType);
}

TypeId TensorListStack::GetElementDType() const {
  return (TypeId)(this->primitive_->value.AsTensorListStack()->elementDType);
}

int TensorListStack::GetNumElements() const { return this->primitive_->value.AsTensorListStack()->numElements; }

#else
// Runtime build: attributes are read through the generated read-only
// flatbuffer accessors (value_as_Xxx()).
TypeId TensorListFromTensor::GetElementDType() const {
  return (TypeId)(this->primitive_->value_as_TensorListFromTensor()->elementDType());
}

TypeId TensorListFromTensor::GetShapeType() const {
  return (TypeId)(this->primitive_->value_as_TensorListFromTensor()->shapeType());
}

TypeId TensorListGetItem::GetElementDType() const {
  return (TypeId)(this->primitive_->value_as_TensorListGetItem()->elementDType());
}
TypeId TensorListReserve::GetElementDType() const {
  return (TypeId)(this->primitive_->value_as_TensorListReserve()->elementDType());
}

TypeId TensorListStack::GetElementDType() const {
  return (TypeId)(this->primitive_->value_as_TensorListStack()->elementDType());
}

int TensorListStack::GetNumElements() const { return this->primitive_->value_as_TensorListStack()->numElements(); }
#endif

int TensorListReserve::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) {
  // input0: element_shape_tensor
  // input1: num_elements
  // Output layout mirrors TensorListFromTensor: outputs_[0] = {size, dtype},
  // outputs_[1] = element shape, outputs_[2..num+1] = placeholder elements.
  auto input0 = inputs_.front();
  MS_ASSERT(input0 != nullptr);
  auto ele_shape_type = input0->data_type();
  if (ele_shape_type != kNumberTypeInt) {
    MS_LOG(ERROR) << "ele_shape_tensor.data_type():" << ele_shape_type
                  << " must be \"kNumberTypeInt\":" << kNumberTypeInt;
    return RET_ERROR;
  }
  auto input1 = inputs_[1];
  MS_ASSERT(input1 != nullptr);
  auto num_ele_type = input1->data_type();
  if (num_ele_type != kNumberTypeInt) {
    MS_LOG(ERROR) << "num_ele_tensor.data_type():" << num_ele_type << " must be \"kNumberTypeInt\":" << kNumberTypeInt;
    return RET_ERROR;
  }
  // NOTE(review): assumes input1 already has data at infer time -- data_c() is
  // dereferenced without a null check; confirm the scheduler guarantees this.
  int num_elements = reinterpret_cast<int *>(input1->data_c())[0];
  auto out_vec_size = outputs_.size();
  if (out_vec_size != (size_t)(num_elements + 2)) {
    MS_LOG(ERROR) << "outputs_.size():" << out_vec_size << " must be equal to:" << num_elements + 2;
    return RET_ERROR;
  }

  // Reserved elements start as "unknown" placeholders with shape [0].
  for (int i = 0; i < num_elements; ++i) {
    auto output = outputs_[i + 2];
    MS_ASSERT(output != nullptr);
    output->set_data_type(kTypeUnknown);
    output->set_shape(std::vector<int>(1, 0));  // shape = [0]
  }

  auto output = outputs_[0];  // vector<tensor>.size(), tensorlist.dtype
  MS_ASSERT(output != nullptr);
  output->set_data_type(kNumberTypeInt);
  output->set_shape(std::vector<int>(1, 2));  // one element.value = 2

  output = outputs_[1];  // element_shape tensor
  MS_ASSERT(output != nullptr);
  output->set_data_type(input0->data_type());
  output->set_format(input0->format());
  output->set_shape(input0->shape());
  return RET_OK;
}

bool TensorListStack::IsFullyDefined(const std::vector<int> &shape) const {
for (size_t i = 0; i < shape.size(); ++i) {
if (shape[i] < 0) {
return false;
}
}
return true;
}

int TensorListStack::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) {
  // input0: tensorlist
  // input[inputs_.size() - 1]: element_shape
  // Produces a single flat output of input0_ptr[0] * prod(element_shape) elements.
  auto input0 = inputs_.front();
  MS_ASSERT(input0 != nullptr);
  auto input0_ptr = reinterpret_cast<int *>(input0->data_c());
  int vec_in_size = inputs_.size();
  if (vec_in_size != input0_ptr[0] + 3) {
    MS_LOG(ERROR) << "inputs_.size():" << vec_in_size << " must be equal:" << input0_ptr[0] + 3;
    return RET_ERROR;
  }
  auto ele_shape = inputs_[input0_ptr[0] + 2];  // element shape
  MS_ASSERT(ele_shape != nullptr);
  auto ele_shape_ptr = reinterpret_cast<int *>(ele_shape->data_c());
  // BUG FIX: the loop condition was `ele_shape->ElementsNum()` (missing `i <`),
  // which never terminates and reads past the buffer for any non-empty shape.
  for (int i = 0; i < ele_shape->ElementsNum(); ++i) {
    output_shape_.push_back(ele_shape_ptr[i]);
  }
  std::vector<int> tensorlist_shape;
  MS_ASSERT(inputs_[1] != nullptr);
  auto input1_ptr = reinterpret_cast<int *>(inputs_[1]->data_c());
  for (int i = 0; i < inputs_[1]->ElementsNum(); ++i) {
    tensorlist_shape.push_back(input1_ptr[i]);
  }
  auto status = MergeShape(tensorlist_shape);
  if (status == RET_ERROR) {
    MS_LOG(ERROR) << "Merge tensorlist_shape is error!";
    return RET_ERROR;
  }
  if (!IsFullyDefined(output_shape_)) {
    MS_LOG(ERROR) << "element_shape Is Not FullyDefined!";
    return RET_ERROR;
  }
  if (!IsFullyDefined(tensorlist_shape)) {
    for (int i = 0; i < input0_ptr[0]; ++i) {  // get tensorlist every tensor
      auto tensor_tmp = inputs_[i + 2];
      MS_ASSERT(tensor_tmp != nullptr);
      if (tensor_tmp->data_type() != kTypeUnknown) {
        status = MergeShape(tensor_tmp->shape());
        if (status == RET_ERROR) {
          MS_LOG(ERROR) << "Merge inputs_[" << i + 2 << "] is error!";
          return RET_ERROR;
        }
      }
    }
  }
  auto output = outputs_.front();
  MS_ASSERT(output != nullptr);
  output->set_format(Format_NC);
  output->set_data_type(static_cast<TypeId>(input0_ptr[1]));
  // BUG FIX: std::accumulate's accumulator type follows the init argument (1LL),
  // but std::multiplies<int> truncated every intermediate product back to int;
  // use a long long functor and narrow once at the end.
  long long ele_num =
    std::accumulate(output_shape_.begin(), output_shape_.end(), 1LL, std::multiplies<long long>());
  output->set_shape(std::vector<int>(1, static_cast<int>(input0_ptr[0] * ele_num)));
  return RET_OK;
}

int TensorListStack::MergeShape(const std::vector<int> &shape) {
size_t dim0 = shape.size();
size_t dim1 = output_shape_.size();
if (dim1 >= unKnownRank_) {
output_shape_ = shape;
return RET_OK;
}
if (dim1 != dim0) {
MS_LOG(ERROR) << "shape.size():" << dim1 << " must be equal output_shape_.size():" << dim0;
return RET_ERROR;
}
for (size_t i = 0; i < dim0; ++i) {
int dim0_size = shape[i];
int dim1_size = output_shape_[i];
if (dim0_size >= 0 && dim1_size >= 0 && dim0_size != dim1_size) {
MS_LOG(ERROR) << "shape[" << i << "]:" << dim0_size << " is incompatible with output_shape_[" << i
<< "]:" << dim1_size;
return RET_ERROR;
}
int tmp_size = dim1_size >= 0 ? dim1_size : dim0_size;
output_shape_[i] = tmp_size;
}
return RET_OK;
}
} // namespace lite
} // namespace mindspore

+ 75
- 0
mindspore/lite/src/ops/tensor_list.h View File

@@ -0,0 +1,75 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <functional>
#include "src/ops/primitive_c.h"
#include "ir/dtype/type_id.h"

using mindspore::schema::Format;
using mindspore::schema::Format_NC;

#ifndef LITE_MINDSPORE_LITE_C_OPS_TENSORLISTFROMTENSOR_H_
#define LITE_MINDSPORE_LITE_C_OPS_TENSORLISTFROMTENSOR_H_
namespace mindspore {
namespace lite {

// Primitive wrapper for TensorListFromTensor: splits a tensor along dim0 into
// a flattened tensorlist (see InferShape in tensor_list.cc for the layout).
class TensorListFromTensor : public PrimitiveC {
 public:
  TypeId GetElementDType() const;  // schema attribute elementDType
  TypeId GetShapeType() const;     // schema attribute shapeType
  TensorListFromTensor() = default;
  // NOTE(review): declared but no definition appears in tensor_list.cc --
  // confirm it is implemented (a kernel-side method of the same name exists).
  bool IsCompatibleShape(std::vector<lite::Tensor *> inputs_);
  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
};

// Primitive wrapper for TensorListReserve: allocates a tensorlist of
// placeholder elements (dtype kTypeUnknown, shape [0]) -- see InferShape.
class TensorListReserve : public PrimitiveC {
 public:
  TensorListReserve() = default;
  TypeId GetElementDType() const;  // schema attribute elementDType
  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
};

// Primitive wrapper for TensorListGetItem: selects one element tensor out of
// a flattened tensorlist.
class TensorListGetItem : public PrimitiveC {
 public:
  TensorListGetItem() = default;
  TypeId GetElementDType() const;  // schema attribute elementDType
  // True when every entry of `shape` is non-negative (no -1 wildcards).
  bool IsFullyDefined(const std::vector<int> &shape) const;
  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;

 private:
  int index_ = -1;                       // element index read from the index input during InferShape
  TypeId element_dtype_ = kTypeUnknown;  // resolved element dtype
  std::vector<int> element_shape_;       // element shape merged during InferShape
};

// Primitive wrapper for TensorListStack: concatenates all elements of a
// flattened tensorlist into one flat output tensor.
class TensorListStack : public PrimitiveC {
 public:
  // tensor:input, element_dtype, num_elements(default=-1:reprent any tensor dim0), element_shape
  TensorListStack() = default;
  TypeId GetElementDType() const;  // schema attribute elementDType
  int GetNumElements() const;      // schema attribute numElements
  // True when every entry of `shape` is non-negative (no -1 wildcards).
  bool IsFullyDefined(const std::vector<int> &shape) const;
  // Folds `shape` into output_shape_; RET_ERROR on rank or dim mismatch.
  int MergeShape(const std::vector<int> &shape);
  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;

 private:
  size_t unKnownRank_ = 255;       // sentinel rank meaning "rank not yet known" (see MergeShape)
  std::vector<int> output_shape_;  // merged element shape accumulated in InferShape
};
} // namespace lite
} // namespace mindspore

#endif // LITE_MINDSPORE_LITE_C_OPS_TENSORLISTFROMTENSOR_H_

+ 124
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/TensorListFromTensor.cc View File

@@ -0,0 +1,124 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "include/errorcode.h"
#include "src/kernel_registry.h"
#include "src/runtime/kernel/arm/fp32/TensorListFromTensor.h"
#include "src/runtime/runtime_api.h"

using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_NULL_PTR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_TensorListFromTensor;

namespace mindspore::kernel {

bool TensorListFromTensorCPUKernel::IsCompatibleShape() {
if (input1_->data_type() != kNumberTypeInt) { // element_shape
MS_LOG(ERROR) << "in_tensors_[1] data type is must be \"kNumberTypeInt\", but now is:" << input1_->data_type();
return false;
}
int in1_ele_num = input1_->ElementsNum();
std::vector<int> tensor_shape = input0_->shape();
if (static_cast<int>(tensor_shape.size() - 1) != in1_ele_num) {
MS_LOG(ERROR) << "in_tensors_[0].shape() - 1:" << tensor_shape.size() - 1
<< " must be equal in_tensors_[1].ElementsNum():" << in1_ele_num;
return false;
}
int *elements_shape = reinterpret_cast<int *>(input1_->data_c()); // element shape in tensor data
for (int i = 0; i < in1_ele_num; ++i) {
const int dim0 = tensor_shape[i + 1];
const int dim1 = *(elements_shape + i);
if (dim0 >= 0 && dim1 >= 0 && dim0 != dim1) {
MS_LOG(ERROR) << "input0_->shape()[" << i + 1 << "]:" << dim0 << " is not equal input1_->data_c()[" << i
<< "]:" << dim1;
return false;
}
}
return true;
}

int TensorListFromTensorCPUKernel::Init() {
input0_ = in_tensors_[0]; // row tensor
input1_ = in_tensors_[1]; // element_shape tensor
output0_ = out_tensors_[0];
output1_ = out_tensors_[1];
return IsCompatibleShape();
}

// Nothing to recompute on resize: all work happens per-call in Run().
int TensorListFromTensorCPUKernel::ReSize() { return RET_OK; }

int TensorListFromTensorCPUKernel::Run() {
  // Writes {size, dtype} into output0_, copies the element shape to output1_,
  // then slices input0_ along dim0 into out_tensors_[2..dim0+1].
  int dim0 = input0_->shape()[0];
  size_t devision_dim0 = input0_->ElementsNum() / dim0;  // elements per slice
  auto out0_ptr = reinterpret_cast<int *>(output0_->MutableData());
  *out0_ptr = dim0;                        // tensorlist size
  *(out0_ptr + 1) = input0_->data_type();  // element dtype
  auto status = output1_->CopyTensorData(*input1_);
  if (status == RET_ERROR) {
    MS_LOG(ERROR) << "copy tensor data failed!";
    return RET_ERROR;
  }
  if (dim0 != static_cast<int>(out_tensors_.size() - 2)) {
    MS_LOG(ERROR) << "out_tensors_.size() - 2:[" << out_tensors_.size() - 2
                  << "] must be equal in_tensors_[0].shape()[0]:[" << dim0 << "]";
    return RET_ERROR;
  }
  // BUG FIX: previously cast the lite::Tensor object pointer itself
  // (`reinterpret_cast<float *>(input0_)`) and copied from the object's bytes;
  // the tensor's data buffer must be the memcpy source.
  auto in_ptr = reinterpret_cast<float *>(input0_->MutableData());
  size_t index = 0;
  for (int i = 0; i < dim0; ++i) {
    auto out_ptr = reinterpret_cast<float *>(out_tensors_[i + 2]->MutableData());
    memcpy(out_ptr, in_ptr + index, devision_dim0 * sizeof(float));
    index += devision_dim0;
  }
  return RET_OK;
}

kernel::LiteKernel *CpuTensorListFromTensorFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                             const std::vector<lite::Tensor *> &outputs,
                                                             OpParameter *op_parameter, const lite::InnerContext *ctx,
                                                             const kernel::KernelKey &desc,
                                                             const mindspore::lite::PrimitiveC *primitive) {
  // Factory for the fp32 TensorListFromTensor CPU kernel; op_parameter is
  // freed on every failure path so ownership never leaks.
  if (op_parameter == nullptr) {
    MS_LOG(ERROR) << "Input op_parameter is nullptr!";
    return nullptr;
  }
  if (ctx == nullptr) {
    MS_LOG(ERROR) << "Input context is nullptr!";
    free(op_parameter);
    return nullptr;
  }
  MS_ASSERT(desc.type == schema::PrimitiveType_TensorListFromTensor);
  op_parameter->thread_num_ = ctx->thread_num_;
  auto *list_kernel = new (std::nothrow) TensorListFromTensorCPUKernel(op_parameter, inputs, outputs, ctx, primitive);
  if (list_kernel == nullptr) {
    MS_LOG(ERROR) << "new TensorListFromTensorCPUKernel fail!";
    free(op_parameter);
    return nullptr;
  }
  if (list_kernel->Init() != RET_OK) {
    MS_LOG(ERROR) << "Init kernel failed! name: " << op_parameter->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_));
    delete list_kernel;
    return nullptr;
  }
  return list_kernel;
}

REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_TensorListFromTensor, CpuTensorListFromTensorFp32KernelCreator)
} // namespace mindspore::kernel

+ 54
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/TensorListFromTensor.h View File

@@ -0,0 +1,54 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_TENSORLISTFROMTENSOR_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_TENSORLISTFROMTENSOR_H_

#include <vector>
#include "src/lite_kernel.h"
#include "schema/model_generated.h"

namespace mindspore::kernel {
// fp32 CPU kernel for TensorListFromTensor: splits a tensor along dim0 into a
// flattened tensorlist (meta tensor + element-shape tensor + element tensors).
class TensorListFromTensorCPUKernel : public LiteKernel {
 public:
  /*
   * input0:tensor
   * input1:element_shape
   * output0:tensorlist.size() and dtype
   * output2~n:tensor
   * output1:element_shape(tensorlist shape)
   */
  TensorListFromTensorCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
                                const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
                                const mindspore::lite::PrimitiveC *primitive)
      : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
  ~TensorListFromTensorCPUKernel() = default;

  int Init() override;
  int ReSize() override;
  int Run() override;
  // Validates that input1_'s entries match input0_'s trailing dims (negative
  // entries are wildcards); returns false and logs on mismatch.
  bool IsCompatibleShape();

 private:
  // NOTE(review): output_shape_ is never touched in TensorListFromTensor.cc --
  // confirm it is needed, otherwise it can be removed.
  std::vector<int> output_shape_;
  lite::Tensor *output0_ = nullptr;  // {size, dtype} meta tensor
  lite::Tensor *output1_ = nullptr;  // element-shape tensor
  lite::Tensor *input0_ = nullptr;   // source tensor to split
  lite::Tensor *input1_ = nullptr;   // element_shape tensor
};
} // namespace mindspore::kernel

#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_TENSORLISTFROMTENSOR_H_

+ 101
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/TensorListGetItem.cc View File

@@ -0,0 +1,101 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "include/errorcode.h"
#include "include/ms_tensor.h"
#include "src/kernel_registry.h"
#include "src/runtime/kernel/arm/fp32/TensorListGetItem.h"
#include "src/runtime/runtime_api.h"

using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_NULL_PTR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_TensorListGetItem;

namespace mindspore::kernel {

int TensorListGetItemCPUKernel::Init() {
  // Validates the flattened tensorlist inputs and caches the element index.
  // in_tensors_[0] holds two ints {list_size, element_dtype}; the index scalar
  // lives at in_tensors_[list_size + 2]; element tensors start at index 2.
  auto input0 = reinterpret_cast<int *>(in_tensors_[0]->data_c());
  int dim0 = *input0;  // int (was size_t): avoids signed/unsigned comparison with index_
  int in_dtype = *(input0 + 1);
  if (dtype_ != in_dtype) {
    MS_LOG(ERROR) << "op dtype:" << dtype_ << " is not equal in_tensors dtype:" << in_dtype;
    return RET_ERROR;
  }
  index_ = *(reinterpret_cast<int *>(in_tensors_[dim0 + 2]->data_c()));
  if (index_ < 0) {
    MS_LOG(ERROR) << "index tensor:[" << index_ << "] must be greater than or equal to 0";
    return RET_ERROR;
  }
  // BUG FIX: the check was `index_ > dim0`, which accepted index_ == dim0 even
  // though valid element indices are [0, dim0) -- index_ + 2 would then address
  // the index tensor itself. The message already says "less than".
  if (index_ >= dim0) {
    MS_LOG(ERROR) << "index tensor:[" << index_ << "] must be less than dim0:" << dim0;
    return RET_ERROR;
  }
  index_ += 2;  // element tensors start at in_tensors_[2]
  return RET_OK;
}

int TensorListGetItemCPUKernel::Run() {
  // If the selected element holds concrete data, copy it to the output;
  // otherwise the element is a placeholder and the output is zero-filled.
  auto *selected = in_tensors_[index_];
  if (selected->data_type() != kTypeUnknown) {
    auto status = out_tensors_[0]->CopyTensorData(*selected);  // tensorlist shape
    if (status == RET_ERROR) {
      MS_LOG(ERROR) << "copy tensor data failed!";
      return RET_ERROR;
    }
    return RET_OK;
  }
  // reset 0 and dtype = dtype_
  auto *out_bytes = reinterpret_cast<char *>(out_tensors_[0]->MutableData());
  memset(out_bytes, 0, lite::DataTypeSize(dtype_) * out_tensors_[0]->ElementsNum());
  return RET_OK;
}

// Nothing to recompute on resize: index validation happens in Init().
int TensorListGetItemCPUKernel::ReSize() { return RET_OK; }

kernel::LiteKernel *CpuTensorListGetItemFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                          const std::vector<lite::Tensor *> &outputs,
                                                          OpParameter *op_parameter, const lite::InnerContext *ctx,
                                                          const kernel::KernelKey &desc,
                                                          const mindspore::lite::PrimitiveC *primitive) {
  // Factory for the fp32 TensorListGetItem CPU kernel; op_parameter is freed
  // on every failure path so ownership never leaks.
  if (op_parameter == nullptr) {
    MS_LOG(ERROR) << "Input op_parameter is nullptr!";
    return nullptr;
  }
  if (ctx == nullptr) {
    MS_LOG(ERROR) << "Input context is nullptr!";
    free(op_parameter);
    return nullptr;
  }
  MS_ASSERT(desc.type == schema::PrimitiveType_TensorListGetItem);
  auto *item_kernel = new (std::nothrow) TensorListGetItemCPUKernel(op_parameter, inputs, outputs, ctx, primitive);
  if (item_kernel == nullptr) {
    MS_LOG(ERROR) << "new TensorListGetItemCPUKernel fail!";
    free(op_parameter);
    return nullptr;
  }
  if (item_kernel->Init() != RET_OK) {
    MS_LOG(ERROR) << "Init kernel failed! name: " << op_parameter->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_));
    delete item_kernel;
    return nullptr;
  }
  return item_kernel;
}

REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_TensorListGetItem, CpuTensorListGetItemFp32KernelCreator)
} // namespace mindspore::kernel

+ 45
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/TensorListGetItem.h View File

@@ -0,0 +1,45 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_TENSORLISTGETITEM_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_TENSORLISTGETITEM_H_

#include <vector>
#include "src/lite_kernel.h"
#include "schema/model_generated.h"
#include "nnacl/tensorlist_parameter.h"

namespace mindspore::kernel {
// CPU kernel for TensorListGetItem: copies one element tensor out of a
// tensorlist, or zero-fills the output when the element is still unset.
class TensorListGetItemCPUKernel : public LiteKernel {
 public:
  // parameter must point to a TensorListParameter; the element dtype is
  // cached from it at construction time.
  TensorListGetItemCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
                             const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
                             const mindspore::lite::PrimitiveC *primitive)
      : LiteKernel(parameter, inputs, outputs, ctx, primitive),
        dtype_(reinterpret_cast<TensorListParameter *>(parameter)->element_dtype_) {}
  ~TensorListGetItemCPUKernel() = default;

  int Init() override;
  int ReSize() override;
  int Run() override;

 private:
  // Offset of the selected element within in_tensors_, set by Init().
  // Declared int (was size_t): Init() reads a possibly-negative int32 from
  // the index tensor and validates "index_ < 0" — with size_t that check
  // could never fire.
  int index_ = 0;
  TypeId dtype_ = kTypeUnknown;
};
} // namespace mindspore::kernel

#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_TENSORLISTGETITEM_H_

+ 82
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/TensorListReserve.cc View File

@@ -0,0 +1,82 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include "include/errorcode.h"
#include "src/kernel_registry.h"
#include "src/runtime/kernel/arm/fp32/TensorListReserve.h"

using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_NULL_PTR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_TensorListReserve;

namespace mindspore::kernel {

// No pre-computation is required; all work happens in Run().
int TensorListReserveCPUKernel::Init() { return RET_OK; }

// Emits the reserved tensorlist: out_tensors_[0] gets the header
// {num_elements, dtype}, out_tensors_[1] gets the element shape copied from
// in_tensors_[1], and out_tensors_[2..] are the (empty) element slots.
int TensorListReserveCPUKernel::Run() {
  auto out0_ptr = reinterpret_cast<int *>(out_tensors_[0]->MutableData());  // tensorlist size() and dtype
  out0_ptr[0] = reinterpret_cast<int *>(in_tensors_[0]->data_c())[0];       // num_elements
  out0_ptr[1] = element_dtype_;
  auto status = out_tensors_[1]->CopyTensorData(*in_tensors_[1]);  // elements_shape
  // Was "status == RET_ERROR": other non-OK statuses slipped through.
  if (status != RET_OK) {
    MS_LOG(ERROR) << "copy tensor data failed!";
    return RET_ERROR;
  }
  if (static_cast<int>(out_tensors_.size() - 2) != out0_ptr[0]) {
    MS_LOG(ERROR) << "out_tensors_.size() - 2:" << out_tensors_.size() - 2
                  << " must be equal num_elements:" << out0_ptr[0];
    // The original logged this mismatch but still returned RET_OK.
    return RET_ERROR;
  }
  return RET_OK;
}

// Nothing is cached between runs, so resize is a no-op.
int TensorListReserveCPUKernel::ReSize() { return RET_OK; }

// Factory for the fp32 TensorListReserve CPU kernel. Validates its inputs,
// constructs the kernel, runs Init(), and releases all resources on failure.
kernel::LiteKernel *CpuTensorListReserveFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                          const std::vector<lite::Tensor *> &outputs,
                                                          OpParameter *op_parameter, const lite::InnerContext *ctx,
                                                          const kernel::KernelKey &desc,
                                                          const mindspore::lite::PrimitiveC *primitive) {
  if (op_parameter == nullptr) {
    MS_LOG(ERROR) << "Input op_parameter is nullptr!";
    return nullptr;
  }
  if (ctx == nullptr) {
    MS_LOG(ERROR) << "Input context is nullptr!";
    free(op_parameter);
    return nullptr;
  }
  // Was PrimitiveType_TensorListSetItem — copy-paste error; this creator is
  // registered for TensorListReserve.
  MS_ASSERT(desc.type == schema::PrimitiveType_TensorListReserve);
  auto *kernel = new (std::nothrow) TensorListReserveCPUKernel(op_parameter, inputs, outputs, ctx, primitive);
  if (kernel == nullptr) {
    MS_LOG(ERROR) << "new TensorListReserveCPUKernel fail!";
    free(op_parameter);
    return nullptr;
  }
  auto ret = kernel->Init();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Init kernel failed! name: " << op_parameter->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_));
    delete kernel;
    return nullptr;
  }
  return kernel;
}

REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_TensorListReserve, CpuTensorListReserveFp32KernelCreator)
} // namespace mindspore::kernel

+ 44
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/TensorListReserve.h View File

@@ -0,0 +1,44 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_TENSORLISTRESERVE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_TENSORLISTRESERVE_H_

#include <vector>
#include "src/lite_kernel.h"
#include "schema/model_generated.h"
#include "nnacl/tensorlist_parameter.h"

namespace mindspore::kernel {
// CPU kernel for TensorListReserve: writes a tensorlist header
// (num_elements, dtype) and its element shape into the outputs.
class TensorListReserveCPUKernel : public LiteKernel {
 public:
  // parameter must point to a TensorListParameter; the element dtype is
  // cached from it at construction time.
  TensorListReserveCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
                             const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
                             const mindspore::lite::PrimitiveC *primitive)
      : LiteKernel(parameter, inputs, outputs, ctx, primitive),
        element_dtype_(reinterpret_cast<TensorListParameter *>(parameter)->element_dtype_) {}
  ~TensorListReserveCPUKernel() = default;

  int Init() override;
  int ReSize() override;
  int Run() override;

 private:
  // Element dtype written into the tensorlist header by Run().
  // NOTE(review): stored as int here but as TypeId in the sibling tensorlist
  // kernels — presumably interchangeable; confirm.
  int element_dtype_ = 0;
};
} // namespace mindspore::kernel

#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_TENSORLISTRESERVE_H_

+ 122
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/TensorListStack.cc View File

@@ -0,0 +1,122 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include "include/errorcode.h"
#include "ir/dtype/type_id.h"
#include "src/kernel_registry.h"
#include "src/runtime/kernel/arm/fp32/TensorListStack.h"

using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_NULL_PTR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_TensorListStack;

namespace mindspore::kernel {

// Validates the tensorlist header tensor (in_tensors_[0]) against the
// kernel's configured dtype and element count, then caches num_element_.
int TensorListStackCPUKernel::CheckParam() {
  auto in0_dtype = in_tensors_[0]->data_type();
  if (in0_dtype != kNumberTypeInt) {
    MS_LOG(ERROR) << "in_tensors_[0]->data_type():" << in0_dtype
                  << " must be equal \"kNumberTypeInt\":" << kNumberTypeInt;
    // The original logged this but fell through and dereferenced the data
    // anyway; reject the input here.
    return RET_ERROR;
  }
  auto in0_ptr = reinterpret_cast<int *>(in_tensors_[0]->data_c());
  if (in0_ptr[1] != dtype_) {
    MS_LOG(ERROR) << "in_tensors_[0].data_type:[" << in0_ptr[1] << "] must be equal "
                  << "param.data_type:[" << dtype_ << "]";
    return RET_ERROR;
  }
  if (num_element_ != -1 && in0_ptr[0] != num_element_) {
    MS_LOG(ERROR) << "in_tensors_[0].dim0:[" << in0_ptr[0] << "] must be equal "
                  << "param.elements_num:[" << num_element_ << "]";
    return RET_ERROR;
  }
  num_element_ = in0_ptr[0];  // cache the actual element count for Run()
  return RET_OK;
}

int TensorListStackCPUKernel::Init() {
output0_ = out_tensors_[0];
if (output0_->format() != schema::Format_NC) { // shape().size() = 2
MS_LOG(ERROR) << "out_tensor_[0] format must be \"Format:NC\", but now is:" << output0_->format();
return RET_ERROR;
}
int dim0 = output0_->shape()[0];
if (dim0 != 1) { // dim0 must be 1
MS_LOG(ERROR) << "out_tensor_[0] dim0 must be 1, but now is:" << dim0;
return RET_ERROR;
}
return CheckParam();
}

int TensorListStackCPUKernel::Run() {
size_t in_ele_num = 0;
for (int i = 0; i < num_element_; ++i) {
in_ele_num += in_tensors_[i + 2]->ElementsNum();
}
size_t out_ele_num = out_tensors_[0]->ElementsNum();
if (in_ele_num > out_ele_num) {
MS_LOG(ERROR) << "out_tensors_[0]->ElementsNum():" << out_ele_num << "must greater than or equal to in_ele_num"
<< in_ele_num;
return RET_ERROR;
}
size_t index = 0;
auto out_ptr = reinterpret_cast<float *>(out_tensors_[0]->MutableData());
for (int i = 0; i < num_element_; ++i) {
auto in_ptr = reinterpret_cast<float *>(in_tensors_[i + 2]->data_c());
size_t in_size = in_tensors_[i + 2]->ElementsNum();
memcpy(out_ptr + index, in_ptr, in_size * sizeof(float));
index += in_size;
}
return RET_OK;
}

// Nothing is cached between runs, so resize is a no-op.
int TensorListStackCPUKernel::ReSize() { return RET_OK; }

// Factory for the fp32 TensorListStack CPU kernel. Validates its inputs,
// constructs the kernel, runs Init(), and releases all resources on failure.
kernel::LiteKernel *CpuTensorListStackFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                        const std::vector<lite::Tensor *> &outputs,
                                                        OpParameter *op_parameter, const lite::InnerContext *ctx,
                                                        const kernel::KernelKey &desc,
                                                        const mindspore::lite::PrimitiveC *primitive) {
  if (op_parameter == nullptr) {
    MS_LOG(ERROR) << "Input op_parameter is nullptr!";
    return nullptr;
  }
  if (ctx == nullptr) {
    MS_LOG(ERROR) << "Input context is nullptr!";
    free(op_parameter);
    return nullptr;
  }
  MS_ASSERT(desc.type == schema::PrimitiveType_TensorListStack);
  auto *kernel = new (std::nothrow) TensorListStackCPUKernel(op_parameter, inputs, outputs, ctx, primitive);
  if (kernel == nullptr) {
    MS_LOG(ERROR) << "new TensorListStackCPUKernel fail!";
    free(op_parameter);
    return nullptr;
  }
  if (kernel->Init() != RET_OK) {
    MS_LOG(ERROR) << "Init kernel failed! name: " << op_parameter->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_));
    delete kernel;
    return nullptr;
  }
  return kernel;
}

REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_TensorListStack, CpuTensorListStackFp32KernelCreator)
} // namespace mindspore::kernel

+ 48
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/TensorListStack.h View File

@@ -0,0 +1,48 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_TENSORLISTSTACK_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_TENSORLISTSTACK_H_

#include <vector>
#include "src/lite_kernel.h"
#include "schema/model_generated.h"
#include "nnacl/tensorlist_parameter.h"

namespace mindspore::kernel {
// CPU kernel for TensorListStack: concatenates the element tensors of a
// tensorlist into a single contiguous output tensor.
class TensorListStackCPUKernel : public LiteKernel {
 public:
  // parameter must point to a TensorListParameter; the expected element count
  // and dtype are cached from it at construction time.
  TensorListStackCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
                           const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
                           const mindspore::lite::PrimitiveC *primitive)
      : LiteKernel(parameter, inputs, outputs, ctx, primitive),
        num_element_(reinterpret_cast<TensorListParameter *>(parameter)->num_element_),
        dtype_(reinterpret_cast<TensorListParameter *>(parameter)->element_dtype_) {}
  ~TensorListStackCPUKernel() = default;

  int Init() override;
  int ReSize() override;
  int Run() override;
  // Validates the tensorlist header against num_element_/dtype_; public so
  // Init() can delegate to it.
  int CheckParam();

 private:
  int num_element_ = -1;           // -1 means "not yet known"; set by CheckParam()
  TypeId dtype_ = kTypeUnknown;    // expected element dtype from the parameter
  lite::Tensor *output0_ = nullptr;  // cached out_tensors_[0], set by Init()
};
} // namespace mindspore::kernel

#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_TENSORLISTSTACK_H_

Loading…
Cancel
Save