| @@ -67,3 +67,7 @@ void DoStackInt32(const int32_t *const *inputs, size_t input_num, int *in_shape, | |||||
| in_offset += copy_num; | in_offset += copy_num; | ||||
| } | } | ||||
| } | } | ||||
// Stack with a single input tensor is a pure copy: the output is the input
// with one extra leading axis of size 1, so the raw bytes are identical.
// input:     flattened source buffer (byte view of the input tensor)
// output:    destination buffer, at least data_size bytes
// data_size: number of bytes to copy
void DoStackOneInput(const int8_t *input, int8_t *output, size_t data_size) {
  // memcpy with a null pointer is undefined behavior even when data_size is
  // 0, so bail out early on degenerate arguments.
  if (input == NULL || output == NULL || data_size == 0) {
    return;
  }
  memcpy(output, input, data_size);
}
| @@ -29,6 +29,7 @@ extern "C" { | |||||
| void DoStack(const float *const *inputs, size_t input_num, int *in_shape, size_t shape_size, int axis, float *output); | void DoStack(const float *const *inputs, size_t input_num, int *in_shape, size_t shape_size, int axis, float *output); | ||||
| void DoStackInt32(const int32_t *const *inputs, size_t input_num, int *in_shape, size_t shape_size, int axis, | void DoStackInt32(const int32_t *const *inputs, size_t input_num, int *in_shape, size_t shape_size, int axis, | ||||
| int32_t *output); | int32_t *output); | ||||
| void DoStackOneInput(const int8_t *input, int8_t *output, size_t data_size); | |||||
| #ifdef __cplusplus | #ifdef __cplusplus | ||||
| } | } | ||||
| #endif | #endif | ||||
| @@ -58,7 +58,7 @@ int Stack::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers:: | |||||
namespace {
// A Stack op always produces exactly one output tensor.
constexpr int kStackOutputNum = 1;
// Stacking a single tensor is legal (it degenerates to a plain copy with an
// extra leading axis), so the minimum input count is 1, not 2. The pasted
// diff carried both the old (= 2) and new (= 1) definitions; keeping both
// would be a redefinition, so only the intended value remains.
constexpr int kStackMinInputNum = 1;
}  // namespace
| int Stack::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) { | int Stack::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) { | ||||
| MS_ASSERT(this->primitive_ != nullptr); | MS_ASSERT(this->primitive_ != nullptr); | ||||
| @@ -48,6 +48,12 @@ int StackCPUKernel::Run() { | |||||
| return ret; | return ret; | ||||
| } | } | ||||
| size_t inputs_num = in_tensors_.size(); | size_t inputs_num = in_tensors_.size(); | ||||
| auto input0 = in_tensors_[0]; | |||||
| if (inputs_num == 1) { | |||||
| auto *output_data = reinterpret_cast<int8_t *>(out_tensors_[0]->Data()); | |||||
| DoStackOneInput(reinterpret_cast<const int8_t *>(input0->Data()), output_data, input0->Size()); | |||||
| return RET_OK; | |||||
| } | |||||
| auto input0_shape = in_tensors_[0]->shape(); | auto input0_shape = in_tensors_[0]->shape(); | ||||
| if (in_tensors_[0]->data_type() == kNumberTypeFloat32 || in_tensors_[0]->data_type() == kNumberTypeFloat) { | if (in_tensors_[0]->data_type() == kNumberTypeFloat32 || in_tensors_[0]->data_type() == kNumberTypeFloat) { | ||||
| auto *output_data = reinterpret_cast<float *>(out_tensors_[0]->Data()); | auto *output_data = reinterpret_cast<float *>(out_tensors_[0]->Data()); | ||||