@@ -45,13 +45,10 @@ using mindspore::schema::PrimitiveType_Sub;
 namespace mindspore::kernel {
 int ArithmeticNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs,
                                    const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter) {
-  if (primitive_->Type() == PrimitiveType_Mul || primitive_->Type() == PrimitiveType_Div ||
-      primitive_->Type() == PrimitiveType_Add || primitive_->Type() == PrimitiveType_Sub) {
-    if (inputs[0]->shape() != inputs[1]->shape()) {
-      MS_LOG(WARNING) << name_ << " for the two inputs, the corresponding dimensions must have the same value."
-                      << " shape 1 is:" << inputs[0]->shape() << " shape 2 is:" << inputs[1]->shape();
-      return RET_ERROR;
-    }
+  if (inputs[0]->shape() != inputs[1]->shape()) {
+    MS_LOG(WARNING) << name_ << " for the two inputs, the corresponding dimensions must have the same value."
+                    << " shape 1 is:" << inputs[0]->shape() << " shape 2 is:" << inputs[1]->shape();
+    return RET_ERROR;
   }
   return RET_OK;
 }
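
This hunk drops the outer primitive-type guard, so the same-shape requirement now applies to every binary arithmetic primitive this kernel handles, not only Mul, Div, Add, and Sub. A minimal illustration of what the check rejects (the shapes here are hypothetical):

    // Hypothetical shapes: compatible elementwise only via broadcasting.
    std::vector<int> shape_a = {1, 16, 32, 32};
    std::vector<int> shape_b = {1, 16, 1, 1};
    // shape_a != shape_b, so IsSupport logs a warning and returns RET_ERROR;
    // this NPU kernel does not broadcast, and the op presumably falls back
    // to a non-NPU implementation.
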
@@ -68,6 +65,26 @@ ge::Operator *CreateOperator(const std::vector<ge::Operator *> &npu_inputs, const std::string &name) {
   return op;
 }
 
+int ArithmeticNPUKernel::SetActivation() {
+  if (activation_type_ != ActivationType_NO_ACTIVATION) {
+    act_ = new (std::nothrow) hiai::op::Activation(name_ + "_act");
+    if (act_ == nullptr) {
+      MS_LOG(ERROR) << "New activation npu operator for op " << name_ << " failed.";
+      return RET_ERROR;
+    }
+    act_->set_input_x(*op_);
+    if (activation_type_ == ActivationType_RELU) {
+      act_->set_attr_mode(1);
+    } else if (activation_type_ == ActivationType_RELU6) {
+      act_->set_attr_mode(14);
+    } else {
+      MS_LOG(ERROR) << "Unsupported activation type for op " << name_;
+      return RET_ERROR;
+    }
+  }
+  return RET_OK;
+}
+
 int ArithmeticNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs,
                                       const std::vector<lite::Tensor *> &outputs,
                                       const std::vector<ge::Operator *> &npu_inputs) {
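
The new SetActivation helper relies on the HiAI convention that hiai::op::Activation selects its nonlinearity through an integer attr_mode; the two values used here are 1 (ReLU) and 14 (ReLU6). A hypothetical standalone mapping, equivalent to the branches above:

    // Hypothetical helper (not part of the patch): MindSpore activation
    // type -> HiAI Activation attr_mode, mirroring SetActivation's branches.
    int ToHiaiActivationMode(int activation_type) {
      switch (activation_type) {
        case ActivationType_RELU:
          return 1;   // HiAI mode 1 = ReLU
        case ActivationType_RELU6:
          return 14;  // HiAI mode 14 = ReLU6
        default:
          return -1;  // unsupported; caller reports an error
      }
    }
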
@@ -100,6 +117,9 @@ int ArithmeticNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs,
     case PrimitiveType_Maximum:
       op = CreateOperator<hiai::op::Maximum>(npu_inputs, name_);
       break;
+    case PrimitiveType_Minimum:
+      op = CreateOperator<hiai::op::Minimum>(npu_inputs, name_);
+      break;
     case PrimitiveType_SquaredDifference:
       op = CreateOperator<hiai::op::SquaredDifference>(npu_inputs, name_);
       break;
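
The Minimum case reuses the CreateOperator factory whose signature appears in the context of the earlier hunk. Its body is not part of this diff; the following is only a sketch of what it plausibly does, and the set_input_x1/set_input_x2 names are an assumption based on the usual HiAI binary-operator interface:

    // Sketch only: signature taken from the hunk context above; the body
    // and the x1/x2 input names are assumptions.
    template <typename T>
    ge::Operator *CreateOperator(const std::vector<ge::Operator *> &npu_inputs, const std::string &name) {
      auto op = new (std::nothrow) T(name);
      if (op == nullptr) {
        MS_LOG(ERROR) << "New npu operator " << name << " failed.";
        return nullptr;
      }
      op->set_input_x1(*npu_inputs[0]);  // left operand
      op->set_input_x2(*npu_inputs[1]);  // right operand
      return op;
    }
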
@@ -132,21 +152,10 @@ int ArithmeticNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs,
   }
   op_ = op;
-  if (activation_type_ != ActivationType_NO_ACTIVATION) {
-    act_ = new (std::nothrow) hiai::op::Activation(name_ + "_act");
-    if (act_ == nullptr) {
-      MS_LOG(ERROR) << "New activation npu operator for op " << name_ << " failed.";
-      return RET_ERROR;
-    }
-    act_->set_input_x(*op_);
-    if (activation_type_ == ActivationType_RELU) {
-      act_->set_attr_mode(1);
-    } else if (activation_type_ == ActivationType_RELU6) {
-      act_->set_attr_mode(14);
-    } else {
-      MS_LOG(ERROR) << "Unsupport activation type for op " << name_;
-      return RET_ERROR;
-    }
+  auto ret = SetActivation();
+  if (ret != RET_OK) {
+    MS_LOG(ERROR) << "Arithmetic npu op set activation failed.";
+    return RET_ERROR;
   }
   return RET_OK;
 }
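
With the activation setup factored out, op_ holds the arithmetic node and act_ the optional fused activation node. GetNPUOp, declared in the header hunk below, presumably returns whichever node terminates this small subgraph; a sketch under that assumption:

    // Assumed behavior of GetNPUOp; its body is not shown in this diff.
    ge::Operator *ArithmeticNPUKernel::GetNPUOp() {
      if (activation_type_ == ActivationType_NO_ACTIVATION) {
        return op_;  // no fused activation: the arithmetic op is the output
      }
      return act_;   // activation consumes op_ and becomes the output
    }
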
@@ -39,6 +39,7 @@ class ArithmeticNPUKernel : public NPUKernel {
   ge::Operator *GetNPUOp() override;
 
  private:
+  int SetActivation();
   int activation_type_;
   ge::Operator *op_ = nullptr;
   hiai::op::Activation *act_ = nullptr;
@@ -68,6 +68,9 @@ int ArithmeticSelfNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs,
     case PrimitiveType_Sqrt:
       op = CreateOperator<hiai::op::Sqrt>(npu_inputs[0], name_);
       break;
+    case PrimitiveType_Rsqrt:
+      op = CreateOperator<hiai::op::Rsqrt>(npu_inputs[0], name_);
+      break;
     case PrimitiveType_Sin:
       op = CreateOperator<hiai::op::Sin>(npu_inputs[0], name_);
       break;
@@ -53,7 +53,7 @@ int InstanceNormNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs,
   ge::TensorDesc gamma_tensor_desc(lite::ConverterToNPUShape({1, gamma_shape[0], 1, 1}), ge::FORMAT_NCHW,
                                    lite::ConverterToNPUDataType(inputs[1]->data_type()));
   gamma_tensor->SetTensorDesc(gamma_tensor_desc);
-  gamma_tensor->SetData(reinterpret_cast<const uint8_t *>(inputs.data()), inputs[1]->Size());
+  gamma_tensor->SetData(reinterpret_cast<const uint8_t *>(inputs[1]->data_c()), inputs[1]->Size());
   op_->set_input_gamma(*gamma);
 
   auto beta = new (std::nothrow) hiai::op::Const(name_ + "_beta");
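
The one-line gamma fix (and the matching beta fixes in the next hunk) corrects a classic container mix-up: inputs is a std::vector<lite::Tensor *>, so inputs.data() points at the vector's internal array of tensor pointers, while inputs[1]->data_c() returns the gamma tensor's actual payload. A self-contained analogy of the bug pattern (hypothetical, not MindSpore code):

    #include <cstring>
    #include <vector>

    int main() {
      int payload[4] = {1, 2, 3, 4};
      std::vector<int *> v = {payload, payload, payload, payload};
      char dst[sizeof(payload)];
      // Bug pattern: copies bytes of the pointers themselves, not the data.
      std::memcpy(dst, reinterpret_cast<const char *>(v.data()), sizeof(payload));
      // Fixed pattern: copies the bytes behind v[0], the real payload.
      std::memcpy(dst, reinterpret_cast<const char *>(v[0]), sizeof(payload));
      return 0;
    }
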
@@ -61,16 +61,16 @@ int InstanceNormNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs,
     MS_LOG(ERROR) << "New beta const failed.";
     return RET_ERROR;
   }
-  auto beta_shape = inputs[1]->shape();
+  auto beta_shape = inputs[2]->shape();
   std::shared_ptr<ge::Tensor> beta_tensor = std::shared_ptr<ge::Tensor>(new (std::nothrow) ge::Tensor());
   if (beta_tensor == nullptr) {
     MS_LOG(ERROR) << "new beta_tensor failed.";
     return RET_ERROR;
   }
   ge::TensorDesc beta_tensor_desc(lite::ConverterToNPUShape({1, beta_shape[0], 1, 1}), ge::FORMAT_NCHW,
-                                  lite::ConverterToNPUDataType(inputs[1]->data_type()));
+                                  lite::ConverterToNPUDataType(inputs[2]->data_type()));
   beta_tensor->SetTensorDesc(beta_tensor_desc);
-  beta_tensor->SetData(reinterpret_cast<const uint8_t *>(inputs.data()), inputs[1]->Size());
+  beta_tensor->SetData(reinterpret_cast<const uint8_t *>(inputs[2]->data_c()), inputs[2]->Size());
   op_->set_input_beta(*beta);
   op_->set_attr_epsilon(instance_norm_param_->epsilon_);
   return RET_OK;
@@ -24,6 +24,13 @@ using mindspore::schema::PrimitiveType_Scale;
 
 namespace mindspore::kernel {
 int ScaleNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                               OpParameter *opParameter) {
+  if (scale_parameter_->axis_ < 0) {
+    scale_parameter_->axis_ = scale_parameter_->axis_ + inputs.size();
+  }
+  if (scale_parameter_->axis_ != 1) {
+    MS_LOG(ERROR) << "Npu scale axis attr only support 1, now is " << scale_parameter_->axis_;
+    return RET_ERROR;
+  }
   return RET_OK;
 }
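
For comparison, negative-axis wrapping is conventionally done against the rank of the data tensor rather than the number of input tensors (the patch above wraps by inputs.size(), the tensor count). For a 4D NCHW input, axis -3 resolves to 1, the channel axis that the check requires. A small sketch of the conventional form:

    // Conventional negative-axis normalization, shown for reference only.
    int NormalizeAxis(int axis, int rank) {
      return axis < 0 ? axis + rank : axis;
    }
    // NormalizeAxis(-3, 4) == 1  -> the NCHW channel dimension
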