diff --git a/mindspore/lite/nnacl/infer/arithmetic_grad_infer.c b/mindspore/lite/nnacl/infer/arithmetic_grad_infer.c index ed00572b17..a6d85caff4 100644 --- a/mindspore/lite/nnacl/infer/arithmetic_grad_infer.c +++ b/mindspore/lite/nnacl/infer/arithmetic_grad_infer.c @@ -103,3 +103,4 @@ int ArithmeticGradInferShape(const TensorC *const *inputs, size_t inputs_size, T REG_INFER(DivGrad, PrimType_DivGrad, ArithmeticGradInferShape) REG_INFER(MulGrad, PrimType_MulGrad, ArithmeticGradInferShape) +REG_INFER(MinimumGrad, PrimType_MinimumGrad, ArithmeticGradInferShape) diff --git a/mindspore/lite/nnacl/infer/layer_norm_infer.c b/mindspore/lite/nnacl/infer/layer_norm_infer.c index 1b132b9c0f..ed8590991f 100644 --- a/mindspore/lite/nnacl/infer/layer_norm_infer.c +++ b/mindspore/lite/nnacl/infer/layer_norm_infer.c @@ -37,6 +37,8 @@ int LayerNormInferShape(const TensorC *const *inputs, size_t inputs_size, Tensor if (!param->op_parameter_.infer_flag_) { return NNACL_INFER_INVALID; } + param->begin_norm_axis_ = + param->begin_norm_axis_ < 0 ? 
param->begin_norm_axis_ + input->shape_size_ : param->begin_norm_axis_; SetShapeTensor(output, input); // take care of other outputs if (outputs_size == 3) { @@ -45,10 +47,9 @@ int LayerNormInferShape(const TensorC *const *inputs, size_t inputs_size, Tensor SetDataTypeFormat(output_mean, input); SetDataTypeFormat(output_var, input); int size = 0; - for (int i = param->begin_norm_axis_; i < input->shape_size_; i++) { - output_mean->shape_[size] = input->shape_[i]; - output_var->shape_[size] = input->shape_[i]; - size++; + for (; size < param->begin_norm_axis_; size++) { + output_mean->shape_[size] = input->shape_[size]; + output_var->shape_[size] = input->shape_[size]; } output_mean->shape_size_ = size; output_var->shape_size_ = size; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc index 5d5e1390f6..8dd61109d8 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc @@ -35,8 +35,8 @@ using mindspore::schema::PrimitiveType_ActivationGrad; namespace mindspore::kernel { int ActivationGradCPUKernel::Init() { - if (in_tensors_.size() != 2) { - MS_LOG(ERROR) << "ActivationGrad should have 2 input tensors"; + if (in_tensors_.size() < 2) { + MS_LOG(ERROR) << "ActivationGrad should have at least 2 input tensors"; return RET_ERROR; } return RET_OK;