Merge pull request !5620 from lyvette/parsertags/v1.0.0
| @@ -19,7 +19,9 @@ | |||||
| #include "utils/log_adapter.h" | #include "utils/log_adapter.h" | ||||
| namespace mindspore::lite { | namespace mindspore::lite { | ||||
| std::shared_ptr<Allocator> Allocator::Create() { return std::shared_ptr<Allocator>(new DefaultAllocator()); } | |||||
| std::shared_ptr<Allocator> Allocator::Create() { | |||||
| return std::shared_ptr<Allocator>(new (std::nothrow) DefaultAllocator()); | |||||
| } | |||||
| DefaultAllocator::DefaultAllocator() {} | DefaultAllocator::DefaultAllocator() {} | ||||
| @@ -19,7 +19,11 @@ | |||||
| namespace mindspore::kernel { | namespace mindspore::kernel { | ||||
| Matrix *TransformMatrixGenerator(int m, int k) { | Matrix *TransformMatrixGenerator(int m, int k) { | ||||
| auto matrix = new Matrix; | |||||
| auto matrix = new (std::nothrow) Matrix; | |||||
| if (matrix == nullptr) { | |||||
| MS_LOG(ERROR) << "matrix is nullptr."; | |||||
| return nullptr; | |||||
| } | |||||
| auto data = malloc(m * k * sizeof(float)); | auto data = malloc(m * k * sizeof(float)); | ||||
| if (data == nullptr) { | if (data == nullptr) { | ||||
| MS_LOG(ERROR) << "Malloc matrix data failed."; | MS_LOG(ERROR) << "Malloc matrix data failed."; | ||||
| @@ -66,7 +66,7 @@ kernel::LiteKernel *CpuSoftmaxInt8KernelCreator(const std::vector<lite::tensor:: | |||||
| return nullptr; | return nullptr; | ||||
| } | } | ||||
| MS_ASSERT(desc.type == schema::PrimitiveType_SoftMax); | MS_ASSERT(desc.type == schema::PrimitiveType_SoftMax); | ||||
| SoftmaxInt8CPUKernel *kernel = new (std::nothrow) SoftmaxInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive); | |||||
| auto *kernel = new (std::nothrow) SoftmaxInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive); | |||||
| if (kernel == nullptr) { | if (kernel == nullptr) { | ||||
| MS_LOG(ERROR) << "new SoftmaxCPUKernel fail!"; | MS_LOG(ERROR) << "new SoftmaxCPUKernel fail!"; | ||||
| return nullptr; | return nullptr; | ||||
| @@ -91,7 +91,7 @@ kernel::LiteKernel *CpuSoftmaxFp32KernelCreator(const std::vector<lite::tensor:: | |||||
| return nullptr; | return nullptr; | ||||
| } | } | ||||
| MS_ASSERT(desc.type == schema::PrimitiveType_SoftMax); | MS_ASSERT(desc.type == schema::PrimitiveType_SoftMax); | ||||
| SoftmaxCPUKernel *kernel = new (std::nothrow) SoftmaxCPUKernel(opParameter, inputs, outputs, ctx, primitive); | |||||
| auto *kernel = new (std::nothrow) SoftmaxCPUKernel(opParameter, inputs, outputs, ctx, primitive); | |||||
| if (kernel == nullptr) { | if (kernel == nullptr) { | ||||
| MS_LOG(ERROR) << "new SoftmaxCPUKernel fail!"; | MS_LOG(ERROR) << "new SoftmaxCPUKernel fail!"; | ||||
| return nullptr; | return nullptr; | ||||
| @@ -36,7 +36,10 @@ class DeConvolutionFp16CPUKernel : public ConvolutionBaseFP16CPUKernel { | |||||
| const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx, | const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx, | ||||
| const mindspore::lite::PrimitiveC *primitive) | const mindspore::lite::PrimitiveC *primitive) | ||||
| : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive) { | : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive) { | ||||
| matmul_param_ = new MatMulParameter(); | |||||
| matmul_param_ = new (std::nothrow) MatMulParameter(); | |||||
| if (matmul_param_ == nullptr) { | |||||
| MS_LOG(ERROR) << "new MatMulParameter fail!"; | |||||
| } | |||||
| } | } | ||||
| ~DeConvolutionFp16CPUKernel() override; | ~DeConvolutionFp16CPUKernel() override; | ||||
| int Init() override; | int Init() override; | ||||
| @@ -37,7 +37,10 @@ class Convolution1x1CPUKernel : public ConvolutionBaseCPUKernel { | |||||
| const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx, | const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx, | ||||
| const mindspore::lite::PrimitiveC *primitive) | const mindspore::lite::PrimitiveC *primitive) | ||||
| : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) { | : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) { | ||||
| matmul_param_ = new MatMulParameter(); | |||||
| matmul_param_ = new (std::nothrow) MatMulParameter(); | |||||
| if (matmul_param_ == nullptr) { | |||||
| MS_LOG(ERROR) << "new MatMulParameter fail!"; | |||||
| } | |||||
| } | } | ||||
| ~Convolution1x1CPUKernel(); | ~Convolution1x1CPUKernel(); | ||||
| int Init() override; | int Init() override; | ||||
| @@ -282,7 +282,11 @@ kernel::LiteKernel *CpuArithmeticGradFp32KernelCreator(const std::vector<lite::t | |||||
| return nullptr; | return nullptr; | ||||
| } | } | ||||
| auto *kernel = new (std::nothrow) ArithmeticGradCPUKernel(opParameter, inputs, outputs, ctx, primitive); | auto *kernel = new (std::nothrow) ArithmeticGradCPUKernel(opParameter, inputs, outputs, ctx, primitive); | ||||
| MS_ASSERT(kernel != nullptr); | |||||
| if (kernel == nullptr) { | |||||
| MS_LOG(ERROR) << "new ArithmeticGradCPUKernel fail!"; | |||||
| return nullptr; | |||||
| } | |||||
| auto ret = kernel->Init(); | auto ret = kernel->Init(); | ||||
| if (ret != RET_OK) { | if (ret != RET_OK) { | ||||
| MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " | MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " | ||||
| @@ -103,7 +103,10 @@ kernel::LiteKernel *CpuBiasGradFp32KernelCreator(const std::vector<lite::tensor: | |||||
| MS_ASSERT(desc.type == schema::PrimitiveType_BiasGrad); | MS_ASSERT(desc.type == schema::PrimitiveType_BiasGrad); | ||||
| auto *kernel = | auto *kernel = | ||||
| new (std::nothrow) BiasGradCPUKernel(reinterpret_cast<OpParameter *>(opParameter), inputs, outputs, ctx, primitive); | new (std::nothrow) BiasGradCPUKernel(reinterpret_cast<OpParameter *>(opParameter), inputs, outputs, ctx, primitive); | ||||
| MS_ASSERT(kernel != nullptr); | |||||
| if (kernel == nullptr) { | |||||
| MS_LOG(ERROR) << "new BiasGradCPUKernel fail!"; | |||||
| return nullptr; | |||||
| } | |||||
| auto ret = kernel->Init(); | auto ret = kernel->Init(); | ||||
| if (RET_OK != ret) { | if (RET_OK != ret) { | ||||
| @@ -33,7 +33,11 @@ namespace mindspore::kernel { | |||||
| int BNGradInputCPUKernel::Init() { | int BNGradInputCPUKernel::Init() { | ||||
| auto bn_param = reinterpret_cast<bnParameter *>(opParameter); | auto bn_param = reinterpret_cast<bnParameter *>(opParameter); | ||||
| workspace_size = 5 * bn_param->channels; | workspace_size = 5 * bn_param->channels; | ||||
| workspace = new float[workspace_size]; | |||||
| workspace = new (std::nothrow) float[workspace_size]; | |||||
| if (workspace == nullptr) { | |||||
| MS_LOG(ERROR) << "new workspace fail!"; | |||||
| return RET_ERROR; | |||||
| } | |||||
| if (2 != this->inputs_.size()) { | if (2 != this->inputs_.size()) { | ||||
| MS_LOG(ERROR) << "Conv2d Grad should has 2 inputs"; | MS_LOG(ERROR) << "Conv2d Grad should has 2 inputs"; | ||||
| @@ -61,7 +61,11 @@ int ConvolutionGradFilterCPUKernel::Init() { | |||||
| int ws_size = conv_param->output_h_ * conv_param->output_w_ * conv_param->kernel_h_ * conv_param->kernel_w_ * | int ws_size = conv_param->output_h_ * conv_param->output_w_ * conv_param->kernel_h_ * conv_param->kernel_w_ * | ||||
| conv_param->input_channel_ / conv_param->group_; | conv_param->input_channel_ / conv_param->group_; | ||||
| workspace = new float[ws_size]; | |||||
| workspace = new (std::nothrow) float[ws_size]; | |||||
| if (workspace == nullptr) { | |||||
| MS_LOG(ERROR) << "new workspace fail!"; | |||||
| return RET_ERROR; | |||||
| } | |||||
| int output_w = 0; | int output_w = 0; | ||||
| int output_h = 0; | int output_h = 0; | ||||
| @@ -68,7 +68,10 @@ kernel::LiteKernel *CpuOptMomentumFp32KernelCreator(const std::vector<lite::tens | |||||
| const mindspore::lite::PrimitiveC *primitive) { | const mindspore::lite::PrimitiveC *primitive) { | ||||
| MS_ASSERT(desc.type == schema::PrimitiveType_OptMomentum); | MS_ASSERT(desc.type == schema::PrimitiveType_OptMomentum); | ||||
| auto *kernel = new (std::nothrow) OptMomentumCPUKernel(opParameter, inputs, outputs, ctx, primitive); | auto *kernel = new (std::nothrow) OptMomentumCPUKernel(opParameter, inputs, outputs, ctx, primitive); | ||||
| MS_ASSERT(kernel != nullptr); | |||||
| if (kernel == nullptr) { | |||||
| MS_LOG(ERROR) << "new OptMomentumCPUKernel fail!"; | |||||
| return nullptr; | |||||
| } | |||||
| auto ret = kernel->Init(); | auto ret = kernel->Init(); | ||||
| if (0 != ret) { | if (0 != ret) { | ||||
| @@ -181,7 +181,11 @@ kernel::LiteKernel *CpuPoolingGradFp32KernelCreator(const std::vector<lite::tens | |||||
| MS_ASSERT(desc.type == schema::PrimitiveType_PoolingGrad); | MS_ASSERT(desc.type == schema::PrimitiveType_PoolingGrad); | ||||
| auto *kernel = new (std::nothrow) PoolingGradCPUKernel(opParameter, inputs, outputs, ctx, primitive); | auto *kernel = new (std::nothrow) PoolingGradCPUKernel(opParameter, inputs, outputs, ctx, primitive); | ||||
| MS_ASSERT(kernel != nullptr); | |||||
| if (kernel == nullptr) { | |||||
| MS_LOG(ERROR) << "new PoolingGradCPUKernel fail!"; | |||||
| return nullptr; | |||||
| } | |||||
| auto ret = kernel->Init(); | auto ret = kernel->Init(); | ||||
| if (RET_OK != ret) { | if (RET_OK != ret) { | ||||
| MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " | MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " | ||||
| @@ -55,6 +55,11 @@ kernel::LiteKernel *CpuPowerGradFp32KernelCreator(const std::vector<lite::tensor | |||||
| MS_ASSERT(opParameter != nullptr); | MS_ASSERT(opParameter != nullptr); | ||||
| MS_ASSERT(desc.type == schema::PrimitiveType_PowerGrad); | MS_ASSERT(desc.type == schema::PrimitiveType_PowerGrad); | ||||
| auto *kernel = new (std::nothrow) PowerGradCPUKernel(opParameter, inputs, outputs, ctx, primitive); | auto *kernel = new (std::nothrow) PowerGradCPUKernel(opParameter, inputs, outputs, ctx, primitive); | ||||
| if (kernel == nullptr) { | |||||
| MS_LOG(ERROR) << "new PowerGradCPUKernel fail!"; | |||||
| return nullptr; | |||||
| } | |||||
| auto ret = kernel->Init(); | auto ret = kernel->Init(); | ||||
| if (ret != RET_OK) { | if (ret != RET_OK) { | ||||
| MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " | MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " | ||||
| @@ -79,7 +79,11 @@ int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::Run() { | |||||
| } | } | ||||
| size_t data_size = inputs_.at(0)->ElementsNum(); | size_t data_size = inputs_.at(0)->ElementsNum(); | ||||
| float *losses = new (std::nothrow) float[data_size]; | float *losses = new (std::nothrow) float[data_size]; | ||||
if (losses == nullptr) {
  MS_LOG(ERROR) << "losses is null";
  // BUG in the proposed change: `return nullptr;` does not compile here —
  // Run() returns int, and std::nullptr_t has no conversion to int.
  return RET_ERROR;
}
| std::fill(losses, losses + data_size, 0); | std::fill(losses, losses + data_size, 0); | ||||
| MS_ASSERT(out != nullptr); | MS_ASSERT(out != nullptr); | ||||
| @@ -29,6 +29,11 @@ namespace mindspore::kernel { | |||||
| DeConvInt8CPUKernel::~DeConvInt8CPUKernel() { | DeConvInt8CPUKernel::~DeConvInt8CPUKernel() { | ||||
| FreeTmpBuffer(); | FreeTmpBuffer(); | ||||
| ConvolutionBaseCPUKernel::FreeQuantParam(); | ConvolutionBaseCPUKernel::FreeQuantParam(); | ||||
| if (matmul_param_ != nullptr) { | |||||
| delete matmul_param_; | |||||
| matmul_param_ = nullptr; | |||||
| } | |||||
| } | } | ||||
| void DeConvInt8CPUKernel::FreeTmpBuffer() { | void DeConvInt8CPUKernel::FreeTmpBuffer() { | ||||