
!6362 【MSLITE】fix codex checking

Merge pull request !6362 from lz/master
tags/v1.0.0
mindspore-ci-bot (Gitee), 5 years ago
parent commit 3a7098d331
8 changed files with 9 additions and 30 deletions
  1. +0 -1   mindspore/lite/include/train_session.h
  2. +0 -1   mindspore/lite/nnacl/fp32_grad/pooling_grad.h
  3. +0 -4   mindspore/lite/src/ops/apply_momentum.cc
  4. +0 -1   mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc
  5. +0 -3   mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.h
  6. +9 -11  mindspore/lite/src/runtime/kernel/arm/fp32_grad/depend.cc
  7. +0 -5   mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.cc
  8. +0 -4   mindspore/lite/src/train/train_session.cc

+0 -1  mindspore/lite/include/train_session.h

@@ -18,7 +18,6 @@
 #include <vector>
 #include <string>
 #include <unordered_map>
-// #include "include/lite_session.h"
 #include "src/lite_session.h"
 
 namespace mindspore {


+0 -1  mindspore/lite/nnacl/fp32_grad/pooling_grad.h

@@ -23,7 +23,6 @@
 extern "C" {
 #endif
 void AvgPoolingGrad(const float *input_ptr, float *output_ptr, PoolingParameter *pooling_param);
-// void MaxPoolingGrad(const float *dy, const int *indices_ptr, float *output_ptr, PoolingParameter *pooling_param);
 void MaxPoolingGrad(const float *input_ptr, const float *dx_ptr, const float *dy_ptr, float *output_ptr,
                     PoolingParameter *pooling_param);
 #ifdef __cplusplus
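
Aside: the updated MaxPoolingGrad signature above drops the precomputed indices pointer and instead receives the forward tensors, which suggests the kernel now locates the winning positions itself. The sketch below only illustrates that general idea for a single 1-D window; the function name and arguments are invented for this note and are not the nnacl implementation.

/* Illustrative only: max-pooling backward routes the output gradient dy to the
 * input element that produced the forward maximum. dx is assumed to be
 * zero-initialized by the caller. */
static void MaxPoolGradWindow1D(const float *x, float dy, float *dx, int window) {
  int best = 0;
  for (int i = 1; i < window; ++i) {
    if (x[i] > x[best]) {
      best = i;
    }
  }
  dx[best] += dy;
}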


+0 -4  mindspore/lite/src/ops/apply_momentum.cc

@@ -65,10 +65,6 @@ int ApplyMomentum::InferShape(std::vector<lite::Tensor *> inputs, std::vector<li
     MS_LOG(ERROR) << "ApplyMomentum should have at 5 input tensors";
     return RET_ERROR;
   }
-  // if (outputs.empty()) {
-  //   MS_LOG(ERROR) << "ApplyMomentumCPUKernel error input output size!";
-  //   return RET_ERROR;
-  // }
 
   if (inputs[0]->ElementsNum() != inputs[1]->ElementsNum() || inputs[0]->ElementsNum() != inputs[3]->ElementsNum() ||
       inputs[2]->ElementsNum() != 1 || inputs[4]->ElementsNum() != 1) {
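
Aside: the element-count checks above are consistent with the usual ApplyMomentum input layout of weight, accumulation, learning rate, gradient and momentum, with the learning rate and momentum as scalars; that mapping is an assumption based on common framework convention, not something this diff states. The sketch below shows the plain (non-Nesterov) momentum update those shapes imply; the names are illustrative and this is not the MindSpore Lite kernel code.

/* Minimal sketch of one SGD-with-momentum step over n elements. */
static void ApplyMomentumStep(float *weight, float *accumulate, const float *gradient,
                              float lr, float momentum, int n) {
  for (int i = 0; i < n; ++i) {
    accumulate[i] = momentum * accumulate[i] + gradient[i];  /* update velocity */
    weight[i] -= lr * accumulate[i];                         /* apply the step */
  }
}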


+0 -1  mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc

@@ -58,7 +58,6 @@ int BNGradCPUKernel::Run() {
   auto *output_dx = out_tensors_.at(0);
   auto *output_scale = out_tensors_.at(1);
   auto *output_bias = out_tensors_.at(2);
-  // Tensor *bias = input[5];
   int batch = input_x->Batch();
   int channels = input_x->Channel();
   int spatial = input_x->Height() * input_x->Width();


+0 -3  mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.h

@@ -40,9 +40,6 @@ class ConvolutionGradInputCPUKernel : public LiteKernel {
  private:
   float *workspace;
 };
-
-// OpParameter *PopulateConvolutionGradInputParameter(const lite::Primitive *primitive);
-
 }  // namespace mindspore::kernel
 
 #endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GRAD_CONVOLUTION_GRAD_INPUT_H

+9 -11  mindspore/lite/src/runtime/kernel/arm/fp32_grad/depend.cc

@@ -33,17 +33,15 @@ int DependCPUKernel::Init() { return RET_OK; }
 int DependCPUKernel::ReSize() { return 0; }
 
 int DependCPUKernel::Run() {
-#if 0
-  auto ret = Prepare();
-  if (ret != RET_OK) {
-    MS_LOG(ERROR) << "Prepare failed.";
-    return RET_ERROR;
-  }
-  auto in = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
-  auto out = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
-
-  memcpy(out, in, in_tensors_.at(0)->Size());
-#endif
+  // auto ret = Prepare();
+  // if (ret != RET_OK) {
+  //   MS_LOG(ERROR) << "Prepare failed.";
+  //   return RET_ERROR;
+  // }
+  // auto in = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
+  // auto out = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
+  //
+  // memcpy(out, in, in_tensors_.at(0)->Size());
   return RET_OK;
 }



+0 -5  mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.cc

@@ -22,11 +22,9 @@
 #include "src/kernel_registry.h"
 #include "include/errorcode.h"
 
-// using mindspore::kernel::KERNEL_ARCH::kCPU;
 using mindspore::lite::KernelRegistrar;
 using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_OK;
-// using mindspore::schema::PrimitiveType_SoftMaxGrad;
 
 namespace mindspore::kernel {
 int SoftmaxGradCPUKernel::Init() {
@@ -71,7 +69,6 @@ int SoftmaxGradCPUKernel::Init() {
 int SoftmaxGradCPUKernel::ReSize() { return RET_OK; }
 
 int SoftmaxGradCPUKernel::Run() {
-  // auto input_ptr = reinterpret_cast<float *>(in_tensors_.at(kInputIndex)->MutableData());
   auto input_ptr = reinterpret_cast<float *>(in_tensors_.at(kInputIndex)->MutableData());
   auto yt_ptr = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());
   auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->MutableData());
@@ -85,7 +82,6 @@ kernel::LiteKernel *CpuSoftmaxGradFp32KernelCreator(const std::vector<lite::Tens
                                                     const kernel::KernelKey &desc,
                                                     const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(opParameter != nullptr);
-  // MS_ASSERT(desc.type == schema::PrimitiveType_SoftMaxGrad);
   auto *kernel = new (std::nothrow) SoftmaxGradCPUKernel(opParameter, inputs, outputs, ctx, primitive);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "new SoftmaxGradCPUKernel fail!";
@@ -101,5 +97,4 @@ kernel::LiteKernel *CpuSoftmaxGradFp32KernelCreator(const std::vector<lite::Tens
   return kernel;
 }
 
-// REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_SoftMaxGrad, CpuSoftmaxGradFp32KernelCreator)
 }  // namespace mindspore::kernel

+0 -4  mindspore/lite/src/train/train_session.cc

@@ -59,7 +59,6 @@ TrainSession::~TrainSession() {
 }
 
 void *TrainSession::ExportToBuf(lite::Model *model, void *buf, size_t *len) const {
-  // return model->ExportBuf(buf, len);
   return nullptr;
 }

@@ -79,9 +78,6 @@ int TrainSession::RunGraph(const session::KernelCallBack &before, const session:
   }
 
   MS_EXCEPTION_IF_NULL(this->context_);
-  // TODO(Emir)
-  // SetMaxWokerNum(context_->thread_num_);
-  // context_->running_ = true;
   lite::Executor executor;
   if (before == nullptr && after == nullptr) {
     return executor.Run(this->inputs_, this->outputs_, infference_kernels, this->context_->allocator.get());

