
!6268 cmake optimize for mindspore lite

Merge pull request !6268 from zhanghaibo/master
tags/v1.0.0
mindspore-ci-bot committed 5 years ago
commit a92e444f21
9 changed files with 17 additions and 8 deletions
  1. mindspore/lite/internal/src/kernel/fp32/matmul.cc (+1, -1)
  2. mindspore/lite/nnacl/fp32/batchnorm.c (+1, -1)
  3. mindspore/lite/nnacl/fp32/batchnorm.h (+1, -1)
  4. mindspore/lite/nnacl/fp32_grad/softmax_grad.h (+1, -1)
  5. mindspore/lite/nnacl/int8/space_to_batch_int8.c (+1, -1)
  6. mindspore/lite/src/CMakeLists.txt (+11, -0)
  7. mindspore/lite/src/ops/bias_grad.cc (+0, -1)
  8. mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.h (+1, -1)
  9. mindspore/lite/src/tensor.h (+0, -1)
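
As the diffs below show, the substantive change is in src/CMakeLists.txt: Release builds gain a POST_BUILD step that strips the symbol table from libmindspore-lite.so to shrink the shipped binary. The remaining eight files are style cleanups only: operator spacing, continuation-line alignment, and removal of stray blank lines.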

mindspore/lite/internal/src/kernel/fp32/matmul.cc (+1, -1)

@@ -116,7 +116,7 @@ int DoMatMulInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector
 
 int DoMatMul(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
              mindspore::lite::Allocator *allocator) {
-  if (in_tensors[0]->data_ == NULL || in_tensors[1]->data_ ==NULL) {
+  if (in_tensors[0]->data_ == NULL || in_tensors[1]->data_ == NULL) {
     LITE_LOG_ERROR("input data is NULL!");
     return RET_PARAM_INVALID;
   }


mindspore/lite/nnacl/fp32/batchnorm.c (+1, -1)

@@ -68,7 +68,7 @@ void FusedBatchNormFp32MeanVar(const float *input, float momentum, float *run_me
     run_mean[f] = run_mean[f] / N;
     run_var[f] = run_var[f] / N - run_mean[f] * run_mean[f];
     save_mean[f] = momentum * save_mean[f] + (1 - momentum) * run_mean[f];
-    float inv_var = 1.f/sqrt(run_var[f]+param->epsilon_);
+    float inv_var = 1.f / sqrt(run_var[f] + param->epsilon_);
     save_inv_var[f] = momentum * save_inv_var[f] + (1 - momentum) * inv_var;
   }
 }
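
For context on the line being reformatted: the loop keeps exponential running statistics of the form save = momentum * save + (1 - momentum) * new, and the epsilon_ term under the square root in inv_var = 1 / sqrt(var + epsilon) guards against division by zero for channels whose variance is near zero. The diff only normalizes the spacing around those operators; the arithmetic is unchanged.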

mindspore/lite/nnacl/fp32/batchnorm.h (+1, -1, indentation fix only)

@@ -29,7 +29,7 @@ void FusedBatchNormFp32(const void *input, const void *scale, const void *offset
                         const void *variance, BatchNormParameter *param, int task_id, void *output);
 
 void FusedBatchNormFp32MeanVar(const float *input, float momentum, float *run_mean, float *run_var,
-                              BatchNormParameter *param, float *save_mean, float *save_var);
+                               BatchNormParameter *param, float *save_mean, float *save_var);
 #ifdef __cplusplus
 }
 #endif


mindspore/lite/nnacl/fp32_grad/softmax_grad.h (+1, -1, indentation fix only)

@@ -33,7 +33,7 @@ typedef struct SoftmaxCrossEntropyParameter {
 } SoftmaxCrossEntropyParameter;
 
 void SoftmaxGrad(const float *input_ptr, const float *yt_ptr, float *output_ptr, float *sum_data,
-                float *sum_mul, SoftmaxParameter *parameter);
+                 float *sum_mul, SoftmaxParameter *parameter);
 #ifdef __cplusplus
 }
 #endif


mindspore/lite/nnacl/int8/space_to_batch_int8.c (+1, -1, indentation fix only)

@@ -17,7 +17,7 @@
 #include "nnacl/arithmetic_common.h"
 
 void DoSpaceToBatchNHWCInt8(const int8_t *input, int8_t *output, int *block_sizes, int *in_shape,
-                           int *out_shape) {
+                            int *out_shape) {
   int out_dim0 = out_shape[0];
   int out_dim1 = out_shape[1];
   int out_dim2 = out_shape[2];


mindspore/lite/src/CMakeLists.txt (+11, -0)

@@ -95,3 +95,14 @@ if ("${CMAKE_BUILD_TYPE}" STREQUAL "Release" AND (PLATFORM_ARM64 OR PLATFORM_ARM
                      ${TOP_DIR}/mindspore/lite/build/src/libmindspore-lite.so)
 endif ()
 
+if ("${CMAKE_BUILD_TYPE}" STREQUAL "Release")
+    if (PLATFORM_ARM64 OR PLATFORM_ARM32)
+        add_custom_command(TARGET mindspore-lite POST_BUILD
+                COMMAND ${ANDROID_NDK}/toolchains/aarch64-linux-android-4.9/prebuilt/linux-x86_64/aarch64-linux-android/bin/strip
+                ${TOP_DIR}/mindspore/lite/build/src/libmindspore-lite.so)
+    elseif (NOT WIN32)
+        add_custom_command(TARGET mindspore-lite POST_BUILD
+                COMMAND strip ${TOP_DIR}/mindspore/lite/build/src/libmindspore-lite.so)
+    endif ()
+endif ()
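
This hunk is the CMake optimization the PR title refers to: after a Release link, the freshly built libmindspore-lite.so is stripped of its symbol table (via the NDK's aarch64 strip on Android targets, the host strip elsewhere except Windows), which shrinks the shipped library. As a hedged sketch, not part of this commit: the same POST_BUILD step could be written without hard-coding the gcc-4.9 NDK path, using CMake's built-in CMAKE_STRIP variable (resolved from the active toolchain) and the $<TARGET_FILE:...> generator expression; the target name mindspore-lite is taken from the diff above.

    # Sketch only, not the commit's code: toolchain-agnostic Release strip.
    # CMAKE_STRIP points at the toolchain's strip binary (the NDK one when
    # cross-compiling for Android), and $<TARGET_FILE:mindspore-lite>
    # expands to the actual output path of the built library.
    if ("${CMAKE_BUILD_TYPE}" STREQUAL "Release" AND NOT WIN32)
        add_custom_command(TARGET mindspore-lite POST_BUILD
                COMMAND ${CMAKE_STRIP} $<TARGET_FILE:mindspore-lite>)
    endif ()

Pinning the explicit NDK path, as the commit does, trades that portability for certainty about exactly which strip binary runs.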


mindspore/lite/src/ops/bias_grad.cc (+0, -1)

@@ -100,6 +100,5 @@ int BiasGrad::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> out
 
   return RET_OK;
 }
-
 }  // namespace lite
 }  // namespace mindspore

mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.h (+1, -1)

@@ -33,7 +33,7 @@ class SparseSoftmaxCrossEntropyWithLogitsCPUKernel : public LossKernel {
                                                 const std::vector<lite::Tensor *> &outputs,
                                                 const lite::Context *ctx,
                                                 const mindspore::lite::PrimitiveC *primitive)
-      : LossKernel(parameter, inputs, outputs, ctx, primitive) , losses_(nullptr), sum_data_(nullptr) {
+      : LossKernel(parameter, inputs, outputs, ctx, primitive), losses_(nullptr), sum_data_(nullptr) {
     param = reinterpret_cast<SoftmaxCrossEntropyParameter *>(parameter);
   }
   ~SparseSoftmaxCrossEntropyWithLogitsCPUKernel() override {


mindspore/lite/src/tensor.h (+0, -1)

@@ -30,7 +30,6 @@
 
 namespace mindspore {
 namespace lite {
-
 struct QuantArg {
   double scale;
   int32_t zeroPoint;

