
!8684 [MS][LITE] remove internal

From: @jianghui58
Reviewed-by: @zhanghaibo5,@zhang_xue_tong
Signed-off-by:
tags/v1.1.0
mindspore-ci-bot committed 5 years ago
commit de60d1d98f
44 changed files with 2 additions and 4161 deletions
1. +1 -1 mindspore/ccsrc/minddata/dataset/api/de_tensor.cc
2. +0 -1 mindspore/lite/CMakeLists.txt
3. +0 -47 mindspore/lite/internal/CMakeLists.txt
4. +0 -40 mindspore/lite/internal/include/context.h
5. +0 -55 mindspore/lite/internal/include/errorcode.h
6. +0 -91 mindspore/lite/internal/include/lite_session.h
7. +0 -32 mindspore/lite/internal/include/lite_utils.h
8. +0 -249 mindspore/lite/internal/include/model.h
9. +0 -151 mindspore/lite/internal/include/ms_tensor.h
10. +0 -100 mindspore/lite/internal/include/string.h
11. +0 -117 mindspore/lite/internal/include/vector.h
12. +0 -220 mindspore/lite/internal/src/allocator.cc
13. +0 -60 mindspore/lite/internal/src/allocator.h
14. +0 -357 mindspore/lite/internal/src/common/string.cc
15. +0 -303 mindspore/lite/internal/src/common/vector.cc
16. +0 -31 mindspore/lite/internal/src/kernel/common/common_infershape.cc
17. +0 -24 mindspore/lite/internal/src/kernel/common/common_infershape.h
18. +0 -54 mindspore/lite/internal/src/kernel/fp32/activation.cc
19. +0 -27 mindspore/lite/internal/src/kernel/fp32/activation.h
20. +0 -197 mindspore/lite/internal/src/kernel/fp32/arithmetic.cc
21. +0 -29 mindspore/lite/internal/src/kernel/fp32/arithmetic.h
22. +0 -47 mindspore/lite/internal/src/kernel/fp32/arithmetic_self.cc
23. +0 -28 mindspore/lite/internal/src/kernel/fp32/arithmetic_self.h
24. +0 -71 mindspore/lite/internal/src/kernel/fp32/bias_add.cc
25. +0 -28 mindspore/lite/internal/src/kernel/fp32/bias_add.h
26. +0 -176 mindspore/lite/internal/src/kernel/fp32/matmul.cc
27. +0 -27 mindspore/lite/internal/src/kernel/fp32/matmul.h
28. +0 -162 mindspore/lite/internal/src/kernel/fp32/reduce.cc
29. +0 -29 mindspore/lite/internal/src/kernel/fp32/reduce.h
30. +0 -56 mindspore/lite/internal/src/kernel/fp32_grad/activation_grad.cc
31. +0 -28 mindspore/lite/internal/src/kernel/fp32_grad/activation_grad.h
32. +0 -51 mindspore/lite/internal/src/kernel/fp32_grad/arithmetic_self_grad.cc
33. +0 -28 mindspore/lite/internal/src/kernel/fp32_grad/arithmetic_self_grad.h
34. +0 -53 mindspore/lite/internal/src/lite_log.h
35. +0 -218 mindspore/lite/internal/src/lite_session.cc
36. +0 -240 mindspore/lite/internal/src/ms_tensor.cc
37. +1 -1 mindspore/lite/test/CMakeLists.txt
38. +0 -68 mindspore/lite/test/ut/internal/CMakeLists.txt
39. +0 -99 mindspore/lite/test/ut/internal/allocator_test.cc
40. +0 -80 mindspore/lite/test/ut/internal/infer_test.cc
41. +0 -99 mindspore/lite/test/ut/internal/src/kernel/fp32/arithmetic_fp32_test.cc
42. +0 -91 mindspore/lite/test/ut/internal/src/kernel/fp32/bias_add_fp32_test.cc
43. +0 -241 mindspore/lite/test/ut/internal/src/kernel/fp32/reduce_fp32_test.cc
44. +0 -54 mindspore/lite/test/ut/internal/vector_test.cc

+1 -1 mindspore/ccsrc/minddata/dataset/api/de_tensor.cc

@@ -19,7 +19,7 @@
#include "minddata/dataset/include/de_tensor.h"
#include "minddata/dataset/include/type_id.h"
#include "mindspore/core/ir/dtype/type_id.h"
-#include "mindspore/lite/internal/include/ms_tensor.h"
+#include "mindspore/lite/include/ms_tensor.h"
#include "utils/hashing.h"
#ifndef ENABLE_ANDROID
#include "utils/log_adapter.h"


+0 -1 mindspore/lite/CMakeLists.txt

@@ -212,7 +212,6 @@ if (BUILD_MINDDATA STREQUAL "lite_cv")
endif ()

add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/src)
-add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/internal)
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/nnacl)
if (ENABLE_TOOLS)
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/tools/benchmark)


+0 -47 mindspore/lite/internal/CMakeLists.txt

@@ -1,47 +0,0 @@
cmake_minimum_required(VERSION 3.14)
project (Lite_Internal)
set(TOP_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../)
set(CMAKE_CXX_COMPILER ${CMAKE_C_COMPILER})
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions")
include_directories(${TOP_DIR})
add_compile_definitions(ENABLE_NNACL_INFER_SHAPE)

file(GLOB KERNEL_SRC
${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/arithmetic_common.c
${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/fp32/activation_fp32.c
${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/fp32/arithmetic_self_fp32.c
${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/fp32/arithmetic_fp32.c
${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/fp32/matmul_fp32.c
${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/fp32/reduce_fp32.c
${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/fp32/arithmetic_fp32.c
${CMAKE_CURRENT_SOURCE_DIR}/src/kernel/fp32/*.cc
${CMAKE_CURRENT_SOURCE_DIR}/src/kernel/common/*.cc
)
if (SUPPORT_TRAIN)
file(GLOB TRAIN_KERNEL_SRC
${KERNEL_SRC}
${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/fp32_grad/activation_grad.c
${CMAKE_CURRENT_SOURCE_DIR}/src/kernel/fp32_grad/*.cc
)
endif ()

list(REMOVE_ITEM KERNEL_SRC ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/opt_op_handler.c)

set(CCSRC
${CMAKE_CURRENT_SOURCE_DIR}/src/common/vector.cc
${CMAKE_CURRENT_SOURCE_DIR}/src/common/string.cc
${CMAKE_CURRENT_SOURCE_DIR}/src/lite_session.cc
${CMAKE_CURRENT_SOURCE_DIR}/src/allocator.cc
${CMAKE_CURRENT_SOURCE_DIR}/src/ms_tensor.cc
)

if (PLATFORM_ARM64)
# assembly
file(GLOB ASSEMBLY_SRC
${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/assembly/arm64/MatmulFp32Opt.S
${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/assembly/arm64/MatVecMulFp32.S
${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/assembly/arm64/MatmulFp32.S)
set_property(SOURCE ${ASSEMBLY_SRC} PROPERTY LANGUAGE C)
set(KERNEL_SRC ${KERNEL_SRC} ${ASSEMBLY_SRC})
add_library(mslite_internal SHARED ${CCSRC} ${KERNEL_SRC} ${TRAIN_KERNEL_SRC})
endif()

+0 -40 mindspore/lite/internal/include/context.h

@@ -1,40 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_CONTEXT_H_
#define MINDSPORE_LITE_INTERNAL_INCLUDE_CONTEXT_H_

/// \brief CpuBindMode defined for holding bind cpu strategy argument.
typedef enum {
NO_BIND = 0, /**< no bind */
HIGHER_CPU = 1, /**< bind higher cpu first */
MID_CPU = 2 /**< bind middle cpu first */
} CpuBindMode;

/// \brief DeviceType defined for holding user's preferred backend.
typedef enum {
DT_CPU, /**< CPU device type */
DT_GPU, /**< GPU device type */
DT_NPU /**< NPU device type, not supported yet */
} DeviceType;

/// \brief Context defined for holding environment variables during runtime.
typedef struct {
bool float16_priority = false; /**< prior enable float16 inference */
DeviceType device_type_ = DT_CPU;
int thread_num_ = 2; /**< thread number config for thread pool */
} Context;
#endif  // MINDSPORE_LITE_INTERNAL_INCLUDE_CONTEXT_H_
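A minimal sketch of how this removed Context was filled in before being handed to a session; the field values are illustrative assumptions, not defaults mandated by the API:

// Hypothetical configuration of the removed internal Context.
Context ctx;
ctx.device_type_ = DT_CPU;      // DT_NPU was not supported yet
ctx.thread_num_ = 4;            // assumed thread-pool size for this workload
ctx.float16_priority = false;   // keep fp32 inference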

+0 -55 mindspore/lite/internal/include/errorcode.h

@@ -1,55 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_ERRORCODE_H_
#define MINDSPORE_LITE_INTERNAL_INCLUDE_ERRORCODE_H_

/// \brief STATUS defined for holding error code in MindSpore Lite.
using STATUS = int;

/* Success */
constexpr int RET_OK = 0; /**< No error occurs. */

/* Common error code, range: [-1, -100]*/
constexpr int RET_ERROR = -1; /**< Common error code. */
constexpr int RET_NULL_PTR = -2; /**< NULL pointer returned.*/
constexpr int RET_PARAM_INVALID = -3; /**< Invalid parameter.*/
constexpr int RET_NO_CHANGE = -4; /**< No change. */
constexpr int RET_SUCCESS_EXIT = -5; /**< No error but exit. */
constexpr int RET_MEMORY_FAILED = -6; /**< Failed to allocate memory. */

/* Executor error code, range: [-101,-200] */
constexpr int RET_OUT_OF_TENSOR_RANGE = -101; /**< Failed to check range. */
constexpr int RET_INPUT_TENSOR_ERROR = -102; /**< Failed to check input tensor. */
constexpr int RET_REENTRANT_ERROR = -103; /**< An executor is already running. */

/* Graph error code, range: [-201,-300] */
constexpr int RET_GRAPH_FILE_ERR = -201; /**< Failed to verify graph file. */

/* Node error code, range: [-301,-400] */
constexpr int RET_NOT_FIND_OP = -301; /**< Failed to find operator. */
constexpr int RET_INVALID_OP_NAME = -302; /**< Invalid operator name. */
constexpr int RET_INVALID_OP_ATTR = -303; /**< Invalid operator attr. */
constexpr int RET_OP_EXECUTE_FAILURE = -304; /**< Failed to execute operator. */

/* Tensor error code, range: [-401,-500] */
constexpr int RET_FORMAT_ERR = -401; /**< Failed to check tensor format. */

/* InferShape error code, range: [-501,-600] */
constexpr int RET_INFER_ERR = -501; /**< Failed to infer shape. */
constexpr int RET_INFER_INVALID = -502; /**< Invalid infer shape before runtime. */

#endif  // MINDSPORE_LITE_INTERNAL_INCLUDE_ERRORCODE_H_
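Callers branched on these codes directly; a hedged sketch (session and model are assumed to exist already, and LITE_ERROR_LOG is the macro from the removed lite_log.h):

// Hypothetical status check against the removed error codes.
STATUS ret = session->CompileGraph(model);
if (ret != RET_OK) {
  LITE_ERROR_LOG("CompileGraph failed, ret: %d", ret);  // e.g. RET_NULL_PTR (-2)
  return ret;
}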

+0 -91 mindspore/lite/internal/include/lite_session.h

@@ -1,91 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_LITE_SESSION_H
#define MINDSPORE_LITE_INTERNAL_INCLUDE_LITE_SESSION_H

#include "internal/include/ms_tensor.h"
#include "internal/include/model.h"
#include "internal/include/context.h"
#include "internal/include/lite_utils.h"

/// \brief LiteSession defines a session in MindSpore Lite for compiling and running a Model.
typedef struct LiteSession {
/// \brief Static method to create a LiteSession pointer.
///
/// \param[in] context Define the context of session to be created.
///
/// \return Pointer of MindSpore Lite LiteSession.
static LiteSession *CreateSession(Context *context);

/// \brief Compile MindSpore Lite model.
///
/// \note CompileGraph should be called before RunGraph.
///
/// \param[in] model Define the model to be compiled.
///
/// \return STATUS as an error code of compiling graph, STATUS is defined in errorcode.h.
int CompileGraph(Model *model);

/// \brief Get input MindSpore Lite MSTensors of model.
///
/// \return The vector of MindSpore Lite MSTensor.
TensorPtrVector GetInputs() const;

/// \brief Get input MindSpore Lite MSTensors of model by node name.
///
/// \param[in] node_name Define node name.
///
/// \return The vector of MindSpore Lite MSTensor.
TensorPtrVector GetInputsByName(const String &node_name) const;

/// \brief Get output MindSpore Lite MSTensors of model by node name.
///
/// \param[in] node_name Define node name.
///
/// \return The vector of MindSpore Lite MSTensor.
TensorPtrVector GetOutputsByNodeName(const String &node_name) const;

/// \brief Get output MindSpore Lite MSTensors of model.
///
/// \return The vector of MindSpore Lite MSTensor.
TensorPtrVector GetOutputs() const;

/// \brief Get name of output tensors of model compiled by this session.
///
/// \return The vector of string as output tensor names in order.
StringVector GetOutputTensorNames() const;

/// \brief Get output MindSpore Lite MSTensors of model by tensor name.
///
/// \param[in] tensor_name Define tensor name.
///
/// \return Pointer of MindSpore Lite MSTensor.
MSTensor *GetOutputByTensorName(const String &tensor_name) const;

/// \brief Run the compiled graph of this session.
///
/// \note RunGraph should be called after CompileGraph.
///
/// \return STATUS as an error code of running graph, STATUS is defined in errorcode.h.
int RunGraph();

/// \brief Resize the shapes of model inputs.
///
/// \param[in] inputs Define the model inputs to be resized.
/// \param[in] dims Define the new shape of each input.
///
/// \return STATUS as an error code of resize inputs, STATUS is defined in errorcode.h.
int Resize(const TensorPtrVector &inputs, const Int32VectorVector &dims);
} LiteSession;

#endif  // MINDSPORE_LITE_INTERNAL_INCLUDE_LITE_SESSION_H
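Taken together, the declarations above imply the usual session lifecycle; the following is an assumed end-to-end sketch of the removed internal path, with error handling and input filling elided:

// Hypothetical inference flow through the removed internal LiteSession.
Context ctx;                                            // internal/include/context.h
LiteSession *session = LiteSession::CreateSession(&ctx);
Model *model = Model::Import(model_buf, model_size);    // model_buf/model_size: caller-provided
session->CompileGraph(model);                           // must precede RunGraph
TensorPtrVector inputs = session->GetInputs();
// ... write input data into inputs[i]->data_ ...
session->RunGraph();
TensorPtrVector outputs = session->GetOutputs();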

+0 -32 mindspore/lite/internal/include/lite_utils.h

@@ -1,32 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_LITE_UTILS_H_
#define MINDSPORE_LITE_INTERNAL_INCLUDE_LITE_UTILS_H_
#include "internal/include/string.h"
#include "internal/include/vector.h"

struct MSTensor;
struct Node;
using TensorPtr = MSTensor *;
using TensorPtrVector = Vector<MSTensor *>;
using Uint32Vector = Vector<uint32_t>;
using StringVector = Vector<String>;
using ShapeVector = Vector<int>;
using NodePtrVector = Vector<struct Node *>;
using Int32Vector = Vector<int>;
using Int32VectorVector = Vector<Int32Vector>;
#endif  // MINDSPORE_LITE_INTERNAL_INCLUDE_LITE_UTILS_H_

+0 -249 mindspore/lite/internal/include/model.h

@@ -1,249 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_MODEL_H
#define MINDSPORE_LITE_INTERNAL_INCLUDE_MODEL_H
#include "internal/include/lite_utils.h"
#include "nnacl/op_base.h"

using PrimitiveC = OpParameter;
enum NodeType {
NodeType_ValueNode = 0,
NodeType_Parameter = 1,
NodeType_CNode = 2,
NodeType_MIN = NodeType_ValueNode,
NodeType_MAX = NodeType_CNode
};

enum KernelType : int {
KernelType_Concat = 0,
KernelType_SoftMax,
KernelType_Activation,
KernelType_Conv2D,
KernelType_FusedBatchNorm,
KernelType_BatchNorm,
KernelType_BiasAdd,
KernelType_Pooling,
KernelType_ROIPooling,
KernelType_DepthwiseConv2D,
KernelType_DeDepthwiseConv2D,
KernelType_Resize,
KernelType_DetectionPostProcess,
KernelType_FullConnection,
KernelType_Mean,
KernelType_DeConv2D,
KernelType_Scale,
KernelType_Reshape,
KernelType_Eltwise,
KernelType_NetOutput,
KernelType_Add,
KernelType_Sub,
KernelType_MatMul,
KernelType_StridedSlice,
KernelType_Power,
KernelType_Slice,
KernelType_Stack,
KernelType_Mul,
KernelType_RealDiv,
KernelType_Pad,
KernelType_Maximum,
KernelType_Minimum,
KernelType_PReLU,
KernelType_LeakyReLU,
KernelType_ArgMax,
KernelType_ArgMin,
KernelType_Exp,
KernelType_Crop,
KernelType_Range,
KernelType_Rsqrt,
KernelType_ExpandDims,
KernelType_Tile,
KernelType_Cast,
KernelType_Shape,
KernelType_Nchw2Nhwc,
KernelType_Nhwc2Nchw,
KernelType_QuantDTypeCast,
KernelType_Split,
KernelType_Permute,
KernelType_FakeQuantWithMinMaxVars,
KernelType_Equal,
KernelType_Less,
KernelType_Greater,
KernelType_NotEqual,
KernelType_LessEqual,
KernelType_GreaterEqual,
KernelType_Min,
KernelType_Floor,
KernelType_Abs,
KernelType_Neg,
KernelType_Cos,
KernelType_Sin,
KernelType_Sqrt,
KernelType_Square,
KernelType_Constant,
KernelType_Log,
KernelType_Tan,
KernelType_Atan,
KernelType_Asin,
KernelType_Clip,
KernelType_Transpose,
KernelType_Squeeze,
KernelType_Unsqueeze,
KernelType_Upsample,
KernelType_Dropout,
KernelType_Broadcast,
KernelType_BroadcastTo,
KernelType_Lrn,
KernelType_ZerosLike,
KernelType_TopK,
KernelType_SpaceToDepth,
KernelType_SpaceToBatch,
KernelType_SparseToDense,
KernelType_ReverseSequence,
KernelType_Rank,
KernelType_Gather,
KernelType_GatherNd,
KernelType_Fill,
KernelType_Elu,
KernelType_DepthToSpace,
KernelType_BatchToSpace,
KernelType_AddN,
KernelType_Ceil,
KernelType_EmbeddingLookup,
KernelType_EmbeddingLookupSparse,
KernelType_FloorDiv,
KernelType_FloorMod,
KernelType_L2Norm,
KernelType_LocalResponseNormalization,
KernelType_MatrixDiag,
KernelType_Reduce,
KernelType_Reverse,
KernelType_Round,
KernelType_Select,
KernelType_Scatter,
KernelType_ScatterND,
KernelType_ConstantOfShape,
KernelType_Unique,
KernelType_Unstack,
KernelType_LogicalAnd,
KernelType_LogicalOr,
KernelType_LogicalXor,
KernelType_LogicalNot,
KernelType_OnnxInt8Quantize,
KernelType_OnnxInt8Dequantize,
KernelType_FakeQuantWithMinMax,
KernelType_FakeQuantWithMinMaxPerChannel,
KernelType_BatchNormFold,
KernelType_MulFold,
KernelType_AddFold,
KernelType_SquaredDifference,
KernelType_Flatten,
KernelType_FlattenGrad,
KernelType_TupleGetItem,
KernelType_Div,
KernelType_Where,
KernelType_OneHot,
KernelType_Lstm,
KernelType_Conv2DGradFilter,
KernelType_Conv2DGradInput,
KernelType_PoolingGrad,
KernelType_BNGrad,
KernelType_BNGradInput,
KernelType_ApplyMomentum,
KernelType_BiasGrad,
KernelType_SoftmaxCrossEntropy,
KernelType_AddGrad,
KernelType_SubGrad,
KernelType_MulGrad,
KernelType_DivGrad,
KernelType_PowerGrad,
KernelType_ActivationGrad,
KernelType_PriorBox,
KernelType_SpaceToBatchND,
KernelType_Depend,
KernelType_Return,
KernelType_MakeTuple,
KernelType_ToFormat,
KernelType_Proposal,
KernelType_Custom,
KernelType_BlackBox,
KernelType_NegGrad,
KernelType_LogGrad,
KernelType_BatchToSpaceND,
KernelType_END,
};

enum ActivationType {
NO_ACTIVATION = 0,
RELU = 1,
SIGMOID = 2,
RELU6 = 3,
ELU = 4,
LEAKY_RELU = 5,
ABS = 6,
RELU1 = 7,
SOFTSIGN = 8,
SOFTPLUS = 9,
TANH = 10,
SELU = 11,
HSWISH = 12,
HSIGMOID = 13,
THRESHOLDRELU = 14,
LINEAR = 15,
UNKNOW = 16
};

enum ReduceMode {
ReduceMode_ReduceMean = 0,
ReduceMode_ReduceMax = 1,
ReduceMode_ReduceMin = 2,
ReduceMode_ReduceProd = 3,
ReduceMode_ReduceSum = 4,
ReduceMode_ReduceSumSquare = 5,
ReduceMode_ReduceASum = 6,
ReduceMode_MIN = ReduceMode_ReduceMean,
ReduceMode_MAX = ReduceMode_ReduceASum
};

typedef struct Node {
String name_;
NodeType node_type_;
PrimitiveC *primitive_;
Uint32Vector input_indices_;
Uint32Vector output_indices_;
} Node;

typedef struct Model {
String name_;
String version_;
TensorPtrVector all_tensors_;
Uint32Vector input_indices_;
Uint32Vector output_indices_;
NodePtrVector nodes_;
char *buf;

/// \brief Static method to create a Model pointer.
///
/// \param[in] model_buf Define the buffer read from a model file.
/// \param[in] size Define bytes number of model buffer.
///
/// \return Pointer of MindSpore Lite Model.
static Model *Import(const char *model_buf, size_t size);

/// \brief Free all the temporary buffers.
void Free();
} Model;

#endif // MINDSPORE_LITE_INTERNAL_INCLUDE_MODEL_H
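Import/Free above put ownership of the raw model buffer on the caller; a minimal assumed sequence:

// Hypothetical model loading via the removed internal API.
// model_buf and model_size would come from reading a .ms file; names are illustrative.
Model *model = Model::Import(model_buf, model_size);
if (model == nullptr) { /* import failed */ }
// ... CompileGraph(model), RunGraph() ...
model->Free();  // release the temporary buffer once it is no longer needed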

+0 -151 mindspore/lite/internal/include/ms_tensor.h

@@ -1,151 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_MS_TENSOR_H_
#define MINDSPORE_LITE_INTERNAL_INCLUDE_MS_TENSOR_H_

#include "internal/include/lite_utils.h"

enum TypeId : int {
kTypeUnknown = 0,
kMetaTypeBegin = kTypeUnknown,
kMetaTypeType, // Type
kMetaTypeAnything,
kMetaTypeObject,
kMetaTypeTypeType, // TypeType
kMetaTypeProblem,
kMetaTypeExternal,
kMetaTypeNone,
kMetaTypeNull,
kMetaTypeEllipsis,
kMetaTypeEnd,
//
// Object types
//
kObjectTypeBegin = kMetaTypeEnd,
kObjectTypeNumber,
kObjectTypeString,
kObjectTypeList,
kObjectTypeTuple,
kObjectTypeSlice,
kObjectTypeKeyword,
kObjectTypeTensorType,
kObjectTypeRowTensorType,
kObjectTypeSparseTensorType,
kObjectTypeUndeterminedType,
kObjectTypeClass,
kObjectTypeDictionary,
kObjectTypeFunction,
kObjectTypeJTagged,
kObjectTypeSymbolicKeyType,
kObjectTypeEnvType,
kObjectTypeRefKey,
kObjectTypeRef,
kObjectTypeEnd,
//
// Number Types
//
kNumberTypeBegin = kObjectTypeEnd,
kNumberTypeBool,
kNumberTypeInt,
kNumberTypeInt8,
kNumberTypeInt16,
kNumberTypeInt32,
kNumberTypeInt64,
kNumberTypeUInt,
kNumberTypeUInt8,
kNumberTypeUInt16,
kNumberTypeUInt32,
kNumberTypeUInt64,
kNumberTypeFloat,
kNumberTypeFloat16,
kNumberTypeFloat32,
kNumberTypeFloat64,
kNumberTypeEnd
};

enum Format {
Format_NCHW = 0,
Format_NHWC = 1,
Format_NHWC4 = 2,
Format_HWKC = 3,
Format_HWCK = 4,
Format_KCHW = 5,
Format_CKHW = 6,
Format_KHWC = 7,
Format_CHWK = 8,
Format_HW = 9,
Format_HW4 = 10,
Format_NC = 11,
Format_NC4 = 12,
Format_NC4HW4 = 100,
Format_NUM_OF_FORMAT = 101,
Format_MIN = Format_NCHW,
Format_MAX = Format_NUM_OF_FORMAT
};

typedef struct MSTensor {
enum Category {
CONST, // weight tensor
VAR // activation tensor
};
void *data_ = NULL;
void *device_data_ = NULL;
TypeId data_type_;
Format format_ = Format_NHWC;
Category category_ = VAR;
ShapeVector shape_;
size_t refCount = 0;

int32_t Batch() const;

int32_t Channel() const;

int32_t Height() const;

int32_t Width() const;

/// \brief Get size of the dimension of the MindSpore Lite MSTensor indexed by the parameter index.
///
/// \param[in] index Define index of dimension returned.
///
/// \return Size of dimension of the MindSpore Lite MSTensor.
int DimensionSize(size_t index) const;

/// \brief Get number of elements in MSTensor.
///
/// \return Number of elements in MSTensor.
int ElementsNum() const;

int ElementsC4Num() const;

/// \brief Get byte size of data in MSTensor.
///
/// \return Byte size of data in MSTensor.
size_t Size() const;

static void *operator new(size_t sz);

static void *operator new[](size_t sz);

static void operator delete(void *ptr, size_t sz);

static void operator delete[](void *ptr, size_t sz);
} MSTensor;

MSTensor *CreateTensor(TypeId data_type, const ShapeVector &shape);
void DestroyTensor(MSTensor *ptr);
#endif  // MINDSPORE_LITE_INTERNAL_INCLUDE_MS_TENSOR_H_
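CreateTensor/DestroyTensor above are free functions rather than constructors, so tensor lifetime was managed manually; an assumed example (whether CreateTensor also allocated data_ is not visible here):

// Hypothetical creation of a 1x2x2x3 NHWC float tensor via the removed helpers.
ShapeVector shape;
shape.push_back(1); shape.push_back(2); shape.push_back(2); shape.push_back(3);
MSTensor *t = CreateTensor(kNumberTypeFloat32, shape);
size_t bytes = t->Size();   // 12 elements * sizeof(float) = 48 bytes
DestroyTensor(t);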

+0 -100 mindspore/lite/internal/include/string.h

@@ -1,100 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INTERNAL_SRC_STRING_H_
#define MINDSPORE_LITE_INTERNAL_SRC_STRING_H_
#include <string.h>
#include <stdint.h>

typedef struct String {
public:
String();
String(size_t count, char ch);
String(const char *s, size_t count);
explicit String(const char *s);
String(const String &other);
String(const String &other, size_t pos, size_t count = npos);

~String();

String &operator=(const String &str);
String &operator=(const char *str);

char &at(size_t pos);
const char &at(size_t pos) const;
inline char &operator[](size_t pos);
inline const char &operator[](size_t pos) const;
char *data() noexcept;
const char *data() const noexcept;
const char *c_str() const noexcept;

// capacity
bool empty() const noexcept;
size_t size() const noexcept;
size_t length() const noexcept;

// operations
void clear() noexcept;
String &append(size_t count, const char ch);
String &append(const String &str);
String &append(const char *s);
String &operator+=(const String &str);
String &operator+=(const char *str);
String &operator+=(const char ch);
int compare(const String &str) const;
int compare(const char *str) const;

String substr(size_t pos = 0, size_t count = npos) const;

static const size_t npos = -1;

private:
size_t size_;
char *buffer_;
} String;

bool operator==(const String &lhs, const String &rhs);
bool operator==(const String &lhs, const char *rhs);
bool operator==(const char *lhs, const String rhs);

bool operator!=(const String &lhs, const String &rhs);
bool operator!=(const String &lhs, const char *rhs);
bool operator!=(const char *lhs, const String rhs);

bool operator<(const String &lhs, const String &rhs);
bool operator<(const String &lhs, const char *rhs);
bool operator<(const char *lhs, const String rhs);

bool operator>(const String &lhs, const String &rhs);
bool operator>(const String &lhs, const char *rhs);
bool operator>(const char *lhs, const String rhs);

bool operator<=(const String &lhs, const String &rhs);
bool operator<=(const String &lhs, const char *rhs);
bool operator<=(const char *lhs, const String rhs);

bool operator>=(const String &lhs, const String &rhs);
bool operator>=(const String &lhs, const char *rhs);
bool operator>=(const char *lhs, const String rhs);

String to_String(int32_t value);
String to_String(int64_t value);
String to_String(uint32_t value);
String to_String(uint64_t value);
String to_String(float value);
String to_String(double value);
String to_String(long double value);

#endif // MINDSPORE_LITE_INTERNAL_SRC_STRING_H_
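This String covers a small subset of std::string without exceptions; a short assumed usage:

// Hypothetical use of the removed internal String.
String name("conv");
name += "_1";                       // "conv_1"
String prefix = name.substr(0, 4);  // "conv"
if (prefix == "conv") { /* ... */ }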

+0 -117 mindspore/lite/internal/include/vector.h

@@ -1,117 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_VECTOR_H
#define MINDSPORE_LITE_INTERNAL_INCLUDE_VECTOR_H

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#define DEFAULT_CAPACITY 4

struct MSTensor;
struct Node;

template <typename T>
class Vector {
public:
Vector();

explicit Vector(size_t size);

Vector(size_t size, const T &value);

Vector(const Vector<T> &vector);

~Vector();

void clear();

void push_back(const T &elem);

void push_back(T &&);

void pop_back();

void insert(const T &elem, size_t index);

T *begin();

const T *begin() const;

T *end();

const T *end() const;

T &front();

const T &front() const;

T &back();

const T &back() const;

T &at(size_t index);

const T &at(size_t index) const;

T &operator[](size_t index);

const T &operator[](size_t index) const;

T *data();

const T *data() const;

size_t size() const;

size_t capacity() const;

bool empty() const;

void erase(size_t index);

void resize(size_t size);

void reserve(size_t capacity);

Vector<T> &operator=(const Vector<T> &v);

private:
size_t size_;
size_t elem_size_;
size_t capacity_;
T *data_;
};

template <typename T>
bool operator==(const Vector<T> &lhs, const Vector<T> &rhs) {
if (lhs.size() != rhs.size()) {
return false;
}
for (int i = 0; i < lhs.size(); ++i) {
if (lhs[i] != rhs[i]) {
return false;
}
}
return true;
}

template <typename T>
bool operator!=(const Vector<T> &lhs, const Vector<T> &rhs) {
return !(lhs == rhs);
}
#endif // MINDSPORE_LITE_INTERNAL_INCLUDE_VECTOR_H
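Likewise, this Vector mimics just the std::vector surface the runtime needs; an assumed usage:

// Hypothetical use of the removed internal Vector.
Vector<int> dims;
dims.push_back(1);
dims.push_back(224);
int h = dims.at(1);  // bounds-checked; aborts via MS_C_EXCEPTION if out of range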

+0 -220 mindspore/lite/internal/src/allocator.cc

@@ -1,220 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "internal/src/allocator.h"
#include <stdlib.h>
#include "internal/src/lite_log.h"

namespace mindspore::lite {
namespace {
constexpr size_t kMaxMallocSize = 2000 * 1024 * 1024;
constexpr int kBlockSize = 1024;
constexpr size_t kBlockLimit = (kBlockSize << (kBlockRange - 1));

int SizeToIndex(size_t size) {
if (size > kBlockLimit) {
return -1;
}
int index = 0;
for (int i = 0; i < kBlockRange; ++i) {
if ((size & (kBlockSize << i))) {
index = i;
}
}
if (size > (size_t)(kBlockSize << index)) {
index += 1;
}
return index;
}

void PopMemNode(MemNode **head) {
if (*head == nullptr) {
return;
}
MemNode *next = (*head)->next_;
(*head) = next;
if (*head != nullptr) {
(*head)->pre_ = nullptr;
}
}

void PushMemNode(MemNode **head, MemNode *node) {
if (node == nullptr) {
return;
}
if (*head == nullptr) {
*head = node;
return;
}
(*head)->pre_ = node;
node->next_ = *head;
node->pre_ = nullptr;
*head = node;
}

void RemoveMemNode(MemNode **head, MemNode *node) {
if (node == nullptr) {
return;
}
if ((*head) == node) {
*head = node->next_;
if (*head != nullptr) {
(*head)->pre_ = nullptr;
}
} else {
MemNode *node_pre = node->pre_;
node_pre->next_ = node->next_;
node->next_ = nullptr;
node->pre_ = nullptr;
}
}

void FreeNodesList(MemNode *head) {
MemNode *node = head;
while (node != nullptr) {
MemNode *next = node->next_;
free(node);
node = next;
}
}
} // namespace

Allocator::Allocator() {
for (int i = 0; i < kBlockRange; ++i) {
allocated_list_[i] = nullptr;
free_list_[i] = nullptr;
}
}

Allocator::~Allocator() { Clear(); }

void Allocator::SetContext(const AllocatorContext &ctx) {
lock_flag_ = ctx.lock_flag_;
}

void Allocator::Lock() {
if (lock_flag_) {
pthread_mutex_lock(&lock_);
}
}

void Allocator::UnLock() {
if (lock_flag_) {
pthread_mutex_unlock(&lock_);
}
}

void *Allocator::Malloc(size_t size) {
if (size > kMaxMallocSize) {
LITE_ERROR_LOG("MallocData out of max_size, size: %zd", size);
return nullptr;
}
void *result = nullptr;
int index = SizeToIndex(size);
if (index < 0) {
MemNode *node = (MemNode *)malloc(sizeof(MemNode) + size);
if (node == nullptr) {
LITE_ERROR_LOG("MallocData out of max_size, size: %zd", (size + sizeof(MemNode)));
return result;
}
node->size_ = size;
result = (char *)node + sizeof(MemNode);
Lock();
PushMemNode(&large_mem_list_, node);
UnLock();
return result;
}
Lock();
size_t size_apply = (kBlockSize << index);
if (free_list_[index] != nullptr) {
MemNode *free_node = free_list_[index];
PopMemNode(&free_list_[index]);
PushMemNode(&allocated_list_[index], free_node);
result = (char *)free_node + sizeof(MemNode);
UnLock();
return result;
} else {
MemNode *new_node = (MemNode *)malloc(sizeof(MemNode) + size_apply);
if (new_node == nullptr) {
UnLock();
LITE_LOG_ERROR("malloc MemNode fail!");
return nullptr;
}
new_node->size_ = size;
PushMemNode(&allocated_list_[index], new_node);
result = (char *)new_node + sizeof(MemNode);
UnLock();
return result;
}
}

void Allocator::Free(void *buf) {
if (buf == nullptr) {
return;
}

MemNode *node = (MemNode *)((char *)buf - sizeof(MemNode));
size_t buf_size = node->size_;
Lock();
if (buf_size > kBlockLimit) {
RemoveMemNode(&large_mem_list_, node);
free(node);
} else {
int index = SizeToIndex(buf_size);
RemoveMemNode(&allocated_list_[index], node);
PushMemNode(&free_list_[index], node);
}
UnLock();
}

size_t Allocator::GetTotalSize() {
Lock();
size_t total_size = 0;
for (int i = 0; i < kBlockRange; ++i) {
MemNode *node = allocated_list_[i];
while (node != nullptr) {
total_size += node->size_;
node = node->next_;
}

node = free_list_[i];
while (node != nullptr) {
total_size += node->size_;
node = node->next_;
}
}
MemNode *node = large_mem_list_;
while (node != nullptr) {
total_size += node->size_;
node = node->next_;
}
UnLock();
return total_size;
}

void Allocator::Clear() {
Lock();
for (int i = 0; i < kBlockRange; ++i) {
FreeNodesList(allocated_list_[i]);
allocated_list_[i] = nullptr;

FreeNodesList(free_list_[i]);
free_list_[i] = nullptr;
}
FreeNodesList(large_mem_list_);
UnLock();
}
} // namespace mindspore::lite
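The allocator above buckets requests into kBlockRange power-of-two freelists: SizeToIndex maps a request of s bytes to the smallest index i with (kBlockSize << i) >= s, anything above kBlockLimit (1024 << 8 = 256 KiB) goes on large_mem_list_, and Free returns small blocks to their freelist rather than to the OS. A sketch of assumed usage:

// Hypothetical use of the removed pooled allocator.
mindspore::lite::Allocator alloc;
mindspore::lite::AllocatorContext actx = {true};  // lock_flag_: enable the mutex
alloc.SetContext(actx);
void *buf = alloc.Malloc(3000);  // served from the 4096-byte bucket (index 2)
alloc.Free(buf);                 // cached on free_list_[2] for reuse
alloc.Clear();                   // actually frees all cached blocks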

+0 -60 mindspore/lite/internal/src/allocator.h

@@ -1,60 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_INTERNAL_SRC_ALLOCATOR_H_
#define MINDSPORE_LITE_INTERNAL_SRC_ALLOCATOR_H_

#include <stddef.h>
#include <pthread.h>
#include "internal/include/string.h"

namespace mindspore::lite {
constexpr int kBlockRange = 9;

typedef struct AllocatorContext {
bool lock_flag_;
} AllocatorContext;

typedef struct MemNode {
MemNode *pre_ = nullptr;
MemNode *next_ = nullptr;
size_t size_ = 0;
} MemNode;


class Allocator {
public:
Allocator();
~Allocator();
void SetContext(const AllocatorContext &ctx);
void *Malloc(size_t size);
void Free(void *ptr);
void Clear();
size_t GetTotalSize();

private:
void Lock();
void UnLock();

bool lock_flag_ = false;
pthread_mutex_t lock_ = PTHREAD_MUTEX_INITIALIZER;
MemNode *large_mem_list_ = nullptr;
MemNode *allocated_list_[kBlockRange];
MemNode *free_list_[kBlockRange];
};
} // namespace mindspore::lite

#endif // MINDSPORE_LITE_INTERNAL_SRC_ALLOCATOR_H_

+0 -357 mindspore/lite/internal/src/common/string.cc

@@ -1,357 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "internal/include/string.h"
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <float.h>
#include <stdint.h>
#include "internal/src/lite_log.h"

String::String() {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
buffer_[0] = '\0';
size_ = 0;
}

String::String(size_t count, char ch) {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (count + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memset(buffer_, ch, count);
buffer_[count] = '\0';
size_ = count;
}
String::String(const char *s, size_t count) {
if (s == nullptr) {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
buffer_[0] = '\0';
size_ = 0;
return;
}
size_t size_s = strlen(s);
if (size_s <= count) {
size_ = size_s;
} else {
size_ = count;
}
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (size_ + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
strncpy(buffer_, s, size_);
buffer_[size_] = '\0';
}

String::String(const char *s) {
if (s == nullptr) {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
buffer_[0] = '\0';
size_ = 0;
return;
}
size_ = strlen(s);
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (size_ + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memcpy(buffer_, s, size_ + 1);
}

String::String(const String &other) {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (other.size_ + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
size_ = other.size_;
memcpy(buffer_, other.buffer_, size_ + 1);
}

String::String(const String &other, size_t pos, size_t count) {
if (pos >= other.size_) {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
buffer_[0] = '\0';
size_ = 0;
} else {
if (count == npos) {
count = other.size_ - pos;
}
if (pos + count > other.size_) {
size_ = other.size_ - pos;
} else {
size_ = count;
}
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (size_ + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
strncpy(buffer_, other.buffer_ + pos, size_);
buffer_[size_] = '\0';
}
}

String::~String() { free(buffer_); }

String &String::operator=(const String &str) {
if (this == &str) {
return *this;
}
free(buffer_);
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (str.size_ + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
size_ = str.size_;
memcpy(buffer_, str.buffer_, size_ + 1);
return *this;
}

String &String::operator=(const char *str) {
free(buffer_);
if (str == nullptr) {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
buffer_[0] = '\0';
size_ = 0;
return *this;
}
size_t size_s = strlen(str);
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (size_s + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
size_ = size_s;
memcpy(buffer_, str, size_ + 1);
return *this;
}

char &String::at(size_t pos) {
if (pos >= size_) {
MS_C_EXCEPTION("pos out of range");
}
return buffer_[pos];
}
const char &String::at(size_t pos) const {
if (pos >= size_) {
MS_C_EXCEPTION("pos out of range");
}
return buffer_[pos];
}
char &String::operator[](size_t pos) {
if (pos >= size_) {
MS_C_EXCEPTION("pos out of range");
}
return this->at(pos);
}
const char &String::operator[](size_t pos) const {
if (pos >= size_) {
MS_C_EXCEPTION("pos out of range");
}
return this->at(pos);
}
char *String::data() noexcept { return buffer_; };
const char *String::data() const noexcept { return buffer_; }
const char *String::c_str() const noexcept { return buffer_; }

// capacity
bool String::empty() const noexcept { return size_ == 0; }
size_t String::size() const noexcept { return size_; }
size_t String::length() const noexcept { return size_; }

// operations
void String::clear() noexcept {
free(buffer_);
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
buffer_[0] = '\0';
size_ = 0;
}
String &String::operator+=(const String &str) {
size_t new_size = size_ + str.size_;
char *tmp = reinterpret_cast<char *>(malloc(sizeof(char) * (new_size + 1)));
if (tmp == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memcpy(tmp, this->buffer_, size_ + 1);
strncat(tmp, str.buffer_, str.size_);
tmp[new_size] = '\0';
free(buffer_);
buffer_ = tmp;
size_ = new_size;
return *this;
}

String &String::operator+=(const char *str) {
if (str == nullptr) {
return *this;
}
size_t str_size = strlen(str);
size_t new_size = size_ + str_size;
char *tmp = reinterpret_cast<char *>(malloc(sizeof(char) * (new_size + 1)));
if (tmp == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memcpy(tmp, this->buffer_, size_ + 1);
strncat(tmp, str, str_size);
tmp[new_size] = '\0';
free(buffer_);
buffer_ = tmp;
size_ = new_size;
return *this;
}

String &String::operator+=(const char ch) {
char *tmp = reinterpret_cast<char *>(malloc(sizeof(char) * (size_ + 2)));
if (tmp == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memcpy(tmp, this->buffer_, size_ + 1);
tmp[size_] = ch;
tmp[size_ + 1] = '\0';
free(buffer_);
buffer_ = tmp;
size_ += 1;
return *this;
}

String &String::append(size_t count, const char ch) {
(*this) += ch;
return *this;
}
String &String::append(const String &str) {
(*this) += str;
return *this;
}
String &String::append(const char *str) {
if (str == nullptr) {
return *this;
}
(*this) += str;
return *this;
}

int String::compare(const String &str) const { return strcmp(buffer_, str.buffer_); }
int String::compare(const char *str) const { return strcmp(buffer_, str); }

String String::substr(size_t pos, size_t count) const { return String(*this, pos, count); }

String operator+(const String &lhs, const String &rhs) {
String str(lhs);
str += rhs;
return str;
}
String operator+(const String &lhs, const char *rhs) {
String str(lhs);
str += rhs;
return str;
}
String operator+(const char *lhs, const String &rhs) {
String str(lhs);
return str + rhs;
}
String operator+(const String &lhs, const char rhs) {
String str(lhs);
str += rhs;
return str;
}
String operator+(const char lhs, const String &rhs) {
String str(1, lhs);
str += rhs;
return str;
}

bool operator==(const String &lhs, const String &rhs) { return lhs.compare(rhs) == 0; }
bool operator==(const String &lhs, const char *rhs) { return lhs.compare(rhs) == 0; }
bool operator==(const char *lhs, const String rhs) { return rhs.compare(lhs) == 0; }

bool operator!=(const String &lhs, const String &rhs) { return lhs.compare(rhs) != 0; }
bool operator!=(const String &lhs, const char *rhs) { return lhs.compare(rhs) != 0; }
bool operator!=(const char *lhs, const String rhs) { return rhs.compare(lhs) != 0; }

bool operator<(const String &lhs, const String &rhs) { return lhs.compare(rhs) < 0; }
bool operator<(const String &lhs, const char *rhs) { return lhs.compare(rhs) < 0; }
bool operator<(const char *lhs, const String rhs) { return rhs.compare(lhs) >= 0; }

bool operator>(const String &lhs, const String &rhs) { return lhs.compare(rhs) > 0; }
bool operator>(const String &lhs, const char *rhs) { return lhs.compare(rhs) > 0; }
bool operator>(const char *lhs, const String rhs) { return rhs.compare(lhs) <= 0; }

bool operator<=(const String &lhs, const String &rhs) { return lhs.compare(rhs) <= 0; }
bool operator<=(const String &lhs, const char *rhs) { return lhs.compare(rhs) <= 0; }
bool operator<=(const char *lhs, const String rhs) { return rhs.compare(lhs) > 0; }

bool operator>=(const String &lhs, const String &rhs) { return lhs.compare(rhs) >= 0; }
bool operator>=(const String &lhs, const char *rhs) { return lhs.compare(rhs) >= 0; }
bool operator>=(const char *lhs, const String rhs) { return rhs.compare(lhs) < 0; }

String to_String(int32_t value) {
char tmp[sizeof(int32_t) * 4];
snprintf(tmp, sizeof(int32_t) * 4, "%d", value);
return String(tmp, strlen(tmp));
}

String to_String(int64_t value) {
char tmp[sizeof(int64_t) * 4];
snprintf(tmp, sizeof(int64_t) * 4, "%ld", value);
return String(tmp, strlen(tmp));
}

String to_String(u_int32_t value) {
char tmp[sizeof(u_int32_t) * 4];
snprintf(tmp, sizeof(unsigned) * 4, "%u", value);
return String(tmp, strlen(tmp));
}

String to_String(u_int64_t value) {
char tmp[sizeof(u_int64_t) * 4];
snprintf(tmp, sizeof(u_int64_t) * 4, "%lu", value);
return String(tmp, strlen(tmp));
}

String to_String(float value) {
char tmp[FLT_MAX_10_EXP + 20];
snprintf(tmp, FLT_MAX_10_EXP + 20, "%f", value);
return String(tmp, strlen(tmp));
}

String to_String(double value) {
char tmp[DBL_MAX_10_EXP + 20];
snprintf(tmp, DBL_MAX_10_EXP + 20, "%f", value);
return String(tmp, strlen(tmp));
}

String to_String(long double value) {
char tmp[LDBL_MAX_10_EXP + 20];
snprintf(tmp, LDBL_MAX_10_EXP + 20, "%Lf", value);
return String(tmp, strlen(tmp));
}
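The to_String overloads size their stack buffers from the type width (sizeof(int32_t) * 4 = 16 bytes comfortably holds "-2147483648" plus the terminator); assumed calls:

// Hypothetical formatting via the removed helpers.
String s = to_String(42);    // "42"
String f = to_String(3.5f);  // "3.500000" (printf "%f" default precision)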

+0 -303 mindspore/lite/internal/src/common/vector.cc

@@ -1,303 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "internal/include/vector.h"
#include "internal/include/string.h"
#include "internal/src/lite_log.h"

#define MIN(x, y) (((x) < (y)) ? (x) : (y))

template <typename T>
Vector<T>::Vector() {
size_ = 0;
capacity_ = DEFAULT_CAPACITY;
elem_size_ = sizeof(T);
data_ = nullptr;
}

template <typename T>
Vector<T>::Vector(size_t size) {
size_ = size;
elem_size_ = sizeof(T);
capacity_ = (size == 0 ? DEFAULT_CAPACITY : size);
data_ = reinterpret_cast<T *>(malloc(capacity_ * elem_size_));
if (data_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memset(data_, 0, capacity_ * elem_size_);
}

template <typename T>
Vector<T>::Vector(size_t size, const T &value) {
size_ = size;
elem_size_ = sizeof(T);
capacity_ = (size == 0 ? DEFAULT_CAPACITY : size);
data_ = reinterpret_cast<T *>(malloc(capacity_ * elem_size_));
if (data_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
for (int i = 0; i < size; ++i) {
data_[i] = value;
}
}

template <typename T>
Vector<T>::Vector(const Vector<T> &vec) {
size_ = vec.size_;
elem_size_ = sizeof(T);
capacity_ = vec.capacity_;
data_ = reinterpret_cast<T *>(malloc(capacity_ * elem_size_));
if (data_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memcpy(data_, vec.data_, size_ * elem_size_);
}

template <typename T>
Vector<T> &Vector<T>::operator=(const Vector<T> &vec) {
if (this == &vec) {
return *this;
}
free(data_);  // release old storage before copying
size_ = vec.size_;
elem_size_ = sizeof(T);
capacity_ = vec.capacity_;
data_ = reinterpret_cast<T *>(malloc(capacity_ * elem_size_));
if (data_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memcpy(data_, vec.data_, size_ * elem_size_);
return *this;
}

template <typename T>
Vector<T>::~Vector() {
if (data_ != nullptr) {
free(data_);
}
}

template <typename T>
void Vector<T>::clear() {
size_ = 0;
if (data_ != nullptr) {
free(data_);
data_ = nullptr;
}
}

template <typename T>
void Vector<T>::push_back(const T &elem) {
if (data_ == nullptr) {
data_ = reinterpret_cast<T *>(malloc(capacity_ * elem_size_));
if (data_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
} else if (size_ == capacity_) {
resize(size_ + 1);
--size_;
}
data_[size_] = elem;
++size_;
}

template <typename T>
void Vector<T>::push_back(T &&elem) {
if (data_ == nullptr) {
data_ = reinterpret_cast<T *>(malloc(capacity_ * elem_size_));
if (data_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
} else if (size_ == capacity_) {
resize(size_ + 1);
--size_;
}
data_[size_] = elem;
++size_;
}

template <typename T>
void Vector<T>::pop_back() {
if (size_ > 0) {
--size_;
} else {
MS_C_EXCEPTION("Index is out of range!");
}
}

template <typename T>
void Vector<T>::insert(const T &elem, size_t index) {
if (index <= size_) {
++size_;
if (size_ > capacity_) {
resize(size_);
}
if (index == size_ - 1) {
push_back(elem);
} else {
memmove(data_ + index + 1, data_ + index, (size_ - index - 1) * elem_size_);
data_[index] = elem;
}
} else {
MS_C_EXCEPTION("Input index is out of range!");
}
}

template <typename T>
T *Vector<T>::begin() {
return data_;
}

template <typename T>
const T *Vector<T>::begin() const {
return data_;
}

template <typename T>
T *Vector<T>::end() {
return data_ + size_;
}

template <typename T>
const T *Vector<T>::end() const {
return data_ + size_;
}

template <typename T>
T &Vector<T>::front() {
if (size_ > 0) {
return data_[0];
}
MS_C_EXCEPTION("Index is out of range!");
}

template <typename T>
const T &Vector<T>::front() const {
if (size_ > 0) {
return data_[0];
}
MS_C_EXCEPTION("Index is out of range!");
}
template <typename T>
T &Vector<T>::back() {
if (size_ > 0) {
return data_[size_ - 1];
}
MS_C_EXCEPTION("Index is out of range!");
}
template <typename T>
const T &Vector<T>::back() const {
if (size_ > 0) {
return data_[size_ - 1];
}
MS_C_EXCEPTION("Index is out of range!");
}

template <typename T>
T &Vector<T>::at(size_t index) {
if (index < size_) {
return data_[index];
}
MS_C_EXCEPTION("Input index is out of range!");
}

template <typename T>
const T &Vector<T>::at(size_t index) const {
if (index < size_) {
return data_[index];
}
MS_C_EXCEPTION("Input index is out of range!");
}

template <typename T>
T &Vector<T>::operator[](size_t index) {
if (index < size_) {
return data_[index];
}
MS_C_EXCEPTION("Input index is out of range!");
}

template <typename T>
const T &Vector<T>::operator[](size_t index) const {
if (index < size_) {
return data_[index];
}
MS_C_EXCEPTION("Input index is out of range!");
}

template <typename T>
T *Vector<T>::data() {
return data_;
}

template <typename T>
const T *Vector<T>::data() const {
return data_;
}

template <typename T>
size_t Vector<T>::size() const {
return size_;
}

template <typename T>
size_t Vector<T>::capacity() const {
return capacity_;
}

template <typename T>
bool Vector<T>::empty() const {
return size_ == 0;
}

template <typename T>
void Vector<T>::erase(size_t index) {
if (index == size_ - 1) {
--size_;
} else if (index < size_) {
memmove(data_ + index, data_ + index + 1, (size_ - index - 1) * elem_size_);
--size_;
} else {
MS_C_EXCEPTION("Input index is out of range!");
}
}

template <typename T>
void Vector<T>::resize(size_t size) {
while (size > capacity_) {
capacity_ *= 2;
}
T *tmp = data_;
data_ = reinterpret_cast<T *>(malloc(capacity_ * elem_size_));
if (data_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memcpy(data_, tmp, MIN(size, size_) * elem_size_);
size_ = size;
free(tmp);
}

template <typename T>
void Vector<T>::reserve(size_t capacity) {
if (capacity > capacity_) {
capacity_ = capacity;
}
}

template class Vector<int>;
template class Vector<Vector<int>>;
template class Vector<uint32_t>;
template class Vector<String>;
template class Vector<MSTensor *>;
template class Vector<Node *>;
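Because the member definitions live in this translation unit rather than the header, only the element types explicitly instantiated above could link; e.g.:

// Links: Vector<int> is explicitly instantiated in vector.cc.
Vector<int> ok(4, 0);
// Would fail to link (no explicit instantiation in this file):
// Vector<float> bad;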

+0 -31 mindspore/lite/internal/src/kernel/common/common_infershape.cc

@@ -1,31 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "internal/src/kernel/common/common_infershape.h"
#include "internal/include/errorcode.h"
#include "internal/include/ms_tensor.h"
#include "internal/src/lite_log.h"

int DoCommonInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors) {
TensorPtr input = in_tensors.at(0);
MS_ASSERT(input != nullptr);
TensorPtr output = out_tensors.at(0);
MS_ASSERT(output != nullptr);
output->format_ = input->format_;
output->data_type_ = input->data_type_;
output->shape_ = input->shape_;
return RET_OK;
}
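DoCommonInferShape simply propagates format, data type, and shape from the first input to the first output, which suffices for element-wise kernels; an assumed call (in_tensor/out_tensor are pre-existing MSTensor pointers):

// Hypothetical shape inference for a unary elementwise op.
TensorPtrVector ins;  ins.push_back(in_tensor);
TensorPtrVector outs; outs.push_back(out_tensor);
int ret = DoCommonInferShape(ins, outs);  // outs[0] now mirrors ins[0]'s shape/type/format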

+0 -24 mindspore/lite/internal/src/kernel/common/common_infershape.h

@@ -1,24 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_INTERNAL_SRC_KERNEL_COMMON_INFERSHAPE_H_
#define MINDSPORE_LITE_INTERNAL_SRC_KERNEL_COMMON_INFERSHAPE_H_

#include "internal/include/model.h"

int DoCommonInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors);

#endif // MINDSPORE_LITE_INTERNAL_SRC_KERNEL_COMMON_INFERSHAPE_H_

+0 -54 mindspore/lite/internal/src/kernel/fp32/activation.cc

@@ -1,54 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "internal/src/kernel/fp32/activation.h"
#include "internal/src/kernel/common/common_infershape.h"
#include "internal/include/errorcode.h"
#include "internal/include/ms_tensor.h"
#include "nnacl/fp32/activation_fp32.h"
#include "internal/src/lite_log.h"
#include "nnacl/errorcode.h"

int DoActivationInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, OpParameter *param) {
return DoCommonInferShape(in_tensors, out_tensors);
}

int DoActivation(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
mindspore::lite::Allocator *allocator) {
ActivationParameter *param = (ActivationParameter *)node->primitive_;
int ret = RET_OK;
size_t length = in_tensors[0]->ElementsNum();
float *input_addr = (float *)in_tensors[0]->data_;
float *output_addr = (float *)out_tensors[0]->data_;
if (param->type_ == ActivationType::RELU) {
ret = Fp32Relu(input_addr, length, output_addr);
} else if (param->type_ == ActivationType::SIGMOID) {
ret = Sigmoid(input_addr, length, output_addr);
} else if (param->type_ == ActivationType::RELU6) {
ret = Fp32Relu6(input_addr, length, output_addr);
} else if (param->type_ == ActivationType::LEAKY_RELU) {
float alpha = param->alpha_;
ret = LRelu(input_addr, length, output_addr, alpha);
} else {
LITE_ERROR_LOG("Unsupported activation type: %d", param->type_);
return RET_PARAM_INVALID;
}
if (ret != NNACL_OK) {
LITE_ERROR_LOG("do activation(%d) fail!ret: %d", param->type_, ret);
return RET_ERROR;
}
return RET_OK;
}
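DoActivation dispatches on ActivationParameter::type_ to the matching nnacl fp32 routine; a hedged sketch of a direct call (normally the session drove this through its kernel table; ins/outs as in the infershape example above):

// Hypothetical RELU through the removed kernel wrapper.
ActivationParameter param = {};  // nnacl struct, zero-initialized
param.type_ = ActivationType::RELU;
Node node;
node.primitive_ = reinterpret_cast<PrimitiveC *>(&param);
int ret = DoActivation(ins, outs, &node, nullptr);  // allocator is unused in the body above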

+0 -27 mindspore/lite/internal/src/kernel/fp32/activation.h

@@ -1,27 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_INTERNAL_SRC_KERNEL_FP32_ACTIVATION_H_
#define MINDSPORE_LITE_INTERNAL_SRC_KERNEL_FP32_ACTIVATION_H_

#include "internal/include/model.h"
#include "internal/src/allocator.h"

int DoActivationInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, OpParameter *param);
int DoActivation(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
mindspore::lite::Allocator *allocator);

#endif // MINDSPORE_LITE_INTERNAL_SRC_KERNEL_FP32_ACTIVATION_H_

+ 0
- 197
mindspore/lite/internal/src/kernel/fp32/arithmetic.cc View File

@@ -1,197 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "internal/src/kernel/fp32/arithmetic.h"
#include "internal/src/lite_log.h"
#include "internal/include/errorcode.h"
#include "internal/include/model.h"
#include "internal/include/ms_tensor.h"
#include "internal/include/lite_utils.h"
#include "nnacl/arithmetic_common.h"
#include "nnacl/fp32/arithmetic_fp32.h"

typedef int (*ArithmeticRun)(const float *input0, const float *input1, float *output, const int element_size);
typedef int (*ArithmeticOptRun)(const float *input0, const float *input1, float *output, const int element_size,
const ArithmeticParameter *param);

int BroadcastRun(float *input0, float *input1, float *output, int dim, int out_count, int break_pos,
ArithmeticRun arithmetic_run, ArithmeticParameter *params) {
if (dim > break_pos) {
return arithmetic_run(input0, input1, output, out_count);
}
for (int i = 0; i < params->out_shape_[dim]; ++i) {
int pos0_ = params->in_shape0_[dim] == 1 ? 0 : i;
int pos1_ = params->in_shape1_[dim] == 1 ? 0 : i;
int error_code =
BroadcastRun(input0 + pos0_ * params->in_strides0_[dim], input1 + pos1_ * params->in_strides1_[dim],
output + i * params->out_strides_[dim], dim + 1, out_count, break_pos, arithmetic_run, params);
if (error_code != RET_OK) {
return error_code;
}
}
return RET_OK;
}

int CalBroadCasting(const TensorPtrVector &in_tensors, int *outside, int *break_pos, ArithmeticParameter *params) {
params->broadcasting_ = false;
for (size_t i = 0; i < params->ndim_; ++i) {
if (params->in_shape0_[i] != params->in_shape1_[i]) {
if (params->in_shape0_[i] == 1) {
params->out_shape_[i] = params->in_shape1_[i];
} else if (params->in_shape1_[i] == 1) {
params->out_shape_[i] = params->in_shape0_[i];
} else {
LITE_LOG_ERROR("shapes of input tensors can not be broadCasted");
return RET_INPUT_TENSOR_ERROR;
}
params->broadcasting_ = true;
} else {
params->out_shape_[i] = params->in_shape0_[i];
}
}
if (params->broadcasting_) {
*outside = 1;
for (int i = static_cast<int>(params->ndim_) - 1; i >= 0; --i) {
if (params->in_shape0_[i] != params->in_shape1_[i]) {
*break_pos = i;
break;
}
(*outside) *= params->out_shape_[i];
}
ComputeStrides(params->in_shape0_, params->in_strides0_, params->ndim_);
ComputeStrides(params->in_shape1_, params->in_strides1_, params->ndim_);
ComputeStrides(params->out_shape_, params->out_strides_, params->ndim_);
}
return RET_OK;
}

int RunArithmetic(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, ArithmeticRun arithmetic_run,
ArithmeticOptRun arithmetic_opt_run, int outside, int break_pos, ArithmeticParameter *params) {
int error_code = RET_OK;
int count = out_tensors[0]->ElementsNum();
float *input0_data = reinterpret_cast<float *>(in_tensors[0]->data_);
float *input1_data1 = reinterpret_cast<float *>(in_tensors[1]->data_);
float *output_data = reinterpret_cast<float *>(out_tensors[0]->data_);
if (params->broadcasting_) {
error_code = BroadcastRun(input0_data, input1_data1, output_data, 0, outside, break_pos, arithmetic_run, params);
} else if (arithmetic_opt_run != NULL) {
error_code = arithmetic_opt_run(input0_data, input1_data1, output_data, count, params);
} else {
error_code = arithmetic_run(input0_data, input1_data1, output_data, count);
}
if (error_code != RET_OK) {
return error_code;
}
return RET_OK;
}

int DoArithmeticInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, OpParameter *param) {
if (in_tensors.size() != 2 || in_tensors[0]->data_ == NULL || in_tensors[1]->data_ == NULL) {
LITE_LOG_ERROR("input tensors num not correct or input data is NULL!");
return RET_INPUT_TENSOR_ERROR;
}
if (out_tensors.size() != 1) {
LITE_LOG_ERROR("output tensors num not correct!");
return RET_ERROR;
}

int in_datatype[2] = {in_tensors[0]->data_type_, in_tensors[1]->data_type_};
int in_format[2] = {static_cast<int>(in_tensors[0]->format_), static_cast<int>(in_tensors[1]->format_)};
size_t dim_size[2] = {in_tensors[0]->shape_.size(), in_tensors[1]->shape_.size()};
int *in_shape[2] = {in_tensors[0]->shape_.data(), in_tensors[1]->shape_.data()};
int out_format;
int out_datatype;
int ret = ArithmeticInferShape(in_shape, dim_size, out_tensors[0]->shape_.data(), in_format, &out_format, in_datatype,
&out_datatype, param);
if (ret != NNACL_OK) {
LITE_ERROR_LOG("arithmetic infershape failed! ret: %d", ret);
return RET_ERROR;
}
out_tensors[0]->format_ = static_cast<Format>(out_format);
out_tensors[0]->data_type_ = static_cast<TypeId>(out_datatype);
return RET_OK;
}

int ChooseKernel(const int kernel_type, ArithmeticRun *arithmetic_run, ArithmeticParameter *params) {
if (kernel_type == KernelType::KernelType_Mul) {
if (params->activation_type_ == ActivationType::RELU) {
*arithmetic_run = ElementMulRelu;
} else if (params->activation_type_ == ActivationType::RELU6) {
*arithmetic_run = ElementMulRelu6;
} else {
*arithmetic_run = ElementMul;
}
} else {
LITE_LOG_INFO("unsupported operator type");
return RET_ERROR;
}
return RET_OK;
}

int ChooseOptKernel(const int kernel_type, ArithmeticOptRun *arithmetic_opt_run, ArithmeticParameter *params) {
if (kernel_type == KernelType::KernelType_Mul) {
if (params->activation_type_ == ActivationType::RELU) {
*arithmetic_opt_run = ElementOptMulRelu;
} else if (params->activation_type_ == ActivationType::RELU6) {
*arithmetic_opt_run = ElementOptMulRelu6;
} else {
*arithmetic_opt_run = ElementOptMul;
}
} else {
LITE_LOG_INFO("kernel not have opt version");
}
return RET_OK;
}

int DoArithmetic(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
mindspore::lite::Allocator *allocator) {
if (in_tensors.size() != 2 || in_tensors[0]->data_ == NULL || in_tensors[1]->data_ == NULL) {
LITE_LOG_ERROR("input tensors num not correct or input data is NULL!");
return RET_INPUT_TENSOR_ERROR;
}
if (out_tensors.size() != 1 || out_tensors[0]->data_ == NULL) {
LITE_LOG_ERROR("output tensors num not correct or output data is NULL!");
return RET_ERROR;
}
if (allocator == NULL) {
LITE_LOG_ERROR("allocator is NULL!");
return RET_ERROR;
}
ArithmeticParameter *params = reinterpret_cast<ArithmeticParameter *>(node->primitive_);

ArithmeticRun arithmetic_run = NULL;
int kernel_type = params->op_parameter_.type_;
int status = ChooseKernel(kernel_type, &arithmetic_run, params);
if (status != RET_OK) {
return status;
}
int outside = 0;
int break_pos = 0;
// when one of input only has one element
params->in_elements_num0_ = in_tensors[0]->ElementsNum();
params->in_elements_num1_ = in_tensors[1]->ElementsNum();
params->out_elements_num_ = out_tensors[0]->ElementsNum();
ArithmeticOptRun arithmetic_opt_run = NULL;
if (params->in_elements_num0_ == 1 || params->in_elements_num1_ == 1) {
params->broadcasting_ = false;
ChooseOptKernel(kernel_type, &arithmetic_opt_run, params);
} else {
int ret = CalBroadCasting(in_tensors, &outside, &break_pos, params);
if (ret != RET_OK) {
return ret;
}
}
return RunArithmetic(in_tensors, out_tensors, arithmetic_run, arithmetic_opt_run, outside, break_pos, params);
}
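
CalBroadCasting above applies the usual NumPy-style rule: at each dimension the sizes must match or one of them must be 1, and strides are precomputed so BroadcastRun can recurse down to the last position where the shapes diverge (break_pos) and hand the matching tail to one flat element-wise call. A compact sketch of the shape-and-stride part, assuming both shapes are already padded to a common rank as the kernel requires:

#include <cstddef>

// Fills out[] and row-major strides for both inputs; returns 0 on success.
int ToyBroadcastShapes(const int *a, const int *b, int *out, int ndim,
                       int *stride_a, int *stride_b) {
  for (int i = 0; i < ndim; ++i) {
    if (a[i] != b[i] && a[i] != 1 && b[i] != 1) {
      return -1;  // not broadcastable, cf. RET_INPUT_TENSOR_ERROR above
    }
    out[i] = a[i] == 1 ? b[i] : a[i];
  }
  int sa = 1, sb = 1;
  for (int i = ndim - 1; i >= 0; --i) {  // innermost stride is 1
    stride_a[i] = sa;
    stride_b[i] = sb;
    sa *= a[i];
    sb *= b[i];
  }
  return 0;
}

// Example: shapes {2,1,3} and {1,4,3} broadcast to {2,4,3}.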

+ 0
- 29
mindspore/lite/internal/src/kernel/fp32/arithmetic.h View File

@@ -1,29 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef INTERNAL_SRC_RUNTIME_KERNEL_MUL_H_
#define INTERNAL_SRC_RUNTIME_KERNEL_MUL_H_

#include "internal/include/model.h"
#include "internal/include/lite_utils.h"
#include "internal/src/allocator.h"
#include "nnacl/arithmetic_common.h"

int DoArithmeticInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, OpParameter *param);

int DoArithmetic(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
mindspore::lite::Allocator *allocator);

#endif // INTERNAL_SRC_RUNTIME_KERNEL_MUL_H_

+ 0
- 47
mindspore/lite/internal/src/kernel/fp32/arithmetic_self.cc View File

@@ -1,47 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "internal/src/kernel/fp32/arithmetic_self.h"
#include "internal/src/kernel/common/common_infershape.h"
#include "internal/include/errorcode.h"
#include "internal/include/ms_tensor.h"
#include "internal/src/lite_log.h"
#include "nnacl/fp32/arithmetic_self_fp32.h"

int DoArithmeticSelfInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors,
OpParameter *param) {
return DoCommonInferShape(in_tensors, out_tensors);
}

int DoArithmeticSelf(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
mindspore::lite::Allocator *allocator) {
size_t data_size = in_tensors[0]->ElementsNum();
OpParameter *param = node->primitive_;
int ret;
if (param->type_ == KernelType::KernelType_Log) {
ret = ElementLog((float *)in_tensors[0]->data_, (float *)out_tensors[0]->data_, data_size);
} else if (param->type_ == KernelType::KernelType_Neg) {
ret = ElementNegative((float *)in_tensors[0]->data_, (float *)out_tensors[0]->data_, data_size);
} else {
LITE_ERROR_LOG("Unsupport kernel type: %d", param->type_);
return RET_PARAM_INVALID;
}
if (ret != NNACL_OK) {
LITE_ERROR_LOG("do arithmetic %d fail!ret: %d", param->type_, ret);
return RET_ERROR;
}
return RET_OK;
}

+ 0
- 28
mindspore/lite/internal/src/kernel/fp32/arithmetic_self.h View File

@@ -1,28 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_INTERNAL_SRC_KERNEL_FP32_ARITHMETIC_SELF_H_
#define MINDSPORE_LITE_INTERNAL_SRC_KERNEL_FP32_ARITHMETIC_SELF_H_

#include "internal/include/model.h"
#include "internal/src/allocator.h"

int DoArithmeticSelfInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors,
OpParameter *param);
int DoArithmeticSelf(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
mindspore::lite::Allocator *allocator);

#endif // MINDSPORE_LITE_INTERNAL_SRC_KERNEL_FP32_ARITHMETIC_SELF_H_

+ 0
- 71
mindspore/lite/internal/src/kernel/fp32/bias_add.cc View File

@@ -1,71 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "internal/src/kernel/fp32/bias_add.h"
#include "internal/src/kernel/common/common_infershape.h"
#include "internal/include/model.h"
#include "internal/include/ms_tensor.h"
#include "internal/include/lite_utils.h"
#include "internal/src/lite_log.h"
#include "internal/include/errorcode.h"
#include "nnacl/arithmetic_common.h"
#include "nnacl/fp32/arithmetic_fp32.h"

int DoBiasAddInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, OpParameter *param) {
return DoCommonInferShape(in_tensors, out_tensors);
}

int DoBiasAdd(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
mindspore::lite::Allocator *allocator) {
if (in_tensors.size() != 2 || in_tensors[0]->data_ == NULL || in_tensors[1]->data_ == NULL) {
LITE_LOG_ERROR("input tensors num not correct or input data is NULL!");
return RET_INPUT_TENSOR_ERROR;
}
if (out_tensors.size() != 1 || out_tensors[0]->data_ == NULL) {
LITE_LOG_ERROR("output tensors num not correct or output data is NULL!");
return RET_ERROR;
}
if (allocator == NULL) {
LITE_LOG_ERROR("allocator is NULL!");
return RET_ERROR;
}
ArithmeticParameter *params = reinterpret_cast<ArithmeticParameter *>(node->primitive_);

ShapeVector dims = in_tensors[0]->shape_;
params->ndim_ = dims.size();
for (size_t i = 0; i < params->ndim_; i++) {
params->in_shape0_[i] = dims[i];
params->in_shape1_[i] = 1;
params->out_shape_[i] = dims[i];
}
params->in_shape1_[params->ndim_ - 1] = dims[params->ndim_ - 1];

float *in = reinterpret_cast<float *>(in_tensors[0]->data_);
float *bias = reinterpret_cast<float *>(in_tensors[1]->data_);
float *out = reinterpret_cast<float *>(out_tensors[0]->data_);
size_t data_size = in_tensors[0]->ElementsNum();
float *tile_in = reinterpret_cast<float *>(allocator->Malloc(data_size * sizeof(float)));
float *tile_bias = reinterpret_cast<float *>(allocator->Malloc(data_size * sizeof(float)));
if (tile_in == NULL || tile_bias == NULL) {
LITE_LOG_ERROR("Memory allocation failed!");
allocator->Free(tile_in);
allocator->Free(tile_bias);
return RET_ERROR;
}
BroadcastAdd(in, bias, tile_in, tile_bias, out, data_size, params);
allocator->Free(tile_in);
allocator->Free(tile_bias);
return RET_OK;
}
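
The tiling above sets in_shape1_ to all ones except the last dimension, so the net effect is adding a length-C bias vector along the innermost axis of the input. A direct sketch of that effect without the intermediate tile buffers (illustrative, not the nnacl BroadcastAdd implementation):

#include <cstddef>

// out[i] = in[i] + bias[i % channels], where channels is the last-dim size.
void ToyBiasAdd(const float *in, const float *bias, float *out,
                size_t total, size_t channels) {
  for (size_t i = 0; i < total; ++i) {
    out[i] = in[i] + bias[i % channels];
  }
}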

+ 0
- 28
mindspore/lite/internal/src/kernel/fp32/bias_add.h View File

@@ -1,28 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef INTERNAL_SRC_RUNTIME_KERNEL_BIAS_H_
#define INTERNAL_SRC_RUNTIME_KERNEL_BIAS_H_

#include "internal/include/model.h"
#include "internal/include/lite_utils.h"
#include "internal/src/allocator.h"

int DoBiasAddInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, OpParameter *param);

int DoBiasAdd(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
mindspore::lite::Allocator *allocator);

#endif // INTERNAL_SRC_RUNTIME_KERNEL_BIAS_H_

+ 0
- 176
mindspore/lite/internal/src/kernel/fp32/matmul.cc View File

@@ -1,176 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "internal/src/kernel/fp32/matmul.h"
#include "nnacl/fp32/matmul_fp32.h"
#include "internal/include/errorcode.h"
#include "internal/include/ms_tensor.h"
#include "internal/src/lite_log.h"

typedef struct MatMulCPUKernelData {
float *a_c12_ptr_;
float *b_r8_ptr_;
float *bias_ptr_;
} MatMulCPUKernelData;

void MatMulInitMatrixA(float *src_ptr, float *dst_ptr, MatMulParameter *params) {
for (int i = 0; i < params->batch; i++) {
float *src = src_ptr + i * params->deep_ * params->row_;
float *dst = dst_ptr + i * params->deep_ * params->row_12_;
if (params->a_transpose_) {
RowMajor2Row12Major(src, dst, params->deep_, params->row_);
} else {
RowMajor2Col12Major(src, dst, params->row_, params->deep_);
}
}
}

void MatMulInitMatrixB(float *src_ptr, float *dst_ptr, MatMulParameter *params) {
for (int i = 0; i < params->batch; i++) {
float *src = src_ptr + i * params->deep_ * params->col_;
float *dst = dst_ptr + i * params->deep_ * params->col_8_;
if (params->b_transpose_) {
RowMajor2Col8Major(src, dst, params->col_, params->deep_);
} else {
RowMajor2Row8Major(src, dst, params->deep_, params->col_);
}
}
}

void FreeMatMulKernelData(MatMulCPUKernelData *kernel_data, mindspore::lite::Allocator *allocator) {
if (kernel_data == NULL) {
return;
}
if (kernel_data->a_c12_ptr_ != NULL) {
allocator->Free(kernel_data->a_c12_ptr_);
kernel_data->a_c12_ptr_ = NULL;
}

if (kernel_data->b_r8_ptr_ != NULL) {
allocator->Free(kernel_data->b_r8_ptr_);
kernel_data->b_r8_ptr_ = NULL;
}

if (kernel_data->bias_ptr_ != NULL) {
allocator->Free(kernel_data->bias_ptr_);
kernel_data->bias_ptr_ = NULL;
}
free(kernel_data);
}

int DoMatMulInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, OpParameter *param) {
TensorPtr input0 = in_tensors.at(0);
MS_ASSERT(input0 != nullptr);
TensorPtr input1 = in_tensors.at(1);
MS_ASSERT(input1 != nullptr);
TensorPtr output = out_tensors.at(0);
MS_ASSERT(output != nullptr);

int in_datatype[2] = {input0->data_type_, input1->data_type_};
int in_format[2] = {static_cast<int>(input0->format_), static_cast<int>(input1->format_)};
size_t dim_size[2] = {input0->shape_.size(), input1->shape_.size()};
int *in_shape[2] = {input0->shape_.data(), input1->shape_.data()};
int out_format;
int out_datatype;
output->shape_.resize(input0->shape_.size());
int ret = MatMulInferShape(in_shape, 2, dim_size, output->shape_.data(), in_format, &out_format, in_datatype,
&out_datatype, param);
if (ret != NNACL_OK) {
LITE_ERROR_LOG("matmul infershape fail!ret: %d", ret);
return RET_ERROR;
}
output->format_ = static_cast<Format>(out_format);
output->data_type_ = static_cast<TypeId>(out_datatype);
return RET_OK;
}

int DoMatMul(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
mindspore::lite::Allocator *allocator) {
if (in_tensors[0]->data_ == NULL || in_tensors[1]->data_ == NULL) {
LITE_LOG_ERROR("input data is NULL!");
return RET_PARAM_INVALID;
}
if (allocator == NULL) {
LITE_LOG_ERROR("input allocator is NULL!");
return RET_PARAM_INVALID;
}
int batch = 1;
ShapeVector a_shape = in_tensors[0]->shape_;
ShapeVector c_shape = out_tensors[0]->shape_;
if (in_tensors.size() == 3) {
ShapeVector bias_shape = in_tensors[2]->shape_;
if (bias_shape[bias_shape.size() - 1] != c_shape[c_shape.size() - 1]) {
LITE_ERROR_LOG("The bias' dimension %d is not equal with column %d", bias_shape[bias_shape.size() - 1],
c_shape[c_shape.size() - 1]);
return RET_INPUT_TENSOR_ERROR;
}
}
for (size_t i = 0; i < a_shape.size() - 2; ++i) {
batch *= a_shape[i];
}

MatMulParameter *params = (MatMulParameter *)node->primitive_;
params->batch = batch;
params->row_ = c_shape[c_shape.size() - 2];
params->col_ = c_shape[c_shape.size() - 1];
params->deep_ = params->a_transpose_ ? a_shape[a_shape.size() - 2] : a_shape[a_shape.size() - 1];
params->row_12_ = UP_ROUND(params->row_, C12NUM);
params->col_8_ = UP_ROUND(params->col_, 8);

MatMulCPUKernelData *kernel_data = (MatMulCPUKernelData *)malloc(sizeof(MatMulCPUKernelData));
if (kernel_data == NULL) {
LITE_LOG_ERROR("Malloc MatMulCPUKernelData failed");
return RET_MEMORY_FAILED;
}
kernel_data->a_c12_ptr_ =
reinterpret_cast<float *>(allocator->Malloc(params->batch * params->row_12_ * params->deep_ * sizeof(float)));
if (kernel_data->a_c12_ptr_ == NULL) {
FreeMatMulKernelData(kernel_data, allocator);
return RET_MEMORY_FAILED;
}
memset(kernel_data->a_c12_ptr_, 0, params->batch * params->row_12_ * params->deep_ * sizeof(float));

kernel_data->b_r8_ptr_ =
reinterpret_cast<float *>(allocator->Malloc(params->batch * params->col_8_ * params->deep_ * sizeof(float)));
if (kernel_data->b_r8_ptr_ == NULL) {
FreeMatMulKernelData(kernel_data, allocator);
return RET_MEMORY_FAILED;
}
memset(kernel_data->b_r8_ptr_, 0, params->batch * params->col_8_ * params->deep_ * sizeof(float));

MatMulInitMatrixA((float *)in_tensors[0]->data_, kernel_data->a_c12_ptr_, params);
MatMulInitMatrixB((float *)in_tensors[1]->data_, kernel_data->b_r8_ptr_, params);
kernel_data->bias_ptr_ = (float *)(allocator->Malloc(params->col_8_ * sizeof(float)));
if (kernel_data->bias_ptr_ == NULL) {
FreeMatMulKernelData(kernel_data, allocator);
return RET_MEMORY_FAILED;
}
memset(kernel_data->bias_ptr_, 0, params->col_8_ * sizeof(float));

if (in_tensors.size() == 3) {
memcpy(kernel_data->bias_ptr_, in_tensors[2]->data_, params->col_ * sizeof(float));
}
auto c_src = (float *)out_tensors[0]->data_;
for (int i = 0; i < params->batch; ++i) {
float *a_ptr = kernel_data->a_c12_ptr_ + i * params->row_12_ * params->deep_;
float *b_ptr = kernel_data->b_r8_ptr_ + i * params->deep_ * params->col_8_;
float *c_ptr = c_src + i * params->row_ * params->col_;
MatMulOpt(a_ptr, b_ptr, c_ptr, kernel_data->bias_ptr_, ActType_No, params->deep_, params->row_, params->col_,
params->col_, OutType_Nhwc);
}

return RET_OK;
}
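
The row_12_/col_8_ fields pad the output tile sizes so the packed panels produced by the RowMajor2* routines line up with the 12x8 micro-kernel that MatMulOpt expects. UP_ROUND is the usual round-up-to-multiple helper; the definition below matches the common nnacl form but is restated here as an assumption:

#include <cstdio>

// Assumed equivalent of nnacl's UP_ROUND(x, y): round x up to a multiple of y.
#define TOY_UP_ROUND(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main() {
  int row = 17, col = 5;
  // A 17x5 result is packed into 24-row / 8-column panels.
  printf("row_12 = %d, col_8 = %d\n", TOY_UP_ROUND(row, 12), TOY_UP_ROUND(col, 8));
  return 0;
}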

+ 0
- 27
mindspore/lite/internal/src/kernel/fp32/matmul.h View File

@@ -1,27 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_INTERNAL_SRC_KERNEL_FP32_MATMUL_H_
#define MINDSPORE_LITE_INTERNAL_SRC_KERNEL_FP32_MATMUL_H_

#include "internal/include/model.h"
#include "internal/src/allocator.h"

int DoMatMulInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, OpParameter *param);
int DoMatMul(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
mindspore::lite::Allocator *allocator);

#endif // MINDSPORE_LITE_INTERNAL_SRC_KERNEL_FP32_MATMUL_H_

+ 0
- 162
mindspore/lite/internal/src/kernel/fp32/reduce.cc View File

@@ -1,162 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "internal/src/kernel/fp32/reduce.h"
#include "internal/include/model.h"
#include "internal/include/lite_utils.h"
#include "internal/src/lite_log.h"
#include "internal/include/errorcode.h"
#include "nnacl/reduce_parameter.h"
#include "nnacl/fp32/reduce_fp32.h"
#include "nnacl/errorcode.h"

typedef int (*Reducer)(const int outer_size, const int inner_size, const int axis_size, const float *src_data,
float *dst_data, const int tid, const int thread_num);

int MallocTmpBuffer(float *data_buffers[], const ShapeVector &shape, const int *axes, const int num_axes,
mindspore::lite::Allocator *allocator) {
ShapeVector input_shape = shape;
const int rank = input_shape.size();
for (auto i = 0; i < num_axes - 1; i++) {
int axis = axes[i];
size_t size = 1;
for (int j = 0; j < rank; j++) {
if (axis != j) {
size *= input_shape[j];
}
}
float *buffer = reinterpret_cast<float *>(allocator->Malloc(size * sizeof(float)));
if (buffer == nullptr) {
LITE_LOG_ERROR("Memory allocation failed!");
return RET_ERROR;
}
data_buffers[i] = buffer;
input_shape[axis] = 1;
}
return RET_OK;
}

void FreeTmpBuffer(float *data_buffers[], int size, mindspore::lite::Allocator *allocator) {
if (data_buffers == nullptr) {
return;
}
for (int i = 0; i < size; ++i) {
allocator->Free(data_buffers[i]);
data_buffers[i] = nullptr;
}
}

int RunReduce(Reducer reducer, float *data_buffers[], float *in_data, float *out_data, ReduceParameter *params,
ShapeVector shape) {
int rank = shape.size();
float *dst_data = NULL;
float *src_data = in_data;
ShapeVector tmp_shape = shape;
for (int i = 0; i < params->num_axes_; ++i) {
if (i != params->num_axes_ - 1) {
dst_data = data_buffers[i];
} else {
dst_data = out_data;
}
int axis = params->axes_[i];
int outer_size = 1;
for (int j = 0; j < axis; j++) {
outer_size *= tmp_shape[j];
}
int inner_size = 1;
for (int k = axis + 1; k < rank; k++) {
inner_size *= tmp_shape[k];
}
int axis_size = tmp_shape[axis];
int error_code = reducer(outer_size, inner_size, axis_size, src_data, dst_data, 0, 1);
if (error_code != RET_OK) {
LITE_LOG_ERROR("Reduce run error!");
return RET_ERROR;
}
tmp_shape[axis] = 1;
src_data = dst_data;
}
return RET_OK;
}

int DoReduceInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, OpParameter *param) {
if (in_tensors.size() != 1 || in_tensors[0]->data_ == NULL) {
LITE_LOG_ERROR("input tensors num not correct or input data is NULL!");
return RET_INPUT_TENSOR_ERROR;
}
if (out_tensors.size() != 1) {
LITE_LOG_ERROR("output tensors num not correct!");
return RET_ERROR;
}

int in_datatype[1] = {in_tensors[0]->data_type_};
int in_format[1] = {static_cast<int>(in_tensors[0]->format_)};
size_t dim_size[1] = {in_tensors[0]->shape_.size()};
int *in_shape[1] = {in_tensors[0]->shape_.data()};
int out_format;
int out_datatype;
int ret = ReduceInferShape(in_shape, dim_size, out_tensors[0]->shape_.data(), in_format, &out_format, in_datatype,
&out_datatype, param);
if (ret != NNACL_OK) {
LITE_ERROR_LOG("arithmetic infershape failed! ret: %d", ret);
return RET_ERROR;
}
out_tensors[0]->data_type_ = in_tensors[0]->data_type_;
out_tensors[0]->format_ = in_tensors[0]->format_;
return RET_OK;
}

int DoReduce(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
mindspore::lite::Allocator *allocator) {
if (in_tensors.size() != 1 || in_tensors[0]->data_ == NULL) {
LITE_LOG_ERROR("input tensors num not correct or input data is NULL!");
return RET_INPUT_TENSOR_ERROR;
}
if (out_tensors.size() != 1 || out_tensors[0]->data_ == NULL) {
LITE_LOG_ERROR("output tensors num not correct or output data is NULL!");
return RET_ERROR;
}
if (allocator == NULL) {
LITE_LOG_ERROR("allocator is NULL!");
return RET_ERROR;
}

ReduceParameter *params = reinterpret_cast<ReduceParameter *>(node->primitive_);
Reducer reducer = NULL;
if (params->mode_ == ReduceMode::ReduceMode_ReduceSum) {
reducer = ReduceSum;
} else if (params->mode_ == ReduceMode::ReduceMode_ReduceMean) {
reducer = ReduceMean;
} else {
LITE_ERROR_LOG("Unsupported reduce mode: %d", params->mode_);
return RET_PARAM_INVALID;
}

int buf_num = params->num_axes_ - 1;
float *data_buffers[buf_num];
int status = MallocTmpBuffer(data_buffers, in_tensors[0]->shape_, params->axes_, params->num_axes_, allocator);
if (status != RET_OK) {
FreeTmpBuffer(data_buffers, buf_num, allocator);
return status;
}

status = RunReduce(reducer, data_buffers, reinterpret_cast<float *>(in_tensors[0]->data_),
reinterpret_cast<float *>(out_tensors[0]->data_), params, in_tensors[0]->shape_);

FreeTmpBuffer(data_buffers, buf_num, allocator);

if (status != RET_OK) {
return RET_ERROR;
}
return RET_OK;
}
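
RunReduce lowers a multi-axis reduction to a chain of single-axis reductions: each step views the data as [outer, axis, inner], collapses the middle dimension into a temp buffer, and the final step writes into the output tensor. A standalone sketch of one such step in sum mode (single-threaded; the real ReduceSum also takes tid/thread_num):

#include <cstddef>

// src is laid out as [outer, axis, inner]; dst receives [outer, inner].
void ToyReduceSumStep(int outer, int axis, int inner, const float *src, float *dst) {
  for (int o = 0; o < outer; ++o) {
    for (int i = 0; i < inner; ++i) {
      float acc = 0.0f;
      for (int a = 0; a < axis; ++a) {
        acc += src[(o * axis + a) * inner + i];
      }
      dst[o * inner + i] = acc;
    }
  }
}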

+ 0
- 29
mindspore/lite/internal/src/kernel/fp32/reduce.h View File

@@ -1,29 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef INTERNAL_SRC_KERNEL_FP32_REDUCE_COMMON_H_
#define INTERNAL_SRC_KERNEL_FP32_REDUCE_COMMON_H_

#include "internal/include/model.h"
#include "internal/include/ms_tensor.h"
#include "internal/include/lite_utils.h"
#include "internal/src/allocator.h"

int DoReduceInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, OpParameter *param);

int DoReduce(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
mindspore::lite::Allocator *allocator);

#endif // INTERNAL_SRC_KERNEL_FP32_REDUCE_COMMON_H_

+ 0
- 56
mindspore/lite/internal/src/kernel/fp32_grad/activation_grad.cc View File

@@ -1,56 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "internal/src/kernel/fp32_grad/activation_grad.h"
#include "internal/src/kernel/common/common_infershape.h"
#include "internal/include/errorcode.h"
#include "internal/include/ms_tensor.h"
#include "nnacl/fp32_grad/activation_grad.h"
#include "internal/src/lite_log.h"
#include "nnacl/errorcode.h"

int DoActivationGradInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors,
OpParameter *param) {
return DoCommonInferShape(in_tensors, out_tensors);
}

int DoActivationGrad(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
mindspore::lite::Allocator *allocator) {
ActivationGradParameter *param = (ActivationGradParameter *)node->primitive_;
int ret = RET_OK;
size_t length = in_tensors[0]->ElementsNum();
float *dy_data = (float *)in_tensors[0]->data_;
float *x_data = (float *)in_tensors[1]->data_;
float *dx_data = (float *)out_tensors[0]->data_;
if (param->type_ == ActivationType::RELU) {
ret = ReluGrad(dy_data, x_data, length, dx_data);
} else if (param->type_ == ActivationType::SIGMOID) {
ret = SigmoidGrad(dy_data, x_data, length, dx_data);
} else if (param->type_ == ActivationType::RELU6) {
ret = Relu6Grad(dy_data, x_data, length, dx_data);
} else if (param->type_ == ActivationType::LEAKY_RELU) {
float alpha = param->alpha_;
ret = LReluGrad(dy_data, x_data, length, dx_data, alpha);
} else {
LITE_ERROR_LOG("Unsupport activation type %d", param->type_);
return RET_PARAM_INVALID;
}
if (ret != NNACL_OK) {
LITE_ERROR_LOG("do activation(%d) fail!ret: %d", param->type_, ret);
return RET_ERROR;
}
return RET_OK;
}
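
For the ReLU family the backward pass is an element-wise mask on the forward input; a sketch of the two unambiguous cases (dy is the incoming gradient, x the forward input; the sigmoid variant depends on nnacl internals not reproduced here):

#include <cstddef>

// ReLU backward: pass the gradient only where the forward input was positive.
void ToyReluGrad(const float *dy, const float *x, float *dx, size_t n) {
  for (size_t i = 0; i < n; ++i) dx[i] = x[i] > 0 ? dy[i] : 0.0f;
}

// Leaky-ReLU backward: scale the gradient by alpha on the negative side.
void ToyLReluGrad(const float *dy, const float *x, float *dx, size_t n, float alpha) {
  for (size_t i = 0; i < n; ++i) dx[i] = x[i] > 0 ? dy[i] : alpha * dy[i];
}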

+ 0
- 28
mindspore/lite/internal/src/kernel/fp32_grad/activation_grad.h View File

@@ -1,28 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_INTERNAL_SRC_KERNEL_FP32_GRAD_ACTIVATION_GRAD_H_
#define MINDSPORE_LITE_INTERNAL_SRC_KERNEL_FP32_GRAD_ACTIVATION_GRAD_H_

#include "internal/include/model.h"
#include "internal/src/allocator.h"

int DoActivationGradInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors,
OpParameter *param);
int DoActivationGrad(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
mindspore::lite::Allocator *allocator);

#endif // MINDSPORE_LITE_INTERNAL_SRC_KERNEL_FP32_GRAD_ACTIVATION_GRAD_H_

+ 0
- 51
mindspore/lite/internal/src/kernel/fp32_grad/arithmetic_self_grad.cc View File

@@ -1,51 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "internal/src/kernel/fp32_grad/arithmetic_self_grad.h"
#include "internal/src/kernel/common/common_infershape.h"
#include "internal/include/errorcode.h"
#include "internal/include/ms_tensor.h"
#include "internal/src/lite_log.h"
#include "nnacl/fp32/arithmetic_self_fp32.h"
#include "nnacl/fp32/arithmetic_fp32.h"

int DoArithmeticSelfGradInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors,
OpParameter *param) {
return DoCommonInferShape(in_tensors, out_tensors);
}

int DoArithmeticSelfGrad(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
mindspore::lite::Allocator *allocator) {
size_t data_size = in_tensors[0]->ElementsNum();
OpParameter *param = node->primitive_;
float *dy_data = reinterpret_cast<float *>(in_tensors[0]->data_);
float *x_data = reinterpret_cast<float *>(in_tensors[1]->data_);
float *dx_data = reinterpret_cast<float *>(out_tensors[0]->data_);
int ret;
if (param->type_ == KernelType::KernelType_LogGrad) {
ret = ElementDiv(dy_data, x_data, dx_data, data_size);
} else if (param->type_ == KernelType::KernelType_NegGrad) {
ret = ElementNegative(dy_data, dx_data, data_size);
} else {
LITE_ERROR_LOG("Unsupport kernel type: %d", param->type_);
return RET_PARAM_INVALID;
}
if (ret != NNACL_OK) {
LITE_ERROR_LOG("do arithmetic %d fail!ret: %d", param->type_, ret);
return RET_ERROR;
}
return RET_OK;
}

+ 0
- 28
mindspore/lite/internal/src/kernel/fp32_grad/arithmetic_self_grad.h View File

@@ -1,28 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_INTERNAL_SRC_KERNEL_FP32_GRAD_ARITHMETIC_SELF_GRAD_H_
#define MINDSPORE_LITE_INTERNAL_SRC_KERNEL_FP32_GRAD_ARITHMETIC_SELF_GRAD_H_

#include "internal/include/model.h"
#include "internal/src/allocator.h"

int DoArithmeticSelfGradInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors,
OpParameter *param);
int DoArithmeticSelfGrad(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
mindspore::lite::Allocator *allocator);

#endif // MINDSPORE_LITE_INTERNAL_SRC_KERNEL_FP32_GRAD_ARITHMETIC_SELF_GRAD_H_

+ 0
- 53
mindspore/lite/internal/src/lite_log.h View File

@@ -1,53 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_INTERNAL_SRC_LITE_LOG_H_
#define MINDSPORE_LITE_INTERNAL_SRC_LITE_LOG_H_

#include <stdlib.h>
#include <stdio.h>
#ifndef Release
#include <assert.h>
#endif

#ifdef Debug
#define LITE_DEBUG_LOG(format, ...) \
printf("[DEBUG] [%s %s] [%s] [%d] " format "\n", __DATE__, __TIME__, __FILE__, __LINE__, __VA_ARGS__)
#define LITE_INFO_LOG(format, ...) \
printf("[INFO] [%s %s] [%s] [%d] " format "\n", __DATE__, __TIME__, __FILE__, __LINE__, __VA_ARGS__)
#define LITE_LOG_INFO(...) printf("[INFO] [%s %s] [%s] [%d] %s\n", __DATE__, __TIME__, __FILE__, __LINE__, __VA_ARGS__)
#define LITE_WARNING_LOG(format, ...) \
printf("[WARNING] [%s %s] [%s] [%d] " format "\n", __DATE__, __TIME__, __FILE__, __LINE__, __VA_ARGS__)
#define LITE_ERROR_LOG(format, ...) \
printf("[ERROR] [%s %s] [%s] [%d] " format "\n", __DATE__, __TIME__, __FILE__, __LINE__, __VA_ARGS__)
#define LITE_LOG_ERROR(...) \
printf("[ERROR] [%s %s] [%s] [%d] %s\n", __DATE__, __TIME__, __FILE__, __LINE__, __VA_ARGS__)
#define MS_ASSERT(f) assert(f)
#define MS_C_EXCEPTION(...) \
printf("[EXCEPTION] [%s %s] [%s] [%d] %s\n", __DATE__, __TIME__, __FILE__, __LINE__, __VA_ARGS__); \
exit(1)
#else
#define LITE_DEBUG_LOG(...)
#define LITE_INFO_LOG(...)
#define LITE_LOG_INFO(...)
#define LITE_WARNING_LOG(...)
#define LITE_ERROR_LOG(...)
#define LITE_LOG_ERROR(...)
#define MS_ASSERT(f) ((void)0)
#define MS_C_EXCEPTION(...) exit(1)
#endif

#endif // MINDSPORE_LITE_INTERNAL_SRC_LITE_LOG_H_
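
Note the split between the printf-style macros (LITE_ERROR_LOG takes a format string plus arguments) and the single-string variants (LITE_LOG_ERROR routes one message through %s); outside Debug builds all of them expand to nothing and MS_ASSERT becomes a no-op. A minimal reproduction of that toggle:

#include <cstdio>

#ifdef Debug
#define TOY_ERROR_LOG(format, ...) \
  printf("[ERROR] [%s] [%d] " format "\n", __FILE__, __LINE__, __VA_ARGS__)
#else
#define TOY_ERROR_LOG(...)  // compiled out, as in the Release branch above
#endif

int main() {
  TOY_ERROR_LOG("run kernel failed! ret: %d", -1);  // prints only under -DDebug
  return 0;
}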

+ 0
- 218
mindspore/lite/internal/src/lite_session.cc View File

@@ -1,218 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "internal/include/lite_session.h"
#include "internal/include/model.h"
#include "internal/include/ms_tensor.h"
#include "internal/src/allocator.h"
#include "internal/include/errorcode.h"
#include "internal/src/lite_log.h"
#include "internal/src/kernel/fp32/activation.h"
#include "internal/src/kernel/fp32/arithmetic_self.h"
#include "internal/src/kernel/fp32/matmul.h"
#include "internal/src/kernel/fp32/arithmetic.h"
#include "internal/src/kernel/fp32/bias_add.h"
#ifdef SUPPORT_TRAIN
#include "internal/src/kernel/fp32_grad/arithmetic_self_grad.h"
#include "internal/src/kernel/fp32_grad/activation_grad.h"
#endif

static Context *g_ctx;
static Model *g_model;
static LiteSession g_session;
static mindspore::lite::Allocator g_allocator;
static bool g_infershape_interrupt = false;
static bool g_first_load = true;
typedef int (*InferShape)(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, OpParameter *param);
typedef int (*RunKernel)(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
mindspore::lite::Allocator *allocator);
static InferShape g_infershape_funcs[KernelType::KernelType_END];
static RunKernel g_runkernel_funcs[KernelType::KernelType_END];

static int ModelInferShape() {
NodePtrVector nodes = g_model->nodes_;
size_t nodes_size = nodes.size();
for (size_t i = 0; i < nodes_size; ++i) {
auto node = nodes[i];
if (node->primitive_ == NULL) {
LITE_LOG_ERROR("node's primitive is NULL!");
return RET_ERROR;
}
TensorPtrVector in_tensors;
for (size_t j = 0; j < node->input_indices_.size(); ++j) {
in_tensors.push_back(g_model->all_tensors_[node->input_indices_[j]]);
}
TensorPtrVector out_tensors;
for (size_t j = 0; j < node->output_indices_.size(); ++j) {
out_tensors.push_back(g_model->all_tensors_[node->output_indices_[j]]);
}
int type = node->primitive_->type_;
InferShape infershape = g_infershape_funcs[type];
if (infershape == NULL) {
LITE_ERROR_LOG("Unsupport kernel type: %d", type);
return RET_PARAM_INVALID;
}
int ret = (*infershape)(in_tensors, out_tensors, node->primitive_);
if (ret == RET_INFER_INVALID) {
g_infershape_interrupt = true;
LITE_INFO_LOG("%s inferShape shouldn't be done before runtime, inferShape interrupt!", node->name_.c_str());
}
if (ret != RET_OK) {
LITE_ERROR_LOG("Infer shape fail!ret: %d", ret);
return ret;
}
}
return RET_OK;
}

static void InitFuncs() {
if (g_first_load) {
g_infershape_funcs[KernelType::KernelType_MatMul] = DoMatMulInferShape;
g_infershape_funcs[KernelType::KernelType_Activation] = DoActivationInferShape;
g_infershape_funcs[KernelType::KernelType_Log] = DoArithmeticSelfInferShape;
g_infershape_funcs[KernelType::KernelType_Neg] = DoArithmeticSelfInferShape;
g_infershape_funcs[KernelType::KernelType_Mul] = DoArithmeticInferShape;
g_infershape_funcs[KernelType::KernelType_BiasAdd] = DoBiasAddInferShape;

g_runkernel_funcs[KernelType::KernelType_MatMul] = DoMatMul;
g_runkernel_funcs[KernelType::KernelType_Activation] = DoActivation;
g_runkernel_funcs[KernelType::KernelType_Log] = DoArithmeticSelf;
g_runkernel_funcs[KernelType::KernelType_Neg] = DoArithmeticSelf;
g_runkernel_funcs[KernelType::KernelType_Mul] = DoArithmetic;
g_runkernel_funcs[KernelType::KernelType_BiasAdd] = DoBiasAdd;
#ifdef SUPPORT_TRAIN
g_infershape_funcs[KernelType::KernelType_ActivationGrad] = DoActivationGradInferShape;
g_infershape_funcs[KernelType::KernelType_NegGrad] = DoArithmeticSelfGradInferShape;
g_infershape_funcs[KernelType::KernelType_LogGrad] = DoArithmeticSelfGradInferShape;

g_runkernel_funcs[KernelType::KernelType_NegGrad] = DoArithmeticSelfGrad;
g_runkernel_funcs[KernelType::KernelType_ActivationGrad] = DoActivationGrad;
g_runkernel_funcs[KernelType::KernelType_LogGrad] = DoArithmeticSelfGrad;
#endif
g_first_load = false;
}
}

LiteSession *LiteSession::CreateSession(Context *context) {
g_ctx = context;
return &g_session;
}

int LiteSession::CompileGraph(Model *model) {
InitFuncs();
g_model = model;
for (auto in : g_model->input_indices_) {
if (in >= g_model->all_tensors_.size()) {
LITE_LOG_ERROR("Invalid input indices!");
return RET_PARAM_INVALID;
}
g_model->all_tensors_[in]->data_ = g_allocator.Malloc(g_model->all_tensors_[in]->Size());
}
g_infershape_interrupt = false;
int ret = ModelInferShape();
if (ret != RET_OK && ret != RET_INFER_INVALID) {
return ret;
}
return RET_OK;
}

TensorPtrVector LiteSession::GetInputs() const {
TensorPtrVector in(g_model->input_indices_.size());
for (size_t i = 0; i < g_model->input_indices_.size(); ++i) {
auto index = g_model->input_indices_[i];
if (index >= g_model->all_tensors_.size()) {
LITE_ERROR_LOG("Invalid input index: %u", index);
return TensorPtrVector();
}
in.at(i) = g_model->all_tensors_[index];
}
return in;
}

TensorPtrVector LiteSession::GetInputsByName(const String &node_name) const { return TensorPtrVector(); }

TensorPtrVector LiteSession::GetOutputsByNodeName(const String &node_name) const { return TensorPtrVector(); }

TensorPtrVector LiteSession::GetOutputs() const {
TensorPtrVector out(g_model->output_indices_.size());
for (size_t i = 0; i < g_model->output_indices_.size(); ++i) {
auto index = g_model->output_indices_[i];
if (index >= g_model->all_tensors_.size()) {
LITE_ERROR_LOG("Invalid output index: %u", index);
return TensorPtrVector();
}
out.at(i) = g_model->all_tensors_[index];
}
return out;
}

int LiteSession::RunGraph() {
NodePtrVector nodes = g_model->nodes_;
size_t nodes_size = nodes.size();
for (size_t i = 0; i < nodes_size; ++i) {
auto node = nodes[i];
if (node->primitive_ == nullptr) {
LITE_LOG_ERROR("node's primitive is NULL!");
return RET_ERROR;
}
TensorPtrVector in_tensors;
for (size_t j = 0; j < node->input_indices_.size(); ++j) {
in_tensors.push_back(g_model->all_tensors_[node->input_indices_[j]]);
}
TensorPtrVector out_tensors;
for (size_t j = 0; j < node->output_indices_.size(); ++j) {
out_tensors.push_back(g_model->all_tensors_[node->output_indices_[j]]);
}
int type = node->primitive_->type_;
if (g_infershape_interrupt) {
InferShape infershape = g_infershape_funcs[type];
if (infershape == NULL) {
LITE_ERROR_LOG("Unsupport kernel type: %d", type);
return RET_PARAM_INVALID;
}
int ret = (*infershape)(in_tensors, out_tensors, node->primitive_);
if (ret != RET_OK) {
LITE_ERROR_LOG("InferShape fail!ret: %d", ret);
return ret;
}
}
for (size_t j = 0; j < out_tensors.size(); ++j) {
out_tensors[j]->data_ = g_allocator.Malloc(out_tensors[j]->Size());
if (out_tensors[j]->data_ == NULL) {
LITE_LOG_ERROR("Malloc data for out tensor fail!");
return RET_NULL_PTR;
}
}
RunKernel run_kernel = g_runkernel_funcs[type];
if (run_kernel == NULL) {
LITE_ERROR_LOG("Unsupport kernel type: %d", type);
return RET_PARAM_INVALID;
}

int ret = (*run_kernel)(in_tensors, out_tensors, node, &g_allocator);
if (ret != RET_OK) {
LITE_ERROR_LOG("run kernel fail!ret: %d", ret);
return ret;
}
}
g_infershape_interrupt = false;
return RET_OK;
}

StringVector LiteSession::GetOutputTensorNames() const { return StringVector(); }

MSTensor *LiteSession::GetOutputByTensorName(const String &tensor_name) const { return NULL; }

int LiteSession::Resize(const TensorPtrVector &inputs, const Int32VectorVector &dims) { return 0; }
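
For reference, the removed API was driven as CreateSession -> CompileGraph -> fill inputs -> RunGraph -> read outputs. A hedged sketch of a caller using only the entry points visible above (the include paths follow the removed internal headers; error handling is simplified and RET_OK == 0 is assumed):

#include "internal/include/lite_session.h"
#include "internal/include/context.h"
#include "internal/include/model.h"

int RunOnce(Model *model) {  // illustrative driver, not part of the removed sources
  Context ctx;
  LiteSession *session = LiteSession::CreateSession(&ctx);
  if (session == NULL || session->CompileGraph(model) != 0) {
    return -1;
  }
  TensorPtrVector inputs = session->GetInputs();
  // ... copy user data into inputs[i]->data_ here ...
  if (session->RunGraph() != 0) {
    return -1;
  }
  TensorPtrVector outputs = session->GetOutputs();
  return outputs.empty() ? -1 : 0;
}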

+ 0
- 240
mindspore/lite/internal/src/ms_tensor.cc View File

@@ -1,240 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "internal/include/string.h"
#include "internal/include/vector.h"
#include "internal/include/ms_tensor.h"
#include "internal/src/lite_log.h"

MSTensor *CreateTensor(TypeId data_type, const ShapeVector &shape) {
MSTensor *tensor = new MSTensor();
if (tensor == NULL) {
return NULL;
}
tensor->shape_ = shape;
tensor->data_type_ = data_type;
return tensor;
}

void DestroyTensor(MSTensor *ptr) {
if (ptr == nullptr) {
return;
}
delete ptr;
}

int MSTensor::ElementsNum() const {
int result = 1;
for (size_t i = 0; i < shape_.size(); ++i) {
result *= shape_.at(i);
}
return result;
}

size_t MSTensor::Size() const {
size_t size = 0;
switch (this->data_type_) {
case kNumberTypeFloat64:
size = sizeof(double);
break;
case kNumberTypeFloat:
case kNumberTypeFloat32:
size = sizeof(float);
break;
case kNumberTypeInt8:
size = sizeof(int8_t);
break;
case kNumberTypeUInt8:
size = sizeof(uint8_t);
break;
case kNumberTypeFloat16:
size = sizeof(int16_t);
break;
case kNumberTypeInt16:
size = sizeof(int16_t);
break;
case kNumberTypeInt32:
size = sizeof(int32_t);
break;
case kNumberTypeInt64:
size = sizeof(int64_t);
break;
case kNumberTypeUInt16:
size = sizeof(uint16_t);
break;
case kNumberTypeUInt32:
size = sizeof(uint32_t);
break;
case kNumberTypeUInt64:
size = sizeof(uint64_t);
break;
case kNumberTypeBool:
size = sizeof(bool);
break;
default:
LITE_ERROR_LOG("Not support the type: %d", this->data_type_);
return 0;
}
size *= (format_ == Format::Format_NC4HW4 || format_ == Format::Format_NHWC4) ? ElementsC4Num() : ElementsNum();

return size;
}
int32_t MSTensor::Batch() const {
if (this->shape_.size() != 4 && this->shape_.size() != 2) {
LITE_ERROR_LOG("Unsupported tensor shape: %zu", this->shape_.size());
return -1;
}
switch (this->format_) {
case Format::Format_NHWC:
case Format::Format_NHWC4:
case Format::Format_NCHW:
case Format::Format_NC4HW4:
case Format::Format_KCHW:
case Format::Format_KHWC:
case Format::Format_NC:
case Format::Format_NC4:
return this->shape_[0];
case Format::Format_HWCK:
case Format::Format_CHWK:
return this->shape_[3];
case Format::Format_HWKC:
return this->shape_[2];
case Format::Format_CKHW:
return this->shape_[1];
default:
LITE_ERROR_LOG("Unsupported format: %d", this->format_);
return -1;
}
}

int32_t MSTensor::Channel() const {
if (this->shape_.size() != 4 && this->shape_.size() != 2) {
LITE_ERROR_LOG("Unsupported tensor shape: %zu", this->shape_.size());
return -1;
}
switch (this->format_) {
case Format::Format_NCHW:
case Format::Format_KCHW:
case Format::Format_NC:
case Format::Format_NC4:
return this->shape_[1];
case Format::Format_HWCK:
return this->shape_[2];
case Format::Format_HWKC:
case Format::Format_NHWC:
case Format::Format_NHWC4:
case Format::Format_NC4HW4:
case Format::Format_KHWC:
return this->shape_[3];
case Format::Format_CKHW:
case Format::Format_CHWK:
return this->shape_[0];
default:
return -1;
}
}

int32_t MSTensor::Height() const {
if (this->shape_.size() != 4 && this->shape_.size() != 2) {
LITE_ERROR_LOG("Unsupported tensor shape: %zu", this->shape_.size());
return -1;
}
switch (this->format_) {
case Format::Format_NCHW:
case Format::Format_KCHW:
case Format::Format_CKHW:
return this->shape_[2];
case Format::Format_NHWC:
case Format::Format_NHWC4:
case Format::Format_NC4HW4:
case Format::Format_KHWC:
case Format::Format_CHWK:
return this->shape_[1];
case Format::Format_HWCK:
case Format::Format_HWKC:
case Format::Format_HW:
case Format::Format_HW4:
return this->shape_[0];
default:
LITE_ERROR_LOG("Unsupported format: %d", this->format_);
return -1;
}
}

int32_t MSTensor::Width() const {
if (this->shape_.size() != 4 && this->shape_.size() != 2) {
LITE_ERROR_LOG("Unsupported tensor shape: %zu", this->shape_.size());
return -1;
}
switch (this->format_) {
case Format::Format_NCHW:
case Format::Format_KCHW:
case Format::Format_CKHW:
return this->shape_[3];
case Format::Format_KHWC:
case Format::Format_NHWC:
case Format::Format_NHWC4:
case Format::Format_NC4HW4:
case Format::Format_CHWK:
return this->shape_[2];
case Format::Format_HWCK:
case Format::Format_HWKC:
case Format::Format_HW:
case Format::Format_HW4:
return this->shape_[1];
default:
return -1;
}
}

int MSTensor::ElementsC4Num() const {
int result = 0;
if (this->shape_.size() == 4) {
result = Batch() * Height() * Width() * ((Channel() + 3) / 4 * 4);
} else if (this->shape_.size() == 2) {
result = this->shape_[0] * ((this->shape_[1] + 3) / 4 * 4);
}
return result;
}

void *MSTensor::operator new(size_t sz) {
void *storage = malloc(sz);
if (storage == nullptr) {
MS_C_EXCEPTION("malloc tensor fail!");
}
return storage;
}

void *MSTensor::operator new[](size_t sz) {
void *storage = malloc(sz);
if (storage == nullptr) {
MS_C_EXCEPTION("malloc tensor array fail!");
}
return storage;
}

void MSTensor::operator delete(void *ptr, size_t sz) {
if (ptr == nullptr) {
return;
}
free(ptr);
}

void MSTensor::operator delete[](void *ptr, size_t sz) {
if (ptr == nullptr) {
return;
}
free(ptr);
}
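
Worked example for the C4-aligned branch of Size(): an NHWC4 float32 tensor of shape {1, 2, 2, 3} rounds its channel up to 4, so ElementsC4Num is 1*2*2*4 = 16 and Size() returns 64 bytes instead of the 48 that ElementsNum would give. The same arithmetic as a sketch:

#include <cstdio>

int main() {
  int n = 1, h = 2, w = 2, c = 3;
  int c4 = (c + 3) / 4 * 4;  // channel rounded up to a multiple of 4
  size_t bytes = (size_t)n * h * w * c4 * sizeof(float);
  printf("C4-aligned size: %zu bytes\n", bytes);  // prints 64
  return 0;
}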

+ 1
- 1
mindspore/lite/test/CMakeLists.txt View File

@@ -287,7 +287,7 @@ add_executable(lite-test ${TEST_SRC})

target_link_libraries(lite-test dl ${GTEST_LIBRARY})
if (PLATFORM_ARM64)
target_link_libraries(lite-test mslite_internal nnacl_fp16_mid nnacl_optimize_mid)
target_link_libraries(lite-test nnacl_fp16_mid nnacl_optimize_mid)
endif()

if (PLATFORM_ARM)


+ 0
- 68
mindspore/lite/test/ut/internal/CMakeLists.txt View File

@@ -1,68 +0,0 @@
set(TOP_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../../../..)
set(TEST_DIR ${TOP_DIR}/mindspore/lite/test)
set(LITE_DIR ${TOP_DIR}/mindspore/lite)

include_directories(${TOP_DIR})
include_directories(${TEST_DIR})
add_compile_definitions(ENABLE_NNACL_INFER_SHAPE)

STRING(REPLACE " -fvisibility=hidden " " -fvisibility=default " CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
STRING(REPLACE " -fvisibility=hidden " " -fvisibility=default " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")

### cpu kernel
file(GLOB KERNEL_OP_SRC
${LITE_DIR}/internal/src/kernel/*.cc
${LITE_DIR}/internal/src/kernel/common/*.cc
${LITE_DIR}/internal/src/kernel/fp32/*.cc
${LITE_DIR}/internal/src/kernel/fp32_grad/*.cc
${LITE_DIR}/nnacl/*.c
${LITE_DIR}/nnacl/fp32/*.c
${LITE_DIR}/nnacl/fp32_grad/*.c
${LITE_DIR}/nnacl/int8/*.c
${LITE_DIR}/nnacl/quantization/*.c
)

if (PLATFORM_ARM64)
# assembly
file(GLOB TEST_ASSEMBLY_SRC ${LITE_DIR}/nnacl/assembly/arm64/*.s
${LITE_DIR}/nnacl/assembly/arm64/*.S)

set_property(SOURCE ${TEST_ASSEMBLY_SRC} PROPERTY LANGUAGE C)
set(KERNEL_OP_SRC
${KERNEL_OP_SRC}
${TEST_ASSEMBLY_SRC}
)
endif()

### runtime framework
set(TEST_LITE_SRC
${LITE_DIR}/internal/src/common/string.cc
${LITE_DIR}/internal/src/lite_session.cc
${LITE_DIR}/internal/src/allocator.cc
${LITE_DIR}/internal/src/ms_tensor.cc
${LITE_DIR}/internal/src/common/string.cc
${LITE_DIR}/internal/src/common/vector.cc
${TOP_DIR}/mindspore/core/utils/log_adapter.cc
${TOP_DIR}/mindspore/core/gvar/logging_level.cc
)

### test src
file(GLOB_RECURSE TEST_CASE_KERNEL_SRC
${TEST_DIR}/ut/internal/*.cc
)

file(GLOB_RECURSE TEST_CASE_KERNEL_TRAIN_SRC
${TEST_DIR}/ut/src/runtime/kernel/arm/fp32_grad/*.cc
)

set(TEST_SRC
${TEST_LITE_SRC}
${TEST_CASE_KERNEL_SRC}
${KERNEL_OP_SRC}
${TEST_DIR}/common/common_test.cc
${TEST_DIR}/main.cc
)

add_executable(lite-test-internal ${TEST_SRC})

target_link_libraries(lite-test-internal dl ${GTEST_LIBRARY})

+ 0
- 99
mindspore/lite/test/ut/internal/allocator_test.cc View File

@@ -1,99 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "common/common_test.h"
#include "internal/include/model.h"
#include "internal/include/errorcode.h"
#include "nnacl/op_base.h"
#undef private
#define private public
#include "internal/src/allocator.h"
#undef private

namespace mindspore {
class AllocatorTest : public mindspore::CommonTest {
public:
AllocatorTest() {}
};

TEST_F(AllocatorTest, AllocatorTest1) {
lite::DefaultAllocator allocator;
constexpr int data1_size = 10 * sizeof(float);
ASSERT_EQ(allocator.allocated_list_[0], nullptr);
float *data1 = reinterpret_cast<float *>(allocator.Malloc(data1_size));
ASSERT_NE(data1, nullptr);
ASSERT_NE(allocator.allocated_list_[0], nullptr);

ASSERT_EQ(allocator.free_list_[0], nullptr);
allocator.Free(data1);
ASSERT_EQ(allocator.allocated_list_[0], nullptr);
ASSERT_NE(allocator.free_list_[0], nullptr);
}

TEST_F(AllocatorTest, AllocatorTest2) {
lite::DefaultAllocator allocator;
constexpr int data1_size = 10 * sizeof(float);
ASSERT_EQ(allocator.allocated_list_[0], nullptr);
float *data1 = reinterpret_cast<float *>(allocator.Malloc(data1_size));
ASSERT_NE(data1, nullptr);
ASSERT_NE(allocator.allocated_list_[0], nullptr);

constexpr int data2_size = (1024 << lite::kBlockRange);
ASSERT_EQ(allocator.large_mem_list_, nullptr);
float *data2 = reinterpret_cast<float *>(allocator.Malloc(data2_size));
ASSERT_NE(data2, nullptr);
ASSERT_NE(allocator.large_mem_list_, nullptr);

constexpr int data3_size = (1024 << 3);
ASSERT_EQ(allocator.allocated_list_[3], nullptr);
float *data3 = reinterpret_cast<float *>(allocator.Malloc(data3_size));
ASSERT_NE(data3, nullptr);
ASSERT_NE(allocator.allocated_list_[3], nullptr);

int expect_total_size = data1_size + data2_size + data3_size;
size_t total_size = allocator.GetTotalSize();
ASSERT_EQ(total_size, expect_total_size);

allocator.Clear();
total_size = allocator.GetTotalSize();
ASSERT_EQ(total_size, 0);
}

TEST_F(AllocatorTest, AllocatorTest3) {
lite::DefaultAllocator allocator;
constexpr int data1_size = 10 * sizeof(float);
ASSERT_EQ(allocator.allocated_list_[0], nullptr);
float *data1 = reinterpret_cast<float *>(allocator.Malloc(data1_size));
ASSERT_NE(data1, nullptr);
ASSERT_NE(allocator.allocated_list_[0], nullptr);

constexpr int data2_size = 11 * sizeof(float);
float *data2 = reinterpret_cast<float *>(allocator.Malloc(data2_size));
ASSERT_NE(data2, nullptr);

constexpr int data3_size = 12 * sizeof(float);
float *data3 = reinterpret_cast<float *>(allocator.Malloc(data3_size));
ASSERT_NE(data3, nullptr);

int expect_total_size = data1_size + data2_size + data3_size;
size_t total_size = allocator.GetTotalSize();
ASSERT_EQ(total_size, expect_total_size);

allocator.Free(data2);
total_size = allocator.GetTotalSize();
ASSERT_EQ(total_size, expect_total_size);
}
} // namespace mindspore

+ 0
- 80
mindspore/lite/test/ut/internal/infer_test.cc View File

@@ -1,80 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <cmath>
#include <memory>
#include "common/common_test.h"
#include "internal/include/model.h"
#include "internal/include/lite_session.h"
#include "internal/include/context.h"
#include "internal/include/errorcode.h"
#include "internal/include/ms_tensor.h"
#include "nnacl/op_base.h"

namespace mindspore {
class InferTest : public mindspore::CommonTest {
public:
InferTest() {}
};

TEST_F(InferTest, TestSession) {
Model model;
Node node;
node.name_ = String("node");
model.nodes_.push_back(&node);

node.node_type_ = NodeType::NodeType_CNode;
PrimitiveC prim;
prim.type_ = KernelType::Neg;
node.primitive_ = &prim;
node.input_indices_.push_back(0);
node.output_indices_.push_back(1);
ShapeVector shape(4);
shape[0] = 1;
shape[1] = 1;
shape[2] = 1;
shape[3] = 10;
MSTensor *in = CreateTensor(kNumberTypeFloat32, shape);
model.all_tensors_.push_back(in);
model.input_indices_.push_back(0);

MSTensor *out = CreateTensor(kNumberTypeFloat32, shape);
model.all_tensors_.push_back(out);
model.output_indices_.push_back(1);

LiteSession session;
session.CompileGraph(&model);
TensorPtrVector invec = session.GetInputs();
ASSERT_EQ(invec.size(), 1);
constexpr int kOutSize = 10;
float expect_out[kOutSize];
for (int i = 0; i < kOutSize; ++i) {
*(reinterpret_cast<float *>(in->data_) + i) = i + 1;
expect_out[i] = -(i + 1);
}
session.RunGraph();
TensorPtrVector outvec = session.GetOutputs();
ASSERT_EQ(outvec.size(), 1);
for (int i = 0; i < kOutSize; ++i) {
std::cout << *(reinterpret_cast<float *>(outvec.at(0)->data_) + i) << " ";
}
std::cout << "\n";
ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outvec.at(0)->data_), expect_out, kOutSize, 0.000001));
DestroyTensor(in);
DestroyTensor(out);
}

} // namespace mindspore

+ 0
- 99
mindspore/lite/test/ut/internal/src/kernel/fp32/arithmetic_fp32_test.cc View File

@@ -1,99 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "test/common/common_test.h"
#include "src/common/file_utils.h"
#include "schema/ops_generated.h"
#include "mindspore/lite/nnacl/fp32/arithmetic_fp32.h"
#include "internal/src/allocator.h"
#include "internal/include/model.h"
#include "internal/include/ms_tensor.h"
#include "internal/include/lite_utils.h"
#include "internal/src/kernel/fp32/arithmetic.h"
#include "gtest/gtest.h"

class TestInternalArithmeticFp32 : public mindspore::CommonTest {
public:
TestInternalArithmeticFp32() {}
};

TEST_F(TestInternalArithmeticFp32, MulTest) {
auto mul_param = new ArithmeticParameter();
mul_param->activation_type_ = mindspore::schema::ActivationType_NO_ACTIVATION;
mul_param->op_parameter_.type_ = KernelType_Mul;
mul_param->ndim_ = 4;
Node *node = new Node();
node->name_ = "Mul";
node->node_type_ = NodeType::NodeType_CNode;
node->primitive_ = reinterpret_cast<PrimitiveC *>(mul_param);
mindspore::lite::Allocator allocator;
/* 1x2x3x4 NHWC */
std::vector<float> data0 = {12.216284, 3.3466918, 15.327419, 5.234958, 0.804376, 9.952188,
14.727955, -8.080715, 13.71383, 8.055829, 6.5845337, -9.25232,
-4.24519, 11.550042, 9.262012, 1.2780352, 6.7263746, -3.9301445,
3.764492, -8.602078, -3.3558068, 13.619035, -2.6694393, 3.2008505};
std::vector<float> data1 = {0.16771512, 0.7336843, 0.6768286, 0.4453379};
std::vector<float> correct_out = {2.0488555, 2.4554152, 10.374036, 2.3313253, 0.13490601, 7.3017635,
9.968302, -3.5986485, 2.3000166, 5.910435, 4.4566007, -4.120409,
-0.71198255, 8.474085, 6.2687945, 0.5691575, 1.1281147, -2.8834853,
2.547916, -3.8308315, -0.56281954, 9.992072, -1.8067529, 1.42546};

TensorPtrVector in_tensors;
ShapeVector shape0(4);
shape0[0] = 1;
shape0[1] = 2;
shape0[2] = 3;
shape0[3] = 4;
MSTensor in0;
in0.data_ = data0.data();
in0.shape_ = shape0;
in0.data_type_ = TypeId::kNumberTypeFloat32;
in_tensors.push_back(&in0);

ShapeVector shape1(4);
shape1[0] = 1;
shape1[1] = 1;
shape1[2] = 1;
shape1[3] = 4;
MSTensor in1;
in1.data_ = data1.data();
in1.shape_ = shape1;
in1.data_type_ = TypeId::kNumberTypeFloat32;
in_tensors.push_back(&in1);

TensorPtrVector out_tensors;
MSTensor out0;
out0.shape_.resize(4);
out_tensors.push_back(&out0);

DoArithmeticInferShape(in_tensors, out_tensors, reinterpret_cast<OpParameter *>(mul_param));

ShapeVector out_shape0(4);
out_shape0[0] = 1;
out_shape0[1] = 2;
out_shape0[2] = 3;
out_shape0[3] = 4;
ASSERT_EQ(out_tensors.front()->shape_, out_shape0);

out_tensors[0]->data_ = new float[correct_out.size()];
DoArithmetic(in_tensors, out_tensors, node, &allocator);

ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(out_tensors.front()->data_), correct_out.data(),
correct_out.size(), 0.00001));

delete[] out_tensors[0]->data_;
delete node;
delete mul_param;
}

+ 0
- 91
mindspore/lite/test/ut/internal/src/kernel/fp32/bias_add_fp32_test.cc View File

@@ -1,91 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "test/common/common_test.h"
#include "src/common/file_utils.h"
#include "schema/ops_generated.h"
#include "mindspore/lite/nnacl/fp32/arithmetic_fp32.h"
#include "internal/src/allocator.h"
#include "internal/include/model.h"
#include "internal/include/ms_tensor.h"
#include "internal/include/lite_utils.h"
#include "internal/src/kernel/fp32/bias_add.h"
#include "gtest/gtest.h"

class TestInternalBiasAddFp32 : public mindspore::CommonTest {
public:
TestInternalBiasAddFp32() {}
};

TEST_F(TestInternalBiasAddFp32, BiasAddTest) {
auto bias_add_param = new ArithmeticParameter();
bias_add_param->activation_type_ = mindspore::schema::ActivationType_NO_ACTIVATION;
bias_add_param->op_parameter_.type_ = KernelType_BiasAdd;
Node *node = new Node();
node->name_ = "BiasAdd";
node->node_type_ = NodeType::NodeType_CNode;
node->primitive_ = reinterpret_cast<PrimitiveC *>(bias_add_param);
mindspore::lite::Allocator allocator;
std::vector<float> data0 = {12.216284, 3.3466918, 15.327419, 5.234958, 0.804376, 9.952188,
14.727955, -8.080715, 13.71383, 8.055829, 6.5845337, -9.25232,
-4.24519, 11.550042, 9.262012, 1.2780352, 6.7263746, -3.9301445,
3.764492, -8.602078, -3.3558068, 13.619035, -2.6694393, 3.2008505};
std::vector<float> data1 = {0.16771512, 0.7336843, 0.6768286, 0.4453379};
std::vector<float> correct_out = {12.3839989, 4.0803761, 16.0042477, 5.6802959, 0.9720911, 10.6858721,
15.4047832, -7.6353774, 13.8815451, 8.7895136, 7.2613621, -8.8069820,
-4.0774751, 12.2837267, 9.9388399, 1.7233731, 6.8940897, -3.1964602,
4.4413204, -8.1567402, -3.1880918, 14.3527193, -1.9926107, 3.6461883};
TensorPtrVector in_tensors;
ShapeVector shape0(4);
shape0[0] = 1;
shape0[1] = 2;
shape0[2] = 3;
shape0[3] = 4;
MSTensor in0;
in0.data_ = data0.data();
in0.shape_ = shape0;
in0.data_type_ = TypeId::kNumberTypeFloat32;
in_tensors.push_back(&in0);

ShapeVector shape1{4};
MSTensor in1;
in1.data_ = data1.data();
in1.shape_ = shape1;
in1.data_type_ = TypeId::kNumberTypeFloat32;
in_tensors.push_back(&in1);

TensorPtrVector out_tensors;
MSTensor out0;
out_tensors.push_back(&out0);

DoBiasAddInferShape(in_tensors, out_tensors, reinterpret_cast<OpParameter *>(bias_add_param));

ShapeVector out_shape0(4);
out_shape0[0] = 1;
out_shape0[1] = 2;
out_shape0[2] = 3;
out_shape0[3] = 4;
ASSERT_EQ(out_tensors.front()->shape_, out_shape0);

out_tensors[0]->data_ = new float[correct_out.size()];
DoBiasAdd(in_tensors, out_tensors, node, &allocator);

ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(out_tensors.front()->data_), correct_out.data(),
correct_out.size(), 0.00001));

delete[] out_tensors[0]->data_;
delete node;
delete bias_add_param;
}

+ 0
- 241
mindspore/lite/test/ut/internal/src/kernel/fp32/reduce_fp32_test.cc View File

@@ -1,241 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "test/common/common_test.h"
#include "mindspore/lite/nnacl/reduce_parameter.h"
#include "schema/ops_generated.h"
#include "internal/src/allocator.h"
#include "internal/include/model.h"
#include "internal/include/ms_tensor.h"
#include "internal/include/lite_utils.h"
#include "internal/src/kernel/fp32/reduce.h"
#include "gtest/gtest.h"

class TestInternalReduceFp32 : public mindspore::CommonTest {
public:
TestInternalReduceFp32() {}
};

TEST_F(TestInternalReduceFp32, ReduceSumOneAxisTest) {
Node *node = new Node();
node->name_ = "ReduceSum";
node->node_type_ = NodeType::NodeType_CNode;

auto params = new ReduceParameter();
params->mode_ = mindspore::schema::ReduceMode_ReduceSum;
params->num_axes_ = 1;
params->axes_[0] = 1;
params->keep_dims_ = false;
node->primitive_ = reinterpret_cast<PrimitiveC *>(params);
mindspore::lite::Allocator allocator;
float in[96] = {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0,
16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0,
48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0, 61.0, 62.0, 63.0,
64.0, 65.0, 66.0, 67.0, 68.0, 69.0, 70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0,
80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0, 93.0, 94.0, 95.0};
float correct[24] = {72.0, 76.0, 80.0, 84.0, 88.0, 92.0, 96.0, 100.0, 104.0, 108.0, 112.0, 116.0,
264.0, 268.0, 272.0, 276.0, 280.0, 284.0, 288.0, 292.0, 296.0, 300.0, 304.0, 308.0};

TensorPtrVector in_tensors;
ShapeVector shape0(4);
shape0[0] = 2;
shape0[1] = 4;
shape0[2] = 4;
shape0[3] = 3;
MSTensor in0;
in0.data_ = in;
in0.shape_ = shape0;
in0.data_type_ = TypeId::kNumberTypeFloat32;
in_tensors.push_back(&in0);

TensorPtrVector out_tensors;
MSTensor out0;
out0.shape_.resize(3);
out_tensors.push_back(&out0);

DoReduceInferShape(in_tensors, out_tensors, reinterpret_cast<OpParameter *>(params));

ShapeVector out_shape0(3);
out_shape0[0] = 2;
out_shape0[1] = 4;
out_shape0[2] = 3;
ASSERT_EQ(out_tensors.front()->shape_, out_shape0);
out_tensors[0]->data_ = new float[24];

DoReduce(in_tensors, out_tensors, node, &allocator);

ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(out_tensors.front()->data_), correct, 24, 0.00001));
delete[] out_tensors[0]->data_;
delete node;
delete params;
}

TEST_F(TestInternalReduceFp32, ReduceSumAllAxisTest) {
Node *node = new Node();
node->name_ = "ReduceSum";
node->node_type_ = NodeType::NodeType_CNode;

auto params = new ReduceParameter();
params->mode_ = mindspore::schema::ReduceMode_ReduceSum;
params->num_axes_ = 0;
params->keep_dims_ = false;
node->primitive_ = reinterpret_cast<PrimitiveC *>(params);
mindspore::lite::Allocator allocator;
float in[96] = {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0,
16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0,
48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0, 61.0, 62.0, 63.0,
64.0, 65.0, 66.0, 67.0, 68.0, 69.0, 70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0,
80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0, 93.0, 94.0, 95.0};
float correct[1] = {4560.0};

TensorPtrVector in_tensors;
ShapeVector shape0(4);
shape0[0] = 2;
shape0[1] = 4;
shape0[2] = 4;
shape0[3] = 3;
MSTensor in0;
in0.data_ = in;
in0.shape_ = shape0;
in0.data_type_ = TypeId::kNumberTypeFloat32;
in_tensors.push_back(&in0);

TensorPtrVector out_tensors;
MSTensor out0;
out_tensors.push_back(&out0);

DoReduceInferShape(in_tensors, out_tensors, reinterpret_cast<OpParameter *>(params));

ShapeVector out_shape0{};
ASSERT_EQ(out_tensors.front()->shape_, out_shape0);
out_tensors[0]->data_ = new float[1];

DoReduce(in_tensors, out_tensors, node, &allocator);

ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(out_tensors.front()->data_), correct, 1, 0.00001));
delete[] out_tensors[0]->data_;
delete node;
delete params;
}

TEST_F(TestInternalReduceFp32, ReduceMeanOneAxisTest) {
Node *node = new Node();
node->name_ = "ReduceMean";
node->node_type_ = NodeType::NodeType_CNode;

auto params = new ReduceParameter();
params->mode_ = mindspore::schema::ReduceMode_ReduceMean;
params->num_axes_ = 1;
params->axes_[0] = 1;
params->keep_dims_ = false;
node->primitive_ = reinterpret_cast<PrimitiveC *>(params);
mindspore::lite::Allocator allocator;
float in[96] = {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0,
16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0,
48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0, 61.0, 62.0, 63.0,
64.0, 65.0, 66.0, 67.0, 68.0, 69.0, 70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0,
80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0, 93.0, 94.0, 95.0};
float correct[24] = {18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0,
66.0, 67.0, 68.0, 69.0, 70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0};
TensorPtrVector in_tensors;
ShapeVector shape0(4);
shape0[0] = 2;
shape0[1] = 4;
shape0[2] = 4;
shape0[3] = 3;
MSTensor in0;
in0.data_ = in;
in0.shape_ = shape0;
in0.data_type_ = TypeId::kNumberTypeFloat32;
in_tensors.push_back(&in0);

TensorPtrVector out_tensors;
MSTensor out0;
out0.shape_.resize(3);
out_tensors.push_back(&out0);

DoReduceInferShape(in_tensors, out_tensors, reinterpret_cast<OpParameter *>(params));

ShapeVector out_shape0(3);
out_shape0[0] = 2;
out_shape0[1] = 4;
out_shape0[2] = 3;
ASSERT_EQ(out_tensors.front()->shape_, out_shape0);
out_tensors[0]->data_ = new float[24];

DoReduce(in_tensors, out_tensors, node, &allocator);

ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(out_tensors.front()->data_), correct, 24, 0.00001));
delete[] out_tensors[0]->data_;
delete node;
delete params;
}

TEST_F(TestInternalReduceFp32, ReduceMeanAllAxisTest) {
Node *node = new Node();
node->name_ = "ReduceMean";
node->node_type_ = NodeType::NodeType_CNode;

auto params = new ReduceParameter();
params->mode_ = mindspore::schema::ReduceMode_ReduceMean;
params->num_axes_ = 0;
params->keep_dims_ = true;
node->primitive_ = reinterpret_cast<PrimitiveC *>(params);
mindspore::lite::Allocator allocator;
float in[96] = {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0,
16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0,
48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0, 61.0, 62.0, 63.0,
64.0, 65.0, 66.0, 67.0, 68.0, 69.0, 70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0,
80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0, 93.0, 94.0, 95.0};
float correct[1] = {47.5};

TensorPtrVector in_tensors;
ShapeVector shape0(4);
shape0[0] = 2;
shape0[1] = 4;
shape0[2] = 4;
shape0[3] = 3;
MSTensor in0;
in0.data_ = in;
in0.shape_ = shape0;
in0.data_type_ = TypeId::kNumberTypeFloat32;
in_tensors.push_back(&in0);

TensorPtrVector out_tensors;
MSTensor out0;
out0.shape_.resize(4);
out_tensors.push_back(&out0);

DoReduceInferShape(in_tensors, out_tensors, reinterpret_cast<OpParameter *>(params));

ShapeVector out_shape0(4);
out_shape0[0] = 1;
out_shape0[1] = 1;
out_shape0[2] = 1;
out_shape0[3] = 1;
ASSERT_EQ(out_tensors.front()->shape_, out_shape0);
out_tensors[0]->data_ = new float[1];

DoReduce(in_tensors, out_tensors, node, &allocator);

ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(out_tensors.front()->data_), correct, 1, 0.00001));
delete[] out_tensors[0]->data_;
delete node;
delete params;
}

+ 0
- 54
mindspore/lite/test/ut/internal/vector_test.cc View File

@@ -1,54 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <cmath>
#include <memory>
#include "common/common_test.h"
#include "internal/include/vector.h"
#include "nnacl/op_base.h"

namespace mindspore {
class VectorTest : public mindspore::CommonTest {
public:
VectorTest() {}
};

void CheckArrValue(Vector<int> arr) {
for (size_t i = 0; i < arr.size(); ++i) {
ASSERT_EQ(arr[i], static_cast<int>(i));
}
}

TEST_F(VectorTest, VectorTest1) {
constexpr int kLen1 = 10;
Vector<int> arr1(kLen1);
for (int i = 0; i < kLen1; ++i) {
arr1[i] = i;
}
Vector<int> arr2 = arr1;
ASSERT_EQ(arr2.size(), kLen1);
for (int i = 0; i < kLen1; ++i) {
ASSERT_EQ(arr2[i], i);
}

Vector<int> arr3;
for (int i = 0; i < kLen1; ++i) {
arr3.push_back(std::move(arr1[i]));
}
CheckArrValue(arr3);
}

} // namespace mindspore
