| @@ -53,6 +53,7 @@ set(CORE_DIR ${TOP_DIR}/mindspore/core) | |||
| set(CCSRC_DIR ${TOP_DIR}/mindspore/ccsrc) | |||
| include_directories(${TOP_DIR}) | |||
| include_directories(${CORE_DIR}) | |||
| include_directories(${CORE_DIR}/ir) | |||
| include_directories(${CCSRC_DIR}) | |||
| include_directories(${CMAKE_CURRENT_SOURCE_DIR}) | |||
| include_directories(${CMAKE_CURRENT_SOURCE_DIR}/src/runtime/kernel/arm) | |||
| @@ -96,7 +97,6 @@ else () | |||
| set(CMAKE_SHARED_LINKER_FLAGS "-Wl,-z,relro,-z,now -Wl,-z,noexecstack ${CMAKE_SHARED_LINKER_FLAGS}") | |||
| set(CMAKE_EXE_LINKER_FLAGS "-Wl,-z,relro,-z,now -Wl,-z,noexecstack ${CMAKE_EXE_LINKER_FLAGS}") | |||
| endif() | |||
| string(REPLACE " -g " " " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") | |||
| endif () | |||
| if (BUILD_DEVICE) | |||
| @@ -128,12 +128,6 @@ if (WIN32) | |||
| add_compile_definitions(BUILDING_DLL) | |||
| endif () | |||
| set(CORE_SRC | |||
| ${CORE_DIR}/ir/meta_tensor.cc | |||
| ${CORE_DIR}/gvar/logging_level.cc | |||
| ${CORE_DIR}/gvar/typeid_manager.cc | |||
| ${CORE_DIR}/base/base.cc | |||
| ) | |||
| if (BUILD_CONVERTER) | |||
| if (PLATFORM_ARM64 OR PLATFORM_ARM32) | |||
| MESSAGE(FATAL_ERROR "Cannot build converter on ARM platforms") | |||
| @@ -224,6 +218,7 @@ endif () | |||
| if (BUILD_DEVICE) | |||
| add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/src) | |||
| add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/internal) | |||
| add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/nnacl) | |||
| if (NOT WIN32) | |||
| add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/tools/benchmark) | |||
| @@ -41,32 +41,10 @@ typedef enum { | |||
| DT_NPU /**< NPU device type, not supported yet */ | |||
| } DeviceType; | |||
| /// \brief DeviceContext defined for holding DeviceType. | |||
| typedef struct { | |||
| DeviceType type; /**< device type */ | |||
| } DeviceContext; | |||
| /// \brief Context defined for holding environment variables during runtime. | |||
| class MS_API Context { | |||
| public: | |||
| /// \brief Constructor of MindSpore Lite Context using default value for parameters. | |||
| /// | |||
| /// \return Instance of MindSpore Lite Context. | |||
| Context(); | |||
| /// \brief Constructor of MindSpore Lite Context using input value for parameters. | |||
| /// | |||
| /// \param[in] thread_num Define the work thread number during the runtime. | |||
| /// \param[in] allocator Define the allocator for malloc. | |||
| /// \param[in] device_ctx Define device information during the runtime. | |||
| Context(int thread_num, std::shared_ptr<Allocator> allocator, DeviceContext device_ctx); | |||
| /// \brief Destructor of MindSpore Lite Context. | |||
| virtual ~Context(); | |||
| public: | |||
| struct Context { | |||
| bool float16_priority = false; /**< prefer float16 inference */ | |||
| DeviceContext device_ctx_{DT_CPU}; | |||
| DeviceType device_type_ = DT_CPU; | |||
| int thread_num_ = 2; /**< thread number config for thread pool */ | |||
| std::shared_ptr<Allocator> allocator = nullptr; | |||
| CpuBindMode cpu_bind_mode_ = MID_CPU; | |||
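With Context reduced from a class with constructors to a plain aggregate, callers now configure it by assigning fields directly. A minimal migration sketch, assuming the struct stays in namespace mindspore::lite and using only the field names visible in this hunk:

```cpp
#include "include/context.h"  // public header changed above

// Sketch: the removed Context(thread_num, allocator, device_ctx) constructor maps onto
// plain field assignment on the new aggregate.
mindspore::lite::Context MakeContext() {
  mindspore::lite::Context ctx;
  ctx.thread_num_ = 4;                             // worker threads for the thread pool
  ctx.device_type_ = mindspore::lite::DT_CPU;      // replaces the old device_ctx_.type
  ctx.cpu_bind_mode_ = mindspore::lite::MID_CPU;
  ctx.float16_priority = false;
  // allocator defaults to nullptr; the runtime presumably falls back to a default
  // allocator now that the constructor no longer calls Allocator::Create().
  return ctx;
}
```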
| @@ -51,7 +51,7 @@ constexpr int RET_OP_EXECUTE_FAILURE = -304; /**< Failed to execution operator. | |||
| constexpr int RET_FORMAT_ERR = -401; /**< Failed to checking tensor format. */ | |||
| /* InferShape error code, range: [-501,-600] */ | |||
| constexpr int RET_INFER_ERR = -501; /**< Failed to infer shape. */ | |||
| constexpr int RET_INFER_ERR = -501; /**< Failed to infer shape. */ | |||
| constexpr int RET_INFER_INVALID = -502; /**< Invalid infer shape before runtime. */ | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| @@ -86,22 +86,18 @@ class MS_API LiteSession { | |||
| /// \return STATUS as an error code of running graph, STATUS is defined in errorcode.h. | |||
| virtual int RunGraph(const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr) = 0; | |||
| /// \brief Get output MindSpore Lite MSTensors of model mapped by node name. | |||
| /// | |||
| /// \return The map of output node name and MindSpore Lite MSTensor. | |||
| virtual std::unordered_map<std::string, std::vector<mindspore::tensor::MSTensor *>> GetOutputMapByNode() const = 0; | |||
| /// \brief Get output MindSpore Lite MSTensors of model by node name. | |||
| /// | |||
| /// \param[in] node_name Define node name. | |||
| /// | |||
| /// \return The vector of MindSpore Lite MSTensor. | |||
| /// deprecated, replace with GetOutputByTensorName | |||
| virtual std::vector<tensor::MSTensor *> GetOutputsByNodeName(const std::string &node_name) const = 0; | |||
| /// \brief Get output MindSpore Lite MSTensors of model mapped by tensor name. | |||
| /// | |||
| /// \return The map of output tensor name and MindSpore Lite MSTensor. | |||
| virtual std::unordered_map<std::string, mindspore::tensor::MSTensor *> GetOutputMapByTensor() const = 0; | |||
| virtual std::unordered_map<std::string, mindspore::tensor::MSTensor *> GetOutputs() const = 0; | |||
| /// \brief Get name of output tensors of model compiled by this session. | |||
| /// | |||
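The map-by-node accessor is removed and GetOutputMapByTensor() becomes GetOutputs(), which returns output tensors keyed by tensor name. A hedged sketch of iterating the renamed accessor, assuming LiteSession stays in namespace mindspore::session and the session was created and compiled elsewhere:

```cpp
#include <cstdio>
#include <string>
#include <unordered_map>
#include "include/lite_session.h"
#include "include/ms_tensor.h"

// Sketch: print the byte size of every output tensor after RunGraph().
void DumpOutputSizes(mindspore::session::LiteSession *session) {
  std::unordered_map<std::string, mindspore::tensor::MSTensor *> outputs = session->GetOutputs();
  for (const auto &kv : outputs) {
    const std::string &tensor_name = kv.first;
    mindspore::tensor::MSTensor *tensor = kv.second;
    // Size() is one of the read accessors kept by the trimmed MSTensor interface.
    printf("%s: %zu bytes\n", tensor_name.c_str(), tensor->Size());
  }
}
```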
| @@ -33,17 +33,6 @@ class MS_API MSTensor { | |||
| /// \return Instance of MindSpore Lite MSTensor. | |||
| MSTensor() = default; | |||
| /// \brief Static method to create a MSTensor pointer. | |||
| /// | |||
| /// \param[in] data_type Define data type of tensor to be created. | |||
| /// \param[in] shape Define Shape of tensor to be created. | |||
| /// | |||
| /// \note TypeId is defined in mindspore/mindspore/core/ir/dtype/type_id.h. Only number types in TypeId enum are | |||
| /// suitable for MSTensor. | |||
| /// | |||
| /// \return the pointer of MSTensor. | |||
| static MSTensor *CreateTensor(TypeId data_type, const std::vector<int> &shape); | |||
| /// \brief Destructor of MindSpore Lite Model. | |||
| virtual ~MSTensor() = default; | |||
| @@ -55,25 +44,11 @@ class MS_API MSTensor { | |||
| /// \return MindSpore Lite TypeId of the MindSpore Lite MSTensor. | |||
| virtual TypeId data_type() const = 0; | |||
| /// \brief Set data type for the MindSpore Lite MSTensor. | |||
| /// | |||
| /// \param[in] data_type Define MindSpore Lite TypeId to be set in the MindSpore Lite MSTensor. | |||
| /// | |||
| /// \return MindSpore Lite TypeId of the MindSpore Lite MSTensor after set. | |||
| virtual TypeId set_data_type(TypeId data_type) = 0; | |||
| /// \brief Get shape of the MindSpore Lite MSTensor. | |||
| /// | |||
| /// \return A vector of int as the shape of the MindSpore Lite MSTensor. | |||
| virtual std::vector<int> shape() const = 0; | |||
| /// \brief Set shape for the MindSpore Lite MSTensor. | |||
| /// | |||
| /// \param[in] shape Define a vector of int as shape to be set into the MindSpore Lite MSTensor. | |||
| /// | |||
| /// \return size of shape of the MindSpore Lite MSTensor after set. | |||
| virtual size_t set_shape(const std::vector<int> &shape) = 0; | |||
| /// \brief Get size of the dimension of the MindSpore Lite MSTensor index by the parameter index. | |||
| /// | |||
| /// \param[in] index Define index of dimension returned. | |||
| @@ -86,11 +61,6 @@ class MS_API MSTensor { | |||
| /// \return Number of element in MSTensor. | |||
| virtual int ElementsNum() const = 0; | |||
| /// \brief Get hash of the MindSpore Lite MSTensor. | |||
| /// | |||
| /// \return Hash of the MindSpore Lite MSTensor. | |||
| virtual std::size_t hash() const = 0; | |||
| /// \brief Get byte size of data in MSTensor. | |||
| /// | |||
| /// \return Byte size of data in MSTensor. | |||
| @@ -101,7 +71,7 @@ class MS_API MSTensor { | |||
| /// \note The data pointer can be used to both write and read data in MSTensor. | |||
| /// | |||
| /// \return the pointer points to data in MSTensor. | |||
| virtual void *MutableData() const = 0; | |||
| virtual void *MutableData() = 0; | |||
| }; | |||
| } // namespace tensor | |||
| } // namespace mindspore | |||
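With the setters, hash() and the static CreateTensor() factory removed from the public MSTensor, user code is left with the read accessors plus a now non-const MutableData() for writing input data. A minimal sketch, assuming the input tensor comes from LiteSession::GetInputs():

```cpp
#include <cstring>
#include <vector>
#include "include/ms_tensor.h"

// Sketch: fill an input tensor with caller-provided floats.
bool FillInput(mindspore::tensor::MSTensor *input, const std::vector<float> &values) {
  if (input == nullptr || values.size() * sizeof(float) != input->Size()) {
    return false;  // shape/dtype are fixed by the model now; no set_shape()/set_data_type()
  }
  // MutableData() lost its const qualifier: writing through it is the supported path.
  void *dst = input->MutableData();
  if (dst == nullptr) {
    return false;
  }
  std::memcpy(dst, values.data(), input->Size());
  return true;
}
```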
| @@ -25,9 +25,7 @@ namespace mindspore { | |||
| namespace lite { | |||
| struct Model; | |||
| } | |||
| namespace lite::tensor { | |||
| class Tensor; | |||
| } | |||
| namespace session { | |||
| class TrainSession : public lite::LiteSession { | |||
| @@ -40,9 +38,10 @@ class TrainSession : public lite::LiteSession { | |||
| int CompileGraph(lite::Model *model) override; | |||
| virtual void ReplaceOps(); | |||
| virtual void* ExportToBuf(void* buf, size_t* len) const; | |||
| virtual void *ExportToBuf(void *buf, size_t *len) const; | |||
| std::unordered_map<std::string, std::vector<mindspore::tensor::MSTensor *>> GetOutputs() const; | |||
| // todo: output tensors by tensor name | |||
| std::unordered_map<std::string, std::vector<mindspore::tensor::MSTensor *>> GetOutputMap() const; | |||
| std::vector<tensor::MSTensor *> GetOutputsByName(const std::string &node_name) const; | |||
| virtual void train(); | |||
| @@ -51,11 +50,10 @@ class TrainSession : public lite::LiteSession { | |||
| bool is_eval() { return train_mode_ == false; } | |||
| protected: | |||
| bool train_mode_ = false; | |||
| lite::Model* model_ = nullptr; | |||
| bool train_mode_ = false; | |||
| lite::Model *model_ = nullptr; | |||
| std::unordered_map<std::string, std::vector<mindspore::tensor::MSTensor *>> ext_output_map_; | |||
| // private: | |||
| }; | |||
| } // namespace session | |||
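TrainSession layers a training/eval mode on top of LiteSession. A hedged sketch of toggling modes around one step; the include path and the eval() counterpart of train() are assumptions (only train() and is_eval() are visible in this hunk), and the session is assumed to be created and compiled elsewhere:

```cpp
#include "include/train_session.h"  // assumed header path

// Sketch: one training step followed by a forward-only validation pass.
void OneStep(mindspore::session::TrainSession *session) {
  session->train();       // sets train_mode_; ReplaceOps() presumably swaps in training kernels
  session->RunGraph();    // inherited from lite::LiteSession
  session->eval();        // assumed counterpart of train(), not shown in this hunk
  if (session->is_eval()) {
    session->RunGraph();  // inference-mode pass
  }
}
```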
| @@ -0,0 +1,31 @@ | |||
| cmake_minimum_required(VERSION 3.14) | |||
| project (Lite_Internal) | |||
| set(TOP_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../) | |||
| include_directories(${TOP_DIR}) | |||
| file(GLOB_RECURSE C_SRC ${CMAKE_CURRENT_SOURCE_DIR}/*.cc) | |||
| file(GLOB KERNEL_SRC | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/*.c | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/fp32/*.c | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/int8/*.c | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/quantization/*.c | |||
| ) | |||
| list(REMOVE_ITEM KERNEL_SRC ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/opt_op_handler.c) | |||
| set(CCSRC | |||
| ${TOP_DIR}/src/common/log_adapter.cc | |||
| ${TOP_DIR}/src/runtime/allocator.cc | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/../../core/gvar/logging_level.cc | |||
| ) | |||
| if (PLATFORM_ARM64) | |||
| # assembly | |||
| file(GLOB ASSEMBLY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/assembly/arm64/*.s | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/assembly/arm64/*.S) | |||
| set_property(SOURCE ${ASSEMBLY_SRC} PROPERTY LANGUAGE C) | |||
| set(KERNEL_SRC ${KERNEL_SRC} ${ASSEMBLY_SRC}) | |||
| add_library(mslite_internal SHARED ${C_SRC} ${CCSRC} ${KERNEL_SRC}) | |||
| target_link_libraries(mslite_internal log) | |||
| endif() | |||
| @@ -0,0 +1,40 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_CONTEXT_H_ | |||
| #define MINDSPORE_LITE_INTERNAL_INCLUDE_CONTEXT_H_ | |||
| /// \brief CpuBindMode defined for holding bind cpu strategy argument. | |||
| typedef enum { | |||
| MID_CPU = -1, /**< bind middle cpu first */ | |||
| HIGHER_CPU = 1, /**< bind higher cpu first */ | |||
| NO_BIND = 0 /**< no bind */ | |||
| } CpuBindMode; | |||
| /// \brief DeviceType defined for holding user's preferred backend. | |||
| typedef enum { | |||
| DT_CPU, /**< CPU device type */ | |||
| DT_GPU, /**< GPU device type */ | |||
| DT_NPU /**< NPU device type, not supported yet */ | |||
| } DeviceType; | |||
| /// \brief Context defined for holding environment variables during runtime. | |||
| typedef struct { | |||
| bool float16_priority = false; /**< prefer float16 inference */ | |||
| DeviceType device_type_ = DT_CPU; | |||
| int thread_num_ = 2; /**< thread number config for thread pool */ | |||
| } Context; | |||
| #endif // MINDSPORE_LITE_INTERNAL_INCLUDE_CONTEXT_H_ | |||
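Unlike the public header, the internal Context is a global-scope POD struct with in-class defaults, so configuration is plain field assignment. A minimal sketch:

```cpp
#include "internal/include/context.h"

// Sketch: override the defaults (DT_CPU, 2 threads, float16 off) for a lightweight deployment.
Context MakeInternalContext() {
  Context ctx;
  ctx.thread_num_ = 1;           // single worker thread
  ctx.device_type_ = DT_CPU;     // DT_GPU/DT_NPU exist, but NPU is marked as unsupported
  ctx.float16_priority = false;
  return ctx;
}
```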
| @@ -0,0 +1,55 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_ERRORCODE_H_ | |||
| #define MINDSPORE_LITE_INTERNAL_INCLUDE_ERRORCODE_H_ | |||
| /// \brief STATUS defined for holding error code in MindSpore Lite. | |||
| using STATUS = int; | |||
| /* Success */ | |||
| constexpr int RET_OK = 0; /**< No error occurs. */ | |||
| /* Common error code, range: [-1, -100]*/ | |||
| constexpr int RET_ERROR = -1; /**< Common error code. */ | |||
| constexpr int RET_NULL_PTR = -2; /**< NULL pointer returned.*/ | |||
| constexpr int RET_PARAM_INVALID = -3; /**< Invalid parameter.*/ | |||
| constexpr int RET_NO_CHANGE = -4; /**< No change. */ | |||
| constexpr int RET_SUCCESS_EXIT = -5; /**< No error but exit. */ | |||
| constexpr int RET_MEMORY_FAILED = -6; /**< Fail to create memory. */ | |||
| /* Executor error code, range: [-101,-200] */ | |||
| constexpr int RET_OUT_OF_TENSOR_RANGE = -101; /**< Failed to check range. */ | |||
| constexpr int RET_INPUT_TENSOR_ERROR = -102; /**< Failed to check input tensor. */ | |||
| constexpr int RET_REENTRANT_ERROR = -103; /**< Exist executor running. */ | |||
| /* Graph error code, range: [-201,-300] */ | |||
| constexpr int RET_GRAPH_FILE_ERR = -201; /**< Failed to verify graph file. */ | |||
| /* Node error code, range: [-301,-400] */ | |||
| constexpr int RET_NOT_FIND_OP = -301; /**< Failed to find operator. */ | |||
| constexpr int RET_INVALID_OP_NAME = -302; /**< Invalid operator name. */ | |||
| constexpr int RET_INVALID_OP_ATTR = -303; /**< Invalid operator attr. */ | |||
| constexpr int RET_OP_EXECUTE_FAILURE = -304; /**< Failed to execute operator. */ | |||
| /* Tensor error code, range: [-401,-500] */ | |||
| constexpr int RET_FORMAT_ERR = -401; /**< Failed to check tensor format. */ | |||
| /* InferShape error code, range: [-501,-600] */ | |||
| constexpr int RET_INFER_ERR = -501; /**< Failed to infer shape. */ | |||
| constexpr int RET_INFER_INVALID = -502; /**< Invalid infer shape before runtime. */ | |||
| #endif // MINDSPORE_LITE_INTERNAL_INCLUDE_ERRORCODE_H_ | |||
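STATUS is a plain int, so call sites compare against RET_OK and branch on the specific negative code only when they need finer handling. A small hedged helper sketch:

```cpp
#include "internal/include/errorcode.h"

// Sketch: map a STATUS returned by CompileGraph/RunGraph to a short diagnostic string.
const char *StatusToString(STATUS status) {
  switch (status) {
    case RET_OK:             return "ok";
    case RET_NULL_PTR:       return "null pointer";
    case RET_PARAM_INVALID:  return "invalid parameter";
    case RET_GRAPH_FILE_ERR: return "failed to verify graph file";
    case RET_INFER_ERR:      return "failed to infer shape";
    default:                 return status < 0 ? "error" : "unknown status";
  }
}
```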
| @@ -0,0 +1,90 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_LITE_SESSION_H | |||
| #define MINDSPORE_LITE_INTERNAL_INCLUDE_LITE_SESSION_H | |||
| #include "internal/include/ms_tensor.h" | |||
| #include "internal/include/model.h" | |||
| #include "internal/include/context.h" | |||
| #include "internal/include/lite_utils.h" | |||
| /// \brief LiteSession defined session in MindSpore Lite for compiling Model and forwarding model. | |||
| typedef struct LiteSession { | |||
| /// \brief Static method to create a LiteSession pointer. | |||
| /// | |||
| /// \param[in] context Define the context of session to be created. | |||
| /// | |||
| /// \return Pointer of MindSpore Lite LiteSession. | |||
| static LiteSession *CreateSession(Context *context); | |||
| /// \brief Compile MindSpore Lite model. | |||
| /// | |||
| /// \note CompileGraph should be called before RunGraph. | |||
| /// | |||
| /// \param[in] model Define the model to be compiled. | |||
| /// | |||
| /// \return STATUS as an error code of compiling graph, STATUS is defined in errorcode.h. | |||
| int CompileGraph(Model *model); | |||
| /// \brief Get input MindSpore Lite MSTensors of model. | |||
| /// | |||
| /// \return The vector of MindSpore Lite MSTensor. | |||
| TensorPtrVector GetInputs() const; | |||
| /// \brief Get input MindSpore Lite MSTensors of model by node name. | |||
| /// | |||
| /// \param[in] node_name Define node name. | |||
| /// | |||
| /// \return The vector of MindSpore Lite MSTensor. | |||
| TensorPtrVector GetInputsByName(const String &node_name) const; | |||
| /// \brief Get output MindSpore Lite MSTensors of model by node name. | |||
| /// | |||
| /// \param[in] node_name Define node name. | |||
| /// | |||
| /// \return The vector of MindSpore Lite MSTensor. | |||
| TensorPtrVector GetOutputsByNodeName(const String &node_name) const; | |||
| /// \brief Get output MindSpore Lite MSTensors of model. | |||
| /// | |||
| /// \return The vector of MindSpore Lite MSTensor. | |||
| TensorPtrVector GetOutputs() const; | |||
| /// \brief Get name of output tensors of model compiled by this session. | |||
| /// | |||
| /// \return The vector of string as output tensor names in order. | |||
| StringVector GetOutputTensorNames() const; | |||
| /// \brief Get output MindSpore Lite MSTensors of model by tensor name. | |||
| /// | |||
| /// \param[in] tensor_name Define tensor name. | |||
| /// | |||
| /// \return Pointer of MindSpore Lite MSTensor. | |||
| MSTensor *GetOutputByTensorName(const String &tensor_name) const; | |||
| /// \brief Run the compiled graph. | |||
| /// | |||
| /// \note RunGraph should be called after CompileGraph. | |||
| int RunGraph(); | |||
| /// \brief Resize inputs shape. | |||
| /// | |||
| /// \param[in] inputs Define the new inputs shape. | |||
| /// | |||
| /// \return STATUS as an error code of resize inputs, STATUS is defined in errorcode.h. | |||
| int Resize(const TensorPtrVector &inputs); | |||
| } LiteSession; | |||
| #endif // MINDSPORE_LITE_INTERNAL_INCLUDE_LITE_SESSION_H | |||
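Putting the internal headers together, the intended call sequence mirrors the public API: create a session, import and compile the model, fill inputs, run, then read outputs. A hedged end-to-end sketch, assuming the caller has already loaded the model file into model_buf:

```cpp
#include "internal/include/context.h"
#include "internal/include/errorcode.h"
#include "internal/include/lite_session.h"
#include "internal/include/model.h"

// Sketch only: error handling and input data copy are elided/abbreviated.
int RunModel(const char *model_buf, size_t model_size) {
  Context ctx;                                      // defaults: DT_CPU, 2 threads
  LiteSession *session = LiteSession::CreateSession(&ctx);
  Model *model = Model::Import(model_buf, model_size);
  if (session == NULL || model == NULL) {
    return RET_NULL_PTR;
  }
  int ret = session->CompileGraph(model);           // must precede RunGraph
  if (ret != RET_OK) {
    return ret;
  }
  TensorPtrVector inputs = session->GetInputs();    // copy user data into inputs[i]->data_ here
  if (inputs.empty()) {
    return RET_INPUT_TENSOR_ERROR;
  }
  ret = session->RunGraph();
  TensorPtrVector outputs = session->GetOutputs();  // read results from outputs[i]->data_
  model->Free();                                    // release the temporary model buffer
  return outputs.empty() ? RET_NULL_PTR : ret;
}
```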
| @@ -14,17 +14,18 @@ | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef LITE_MS_TENSOR_UTILS_H | |||
| #define LITE_MS_TENSOR_UTILS_H | |||
| #ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_LITE_UTILS_H_ | |||
| #define MINDSPORE_LITE_INTERNAL_INCLUDE_LITE_UTILS_H_ | |||
| #include <vector> | |||
| #include "include/ms_tensor.h" | |||
| #include "src/ir/tensor.h" | |||
| #include <string> | |||
| namespace mindspore { | |||
| namespace tensor { | |||
| std::vector<MSTensor *> PackToMSTensors(const std::vector<mindspore::lite::tensor::Tensor *> &in_tensors); | |||
| } | |||
| } // namespace mindspore | |||
| struct MSTensor; | |||
| struct Node; | |||
| using TensorPtrVector = std::vector<MSTensor *>; | |||
| using Uint32Vector = std::vector<uint32_t>; | |||
| using String = std::string; | |||
| using StringVector = std::vector<std::string>; | |||
| using ShapeVector = std::vector<int>; | |||
| using NodePtrVector = std::vector<struct Node *>; | |||
| #endif // LITE_MS_TENSOR_UTILS_H | |||
| #endif // MINDSPORE_LITE_INTERNAL_INCLUDE_LITE_UTILS_H_ | |||
| @@ -0,0 +1,59 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_MODEL_H | |||
| #define MINDSPORE_LITE_INTERNAL_INCLUDE_MODEL_H | |||
| #include "internal/include/lite_utils.h" | |||
| #include "nnacl/op_base.h" | |||
| using PrimitiveC = OpParameter; | |||
| enum NodeType { | |||
| NodeType_ValueNode = 0, | |||
| NodeType_Parameter = 1, | |||
| NodeType_CNode = 2, | |||
| NodeType_MIN = NodeType_ValueNode, | |||
| NodeType_MAX = NodeType_CNode | |||
| }; | |||
| typedef struct Node { | |||
| String name_; | |||
| NodeType node_type_; | |||
| PrimitiveC *primitive_; | |||
| Uint32Vector input_indices_; | |||
| Uint32Vector output_indices_; | |||
| } Node; | |||
| typedef struct Model { | |||
| String name_; | |||
| String version_; | |||
| TensorPtrVector all_tensors_; | |||
| Uint32Vector input_indices_; | |||
| Uint32Vector output_indices_; | |||
| NodePtrVector nodes_; | |||
| char *buf; | |||
| /// \brief Static method to create a Model pointer. | |||
| /// | |||
| /// \param[in] model_buf Define the buffer read from a model file. | |||
| /// \param[in] size Define bytes number of model buffer. | |||
| /// | |||
| /// \return Pointer of MindSpore Lite Model. | |||
| static Model *Import(const char *model_buf, size_t size); | |||
| /// \brief Free all the temporary buffer | |||
| void Free(); | |||
| } Model; | |||
| #endif // MINDSPORE_LITE_INTERNAL_INCLUDE_MODEL_H | |||
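Model is a plain struct, so the imported graph can be inspected directly: each Node references entries of all_tensors_ by index. A small hedged sketch:

```cpp
#include <cstdio>
#include "internal/include/model.h"

// Sketch: walk the imported graph and report per-node input/output counts.
void PrintGraphSummary(const Model *model) {
  if (model == NULL) return;
  printf("model %s (version %s): %zu nodes, %zu tensors\n", model->name_.c_str(),
         model->version_.c_str(), model->nodes_.size(), model->all_tensors_.size());
  for (const Node *node : model->nodes_) {
    // input_indices_/output_indices_ index into model->all_tensors_.
    printf("  node %s: %zu inputs, %zu outputs\n", node->name_.c_str(),
           node->input_indices_.size(), node->output_indices_.size());
  }
}
```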
| @@ -0,0 +1,142 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_MS_TENSOR_H_ | |||
| #define MINDSPORE_LITE_INTERNAL_INCLUDE_MS_TENSOR_H_ | |||
| #include "internal/include/lite_utils.h" | |||
| enum TypeId : int { | |||
| kTypeUnknown = 0, | |||
| kMetaTypeBegin = kTypeUnknown, | |||
| kMetaTypeType, // Type | |||
| kMetaTypeAnything, | |||
| kMetaTypeObject, | |||
| kMetaTypeTypeType, // TypeType | |||
| kMetaTypeProblem, | |||
| kMetaTypeExternal, | |||
| kMetaTypeNone, | |||
| kMetaTypeNull, | |||
| kMetaTypeEllipsis, | |||
| kMetaTypeEnd, | |||
| // | |||
| // Object types | |||
| // | |||
| kObjectTypeBegin = kMetaTypeEnd, | |||
| kObjectTypeNumber, | |||
| kObjectTypeString, | |||
| kObjectTypeList, | |||
| kObjectTypeTuple, | |||
| kObjectTypeSlice, | |||
| kObjectTypeKeyword, | |||
| kObjectTypeTensorType, | |||
| kObjectTypeRowTensorType, | |||
| kObjectTypeSparseTensorType, | |||
| kObjectTypeUndeterminedType, | |||
| kObjectTypeClass, | |||
| kObjectTypeDictionary, | |||
| kObjectTypeFunction, | |||
| kObjectTypeJTagged, | |||
| kObjectTypeSymbolicKeyType, | |||
| kObjectTypeEnvType, | |||
| kObjectTypeRefKey, | |||
| kObjectTypeRef, | |||
| kObjectTypeEnd, | |||
| // | |||
| // Number Types | |||
| // | |||
| kNumberTypeBegin = kObjectTypeEnd, | |||
| kNumberTypeBool, | |||
| kNumberTypeInt, | |||
| kNumberTypeInt8, | |||
| kNumberTypeInt16, | |||
| kNumberTypeInt32, | |||
| kNumberTypeInt64, | |||
| kNumberTypeUInt, | |||
| kNumberTypeUInt8, | |||
| kNumberTypeUInt16, | |||
| kNumberTypeUInt32, | |||
| kNumberTypeUInt64, | |||
| kNumberTypeFloat, | |||
| kNumberTypeFloat16, | |||
| kNumberTypeFloat32, | |||
| kNumberTypeFloat64, | |||
| kNumberTypeEnd | |||
| }; | |||
| enum Format { | |||
| Format_NCHW = 0, | |||
| Format_NHWC = 1, | |||
| Format_NHWC4 = 2, | |||
| Format_HWKC = 3, | |||
| Format_HWCK = 4, | |||
| Format_KCHW = 5, | |||
| Format_CKHW = 6, | |||
| Format_KHWC = 7, | |||
| Format_CHWK = 8, | |||
| Format_HW = 9, | |||
| Format_HW4 = 10, | |||
| Format_NC = 11, | |||
| Format_NC4 = 12, | |||
| Format_NC4HW4 = 100, | |||
| Format_NUM_OF_FORMAT = 101, | |||
| Format_MIN = Format_NCHW, | |||
| Format_MAX = Format_NUM_OF_FORMAT | |||
| }; | |||
| typedef struct MSTensor { | |||
| enum Category { | |||
| CONST, // weight tensor | |||
| VAR // activation tensor | |||
| }; | |||
| void *data_ = NULL; | |||
| void *device_data_ = NULL; | |||
| TypeId data_type_; | |||
| Format format_ = Format_NHWC; | |||
| Category category_ = VAR; | |||
| ShapeVector shape_ = {}; | |||
| size_t refCount = 0; | |||
| int32_t Batch() const; | |||
| int32_t Channel() const; | |||
| int32_t Height() const; | |||
| int32_t Width() const; | |||
| /// \brief Get size of the dimension of the MindSpore Lite MSTensor index by the parameter index. | |||
| /// | |||
| /// \param[in] index Define index of dimension returned. | |||
| /// | |||
| /// \return Size of dimension of the MindSpore Lite MSTensor. | |||
| int DimensionSize(size_t index) const; | |||
| /// \brief Get number of element in MSTensor. | |||
| /// | |||
| /// \return Number of element in MSTensor. | |||
| int ElementsNum() const; | |||
| int ElementsC4Num() const; | |||
| /// \brief Get byte size of data in MSTensor. | |||
| /// | |||
| /// \return Byte size of data in MSTensor. | |||
| size_t Size() const; | |||
| } MSTensor; | |||
| MSTensor *CreateTensor(TypeId data_type, const ShapeVector &shape); | |||
| #endif // MINDSPORE_LITE_INTERNAL_INCLUDE_MS_TENSOR_H_ | |||
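CreateTensor only allocates the MSTensor descriptor; data_ stays null until the caller or the session allocator attaches a buffer. A hedged sketch with a caller-owned buffer:

```cpp
#include "internal/include/ms_tensor.h"

// Sketch: create a float32 NHWC tensor descriptor and attach caller-owned storage.
MSTensor *MakeInputTensor(float *buffer) {
  MSTensor *tensor = CreateTensor(kNumberTypeFloat32, {1, 32, 32, 3});
  if (tensor == NULL) return NULL;
  // Size() = ElementsNum() * sizeof(float) = 1*32*32*3*4 = 12288 bytes for this shape.
  tensor->data_ = buffer;  // ownership stays with the caller in this sketch
  return tensor;
}
```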
| @@ -0,0 +1,68 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "internal/include/lite_session.h" | |||
| #include "internal/include/model.h" | |||
| #include "internal/include/ms_tensor.h" | |||
| #include "src/runtime/allocator.h" | |||
| static Context *g_Ctx; | |||
| static Model *g_Model; | |||
| static LiteSession g_Session; | |||
| static mindspore::lite::DefaultAllocator allocator; | |||
| LiteSession *LiteSession::CreateSession(Context *context) { | |||
| g_Ctx = context; | |||
| return &g_Session; | |||
| } | |||
| int LiteSession::CompileGraph(Model *model) { | |||
| g_Model = model; | |||
| for (auto in : g_Model->input_indices_) { | |||
| g_Model->all_tensors_[in]->data_ = allocator.Malloc(g_Model->all_tensors_[in]->Size()); | |||
| } | |||
| return 0; | |||
| } | |||
| TensorPtrVector LiteSession::GetInputs() const { | |||
|   TensorPtrVector in; | |||
|   in.reserve(g_Model->input_indices_.size()); | |||
|   for (auto index : g_Model->input_indices_) { | |||
|     in.emplace_back(g_Model->all_tensors_[index]); | |||
|   } | |||
|   return in; | |||
| } | |||
| TensorPtrVector LiteSession::GetInputsByName(const String &node_name) const { return TensorPtrVector(); } | |||
| TensorPtrVector LiteSession::GetOutputsByNodeName(const String &node_name) const { return TensorPtrVector(); } | |||
| TensorPtrVector LiteSession::GetOutputs() const { | |||
|   TensorPtrVector out; | |||
|   out.reserve(g_Model->output_indices_.size()); | |||
|   for (auto index : g_Model->output_indices_) { | |||
|     out.emplace_back(g_Model->all_tensors_[index]); | |||
|   } | |||
|   return out; | |||
| } | |||
| int LiteSession::RunGraph() { | |||
| // invoke nnacl kernel | |||
| return 0; | |||
| } | |||
| StringVector LiteSession::GetOutputTensorNames() const { return StringVector(); } | |||
| MSTensor *LiteSession::GetOutputByTensorName(const String &tensor_name) const { return NULL; } | |||
| int LiteSession::Resize(const TensorPtrVector &inputs) { return 0; } | |||
| @@ -0,0 +1,194 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include <iostream> | |||
| #include <vector> | |||
| #include <numeric> | |||
| #include <string> | |||
| #include <functional> | |||
| #include "internal/include/ms_tensor.h" | |||
| MSTensor *CreateTensor(TypeId data_type, const ShapeVector &shape) { | |||
| MSTensor *tensor = new MSTensor(); | |||
| tensor->shape_ = shape; | |||
| tensor->data_type_ = data_type; | |||
| return tensor; | |||
| } | |||
| int MSTensor::ElementsNum() const { return std::accumulate(shape_.begin(), shape_.end(), 1LL, std::multiplies<int>()); } | |||
| size_t MSTensor::Size() const { | |||
| size_t size = 0; | |||
| switch (this->data_type_) { | |||
| case kNumberTypeFloat64: | |||
| size = sizeof(double); | |||
| break; | |||
| case kNumberTypeFloat: | |||
| case kNumberTypeFloat32: | |||
| size = sizeof(float); | |||
| break; | |||
| case kNumberTypeInt8: | |||
| size = sizeof(int8_t); | |||
| break; | |||
| case kNumberTypeUInt8: | |||
| size = sizeof(uint8_t); | |||
| break; | |||
| case kNumberTypeFloat16: | |||
| size = sizeof(int16_t); | |||
| break; | |||
| case kNumberTypeInt16: | |||
| size = sizeof(int16_t); | |||
| break; | |||
| case kNumberTypeInt32: | |||
| size = sizeof(int32_t); | |||
| break; | |||
| case kNumberTypeInt64: | |||
| size = sizeof(int64_t); | |||
| break; | |||
| case kNumberTypeUInt16: | |||
| size = sizeof(uint16_t); | |||
| break; | |||
| case kNumberTypeUInt32: | |||
| size = sizeof(uint32_t); | |||
| break; | |||
| case kNumberTypeUInt64: | |||
| size = sizeof(uint64_t); | |||
| break; | |||
| case kNumberTypeBool: | |||
| size = sizeof(bool); | |||
| break; | |||
| default: | |||
| std::cout << "Unsupported data type: " << this->data_type_; | |||
| return 0; | |||
| } | |||
| size *= (format_ == Format::Format_NC4HW4 || format_ == Format::Format_NHWC4) ? ElementsC4Num() : ElementsNum(); | |||
| return size; | |||
| } | |||
| int32_t MSTensor::Batch() const { | |||
| if (this->shape_.size() != 4 && this->shape_.size() != 2) { | |||
| std::cout << "Unsupported tensor shape: " << this->shape_.size(); | |||
| return -1; | |||
| } | |||
| switch (this->format_) { | |||
| case Format::Format_NHWC: | |||
| case Format::Format_NHWC4: | |||
| case Format::Format_NCHW: | |||
| case Format::Format_NC4HW4: | |||
| case Format::Format_KCHW: | |||
| case Format::Format_KHWC: | |||
| case Format::Format_NC: | |||
| case Format::Format_NC4: | |||
| return this->shape_[0]; | |||
| case Format::Format_HWCK: | |||
| case Format::Format_CHWK: | |||
| return this->shape_[3]; | |||
| case Format::Format_HWKC: | |||
| return this->shape_[2]; | |||
| case Format::Format_CKHW: | |||
| return this->shape_[1]; | |||
| default: | |||
| // std::cout << "Unsupported format: " << EnumNameFormat(this->format_); | |||
| return -1; | |||
| } | |||
| } | |||
| int32_t MSTensor::Channel() const { | |||
| if (this->shape_.size() != 4 && this->shape_.size() != 2) { | |||
| std::cout << "Unsupported tensor shape: " << this->shape_.size(); | |||
| return -1; | |||
| } | |||
| switch (this->format_) { | |||
| case Format::Format_NCHW: | |||
| case Format::Format_KCHW: | |||
| case Format::Format_NC: | |||
| case Format::Format_NC4: | |||
| return this->shape_[1]; | |||
| case Format::Format_HWCK: | |||
| return this->shape_[2]; | |||
| case Format::Format_HWKC: | |||
| case Format::Format_NHWC: | |||
| case Format::Format_NHWC4: | |||
| case Format::Format_NC4HW4: | |||
| case Format::Format_KHWC: | |||
| return this->shape_[3]; | |||
| case Format::Format_CKHW: | |||
| case Format::Format_CHWK: | |||
| return this->shape_[0]; | |||
| default: | |||
| return -1; | |||
| } | |||
| } | |||
| int32_t MSTensor::Height() const { | |||
| if (this->shape_.size() != 4 && this->shape_.size() != 2) { | |||
| std::cout << "Unsupported tensor shape: " << this->shape_.size(); | |||
| return -1; | |||
| } | |||
| switch (this->format_) { | |||
| case Format::Format_NCHW: | |||
| case Format::Format_KCHW: | |||
| case Format::Format_CKHW: | |||
| return this->shape_[2]; | |||
| case Format::Format_NHWC: | |||
| case Format::Format_NHWC4: | |||
| case Format::Format_NC4HW4: | |||
| case Format::Format_KHWC: | |||
| case Format::Format_CHWK: | |||
| return this->shape_[1]; | |||
| case Format::Format_HWCK: | |||
| case Format::Format_HWKC: | |||
| case Format::Format_HW: | |||
| case Format::Format_HW4: | |||
| return this->shape_[0]; | |||
| default: | |||
| // std::cout << "Unsupported format: " << EnumNameFormat(this->format_); | |||
| return -1; | |||
| } | |||
| } | |||
| int32_t MSTensor::Width() const { | |||
| if (this->shape_.size() != 4 && this->shape_.size() != 2) { | |||
| std::cout << "Unsupported tensor shape: " << this->shape_.size(); | |||
| return -1; | |||
| } | |||
| switch (this->format_) { | |||
| case Format::Format_NCHW: | |||
| case Format::Format_KCHW: | |||
| case Format::Format_CKHW: | |||
| return this->shape_[3]; | |||
| case Format::Format_KHWC: | |||
| case Format::Format_NHWC: | |||
| case Format::Format_NHWC4: | |||
| case Format::Format_NC4HW4: | |||
| case Format::Format_CHWK: | |||
| return this->shape_[2]; | |||
| case Format::Format_HWCK: | |||
| case Format::Format_HWKC: | |||
| case Format::Format_HW: | |||
| case Format::Format_HW4: | |||
| return this->shape_[1]; | |||
| default: | |||
| return -1; | |||
| } | |||
| } | |||
| int MSTensor::ElementsC4Num() const { | |||
| int result = 0; | |||
| if (this->shape_.size() == 4) { | |||
| result = Batch() * Height() * Width() * ((Channel() + 3) / 4 * 4); | |||
| } else if (this->shape_.size() == 2) { | |||
| result = this->shape_[0] * ((this->shape_[1] + 3) / 4 * 4); | |||
| } | |||
| return result; | |||
| } | |||
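As a worked check of the accessors above: for a Format_NHWC4 float32 tensor with shape {1, 8, 8, 3}, Channel() is 3 but ElementsC4Num() pads the channel up to 4, so Size() is 1*8*8*4*4 = 1024 bytes rather than 768. A minimal sketch:

```cpp
#include <cassert>
#include "internal/include/ms_tensor.h"

// Sketch: sanity-check the C4-padded size computation for an NHWC4 tensor.
void CheckC4Size() {
  MSTensor t;
  t.data_type_ = kNumberTypeFloat32;
  t.format_ = Format_NHWC4;           // channel dimension padded to a multiple of 4
  t.shape_ = {1, 8, 8, 3};
  assert(t.ElementsNum() == 192);     // 1*8*8*3
  assert(t.ElementsC4Num() == 256);   // 1*8*8*((3+3)/4*4) = 1*8*8*4
  assert(t.Size() == 256 * sizeof(float));  // 1024 bytes, not 768
}
```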
| @@ -14,7 +14,6 @@ | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_LITE_JAVA_SRC_COMMON_JNI_UTILS_H | |||
| #define MINDSPORE_LITE_JAVA_SRC_COMMON_JNI_UTILS_H | |||
| @@ -14,7 +14,6 @@ | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_LITE_JAVA_SRC_COMMON_MS_LOG_H | |||
| #define MINDSPORE_LITE_JAVA_SRC_COMMON_MS_LOG_H | |||
| @@ -30,13 +30,13 @@ extern "C" JNIEXPORT jlong JNICALL Java_com_mindspore_lite_config_MSConfig_creat | |||
| } | |||
| switch (device_type) { | |||
| case 0: | |||
| context->device_ctx_.type = mindspore::lite::DT_CPU; | |||
| context->device_type_ = mindspore::lite::DT_CPU; | |||
| break; | |||
| case 1: | |||
| context->device_ctx_.type = mindspore::lite::DT_GPU; | |||
| context->device_type_ = mindspore::lite::DT_GPU; | |||
| break; | |||
| case 2: | |||
| context->device_ctx_.type = mindspore::lite::DT_NPU; | |||
| context->device_type_ = mindspore::lite::DT_NPU; | |||
| break; | |||
| default: | |||
| MS_LOGE("Invalid device_type : %d", device_type); | |||
| @@ -26,70 +26,63 @@ | |||
| #include <android/asset_manager.h> | |||
| #endif | |||
| extern "C" JNIEXPORT jstring JNICALL | |||
| Java_com_example_mindsporepredict_MainActivity_stringFromJNI( | |||
| JNIEnv* env, | |||
| jobject /* this */) { | |||
| std::string hello = "Hello World!"; | |||
| MS_LOG(DEBUG) << hello; | |||
| return env->NewStringUTF(hello.c_str()); | |||
| extern "C" JNIEXPORT jstring JNICALL Java_com_example_mindsporepredict_MainActivity_stringFromJNI(JNIEnv *env, | |||
| jobject /* this */) { | |||
| std::string hello = "Hello World!"; | |||
| MS_LOG(DEBUG) << hello; | |||
| return env->NewStringUTF(hello.c_str()); | |||
| } | |||
| using Dataset = mindspore::dataset::api::Dataset; | |||
| using Iterator = mindspore::dataset::api::Iterator; | |||
| using mindspore::dataset::Tensor; | |||
| using mindspore::dataset::Path; | |||
| using mindspore::dataset::Tensor; | |||
| using mindspore::dataset::api::Cifar10; | |||
| using mindspore::dataset::api::RandomSampler; | |||
| extern "C" JNIEXPORT void JNICALL | |||
| Java_com_example_mindsporepredict_MainActivity_pathTest( | |||
| JNIEnv* env, | |||
| jobject /* this */, | |||
| jstring path) { | |||
| MS_LOG(WARNING) << env->GetStringUTFChars(path, 0); | |||
| Path f(env->GetStringUTFChars(path, 0)); | |||
| MS_LOG(WARNING) << f.Exists() << f.IsDirectory() << f.ParentPath(); | |||
| // Print out the first few items in the directory | |||
| auto dir_it = Path::DirIterator::OpenDirectory(&f); | |||
| MS_LOG(WARNING) << dir_it.get(); | |||
| int i = 0; | |||
| while (dir_it->hasNext()) { | |||
| Path v = dir_it->next(); | |||
| MS_LOG(WARNING) << v.toString(); | |||
| i++; | |||
| if (i > 5) | |||
| break; | |||
| } | |||
| extern "C" JNIEXPORT void JNICALL Java_com_example_mindsporepredict_MainActivity_pathTest(JNIEnv *env, | |||
| jobject /* this */, | |||
| jstring path) { | |||
| MS_LOG(WARNING) << env->GetStringUTFChars(path, 0); | |||
| Path f(env->GetStringUTFChars(path, 0)); | |||
| MS_LOG(WARNING) << f.Exists() << f.IsDirectory() << f.ParentPath(); | |||
| // Print out the first few items in the directory | |||
| auto dir_it = Path::DirIterator::OpenDirectory(&f); | |||
| MS_LOG(WARNING) << dir_it.get(); | |||
| int i = 0; | |||
| while (dir_it->hasNext()) { | |||
| Path v = dir_it->next(); | |||
| MS_LOG(WARNING) << v.toString(); | |||
| i++; | |||
| if (i > 5) break; | |||
| } | |||
| } | |||
| extern "C" JNIEXPORT void JNICALL | |||
| Java_com_example_mindsporepredict_MainActivity_TestCifar10Dataset( | |||
| JNIEnv* env, | |||
| jobject /* this */, | |||
| jstring path) { | |||
| MS_LOG(INFO) << "Doing MindDataTestPipeline-TestCifar10Dataset."; | |||
| extern "C" JNIEXPORT void JNICALL Java_com_example_mindsporepredict_MainActivity_TestCifar10Dataset(JNIEnv *env, | |||
| jobject /* this */, | |||
| jstring path) { | |||
| MS_LOG(INFO) << "Doing MindDataTestPipeline-TestCifar10Dataset."; | |||
| // Create a Cifar10 Dataset | |||
| std::string folder_path = env->GetStringUTFChars(path, 0); | |||
| std::shared_ptr<Dataset> ds = Cifar10(folder_path, RandomSampler(false, 10)); | |||
| // Create a Cifar10 Dataset | |||
| std::string folder_path = env->GetStringUTFChars(path, 0); | |||
| std::shared_ptr<Dataset> ds = Cifar10(folder_path, RandomSampler(false, 10)); | |||
| // Create an iterator over the result of the above dataset | |||
| // This will trigger the creation of the Execution Tree and launch it. | |||
| std::shared_ptr<Iterator> iter = ds->CreateIterator(); | |||
| // Create an iterator over the result of the above dataset | |||
| // This will trigger the creation of the Execution Tree and launch it. | |||
| std::shared_ptr<Iterator> iter = ds->CreateIterator(); | |||
| // Iterate the dataset and get each row | |||
| std::unordered_map<std::string, std::shared_ptr<Tensor>> row; | |||
| iter->GetNextRow(&row); | |||
| // Iterate the dataset and get each row | |||
| std::unordered_map<std::string, std::shared_ptr<Tensor>> row; | |||
| iter->GetNextRow(&row); | |||
| uint64_t i = 0; | |||
| while (row.size() != 0) { | |||
| i++; | |||
| auto image = row["image"]; | |||
| MS_LOG(INFO) << "Tensor image shape: " << image->shape(); | |||
| iter->GetNextRow(&row); | |||
| } | |||
| uint64_t i = 0; | |||
| while (row.size() != 0) { | |||
| i++; | |||
| auto image = row["image"]; | |||
| MS_LOG(INFO) << "Tensor image shape: " << image->shape(); | |||
| iter->GetNextRow(&row); | |||
| } | |||
| // Manually terminate the pipeline | |||
| iter->Stop(); | |||
| // Manually terminate the pipeline | |||
| iter->Stop(); | |||
| } | |||
| @@ -28,28 +28,28 @@ using mindspore::dataset::api::Cifar10; | |||
| using mindspore::dataset::api::RandomSampler; | |||
| int main() { | |||
| MS_LOG(INFO) << "Doing MindDataTestPipeline-TestCifar10Dataset."; | |||
| MS_LOG(INFO) << "Doing MindDataTestPipeline-TestCifar10Dataset."; | |||
| // Create a Cifar10 Dataset | |||
| std::string folder_path = "./testCifar10Data/"; | |||
| std::shared_ptr<Dataset> ds = Cifar10(folder_path, RandomSampler(false, 10)); | |||
| // Create a Cifar10 Dataset | |||
| std::string folder_path = "./testCifar10Data/"; | |||
| std::shared_ptr<Dataset> ds = Cifar10(folder_path, RandomSampler(false, 10)); | |||
| // Create an iterator over the result of the above dataset | |||
| // This will trigger the creation of the Execution Tree and launch it. | |||
| std::shared_ptr<Iterator> iter = ds->CreateIterator(); | |||
| // Create an iterator over the result of the above dataset | |||
| // This will trigger the creation of the Execution Tree and launch it. | |||
| std::shared_ptr<Iterator> iter = ds->CreateIterator(); | |||
| // Iterate the dataset and get each row | |||
| std::unordered_map<std::string, std::shared_ptr<Tensor>> row; | |||
| iter->GetNextRow(&row); | |||
| // Iterate the dataset and get each row | |||
| std::unordered_map<std::string, std::shared_ptr<Tensor>> row; | |||
| iter->GetNextRow(&row); | |||
| uint64_t i = 0; | |||
| while (row.size() != 0) { | |||
| i++; | |||
| auto image = row["image"]; | |||
| MS_LOG(INFO) << "Tensor image shape: " << image->shape(); | |||
| iter->GetNextRow(&row); | |||
| } | |||
| uint64_t i = 0; | |||
| while (row.size() != 0) { | |||
| i++; | |||
| auto image = row["image"]; | |||
| MS_LOG(INFO) << "Tensor image shape: " << image->shape(); | |||
| iter->GetNextRow(&row); | |||
| } | |||
| // Manually terminate the pipeline | |||
| iter->Stop(); | |||
| // Manually terminate the pipeline | |||
| iter->Stop(); | |||
| } | |||
| @@ -112,7 +112,7 @@ int ElementGreaterFp16(float16_t *input0, float16_t *input1, float16_t *output, | |||
| int ElementGreaterEqualFp16(float16_t *input0, float16_t *input1, float16_t *output, int element_size); | |||
| void TileDimensionsFp16(float16_t *data0, float16_t *data1, float16_t *tile_data0, float16_t *tile_data1, | |||
| ArithmeticParameter *param); | |||
| ArithmeticParameter *param); | |||
| #ifdef __cplusplus | |||
| } | |||
| #endif | |||
| @@ -26,7 +26,7 @@ | |||
| extern "C" { | |||
| #endif | |||
| int ReduceMeanFp16(const int outer_size, const int inner_size, const int axis_size, const float16_t *src_data, | |||
| const int *src_shape, float16_t *dst_data, const int tid, const int thread_num); | |||
| const int *src_shape, float16_t *dst_data, const int tid, const int thread_num); | |||
| #ifdef __cplusplus | |||
| } | |||
| @@ -19,7 +19,6 @@ | |||
| #include "nnacl/op_base.h" | |||
| #include "nnacl/reduce_parameter.h" | |||
| #ifdef __cplusplus | |||
| extern "C" { | |||
| #endif | |||
| @@ -29,8 +29,7 @@ typedef struct SpaceToBatchParameter { | |||
| #ifdef __cplusplus | |||
| extern "C" { | |||
| #endif | |||
| void DoSpaceToBatchNHWC(const float *input, float *output, SpaceToBatchParameter *param, int *in_shape, | |||
| int *out_shape); | |||
| void DoSpaceToBatchNHWC(const float *input, float *output, SpaceToBatchParameter *param, int *in_shape, int *out_shape); | |||
| void DoSpaceToBatchPaddingNHWC(const float *input, float *output, int *in_shape, int *padding, int *out_shape, | |||
| const float *pedding_h_data, const float *pedding_w_data); | |||
| #ifdef __cplusplus | |||
| @@ -29,13 +29,11 @@ typedef struct BNGradParameter { | |||
| extern "C" { | |||
| #endif | |||
| void sumSpatialBatch(const float *in, int size, int ch, float *out); | |||
| void scaleBias(const float *scales, int batch, int n, int size, float *output); | |||
| void normalize(const float *x, const float *mean, const float *invar, int batch, int filters, int spatial, | |||
| float *out); | |||
| void backwardScale(const float *x, const float *mean, const float *invar, const float *delta, int batch, | |||
| int n, int size, float *scale_updates); | |||
| void normalize(const float *x, const float *mean, const float *invar, int batch, int filters, int spatial, float *out); | |||
| void backwardScale(const float *x, const float *mean, const float *invar, const float *delta, int batch, int n, | |||
| int size, float *scale_updates); | |||
| void meanVar(const float *in, int batch, int size, int ch, float eps, float *mean, float *invar); | |||
| void meanDelta(float *yt, int size, int ch, float *invar, float *mean_delta); | |||
| void varianceDelta(const float *x, const float *delta, const float *mean, const float *invar, int batch, int ch, | |||
| @@ -29,4 +29,3 @@ int SliceInt8(const int8_t *input, int8_t *output, SliceParameter *param, int th | |||
| #endif | |||
| #endif // MINDSPORE_LITE_NNACL_INT8_SLICE_INT8_H_ | |||
| @@ -21,8 +21,7 @@ | |||
| #ifdef __cplusplus | |||
| extern "C" { | |||
| #endif | |||
| int L2NormFp32(const float *input_ptr, float *output_ptr, | |||
| L2NormParameter *param); | |||
| int L2NormFp32(const float *input_ptr, float *output_ptr, L2NormParameter *param); | |||
| #ifdef __cplusplus | |||
| } | |||
| #endif | |||
| @@ -59,16 +59,16 @@ void Conv3x3Fp32OutputTransform(const float *gemm_out, float *out_data, const fl | |||
| void Conv3x3Int8InputUnit(int16_t *tmp_data, int16_t *trans_input_data, size_t step, int input_zp); | |||
| void Conv3x3Int8InputTransform(const int16_t *input_data, int16_t *trans_input, int16_t *tmp_data, int start_index, | |||
| int real_cal_num, int out_w_block, ConvParameter *conv_param); | |||
| int real_cal_num, int out_w_block, ConvParameter *conv_param); | |||
| void Conv3x3Int8FilterTransform(const int16_t *weight_data, int16_t *trans_weight, int iC8, int output_channel, | |||
| int kernel_plane); | |||
| void Conv3x3Int8OutputUnit(const int32_t *gemm_out, const int32_t *bias_data, int8_t *output_data, bool h_not_bound, | |||
| bool w_not_bound, int output_w, int real_num, int oc_start, ConvParameter *conv_param); | |||
| bool w_not_bound, int output_w, int real_num, int oc_start, ConvParameter *conv_param); | |||
| void Conv3x3Int8OutputTransform(const int32_t *gemm_out, int8_t *out_data, const int32_t *bias_data, int start_index, | |||
| int real_cal_num, int out_w_block, ConvParameter *conv_param); | |||
| int real_cal_num, int out_w_block, ConvParameter *conv_param); | |||
| #ifdef __cplusplus | |||
| } | |||
| #endif | |||
| @@ -1,22 +1,21 @@ | |||
| if (PLATFORM_ARM32 OR PLATFORM_ARM64) | |||
| # for performance | |||
| if ("${CMAKE_BUILD_TYPE}" STREQUAL "Release") | |||
| set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math") | |||
| set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math") | |||
| #-fno-rtti -fno-exceptions | |||
| set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math") | |||
| set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math") | |||
| endif() | |||
| endif () | |||
| set(LITE_SRC | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/common/graph_util.cc | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/common/ms_tensor_utils.cc | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/../../core/gvar/logging_level.cc | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/common/log_adapter.cc | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/runtime/allocator.cc | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/runtime/runtime_api.cc | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/runtime/thread_pool.c | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/runtime/workspace_pool.cc | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/ir/tensor.cc | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/ir/meta_tensor_extends.cc | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/context.cc | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/tensor.cc | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/executor.cc | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/kernel_registry.cc | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/lite_kernel.cc | |||
| @@ -59,17 +58,14 @@ endif () | |||
| file(GLOB_RECURSE C_OPS_SRC ${CMAKE_CURRENT_SOURCE_DIR}/ops/*.cc) | |||
| add_library(mindspore-lite SHARED ${LITE_SRC} ${C_OPS_SRC}) | |||
| set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-private-field") | |||
| add_library(core_mid_ OBJECT ${CORE_SRC}) | |||
| if (SUPPORT_GPU) | |||
| add_subdirectory(runtime/kernel/opencl) | |||
| target_link_libraries(mindspore-lite | |||
| core_mid_ | |||
| cpu_kernel_mid_ | |||
| opencl_kernel_lib_ | |||
| ) | |||
| else () | |||
| target_link_libraries(mindspore-lite | |||
| core_mid_ | |||
| cpu_kernel_mid_ | |||
| ) | |||
| endif () | |||
| @@ -18,7 +18,7 @@ | |||
| #define MINDSPORE_LITE_COMMON_COMMON_H_ | |||
| #include <string> | |||
| #include "schema/model_generated.h" | |||
| #include "src/tensor.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| @@ -51,9 +51,8 @@ static const char CALIB_NORM[] = "NORM"; | |||
| // dims | |||
| static const int32_t DIM_DEFAULT_SIZE = 4; | |||
| static const schema::Format DEFAULT_FORMAT = schema::Format_NCHW; | |||
| static const schema::Format DEFAULT_FORMAT = schema::Format::Format_NCHW; | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| #endif // MINDSPORE_LITE_COMMON_COMMON_H_ | |||
| @@ -119,13 +119,13 @@ int CompareOutputData(float *output_data, float *correct_data, int data_size) { | |||
| return 0; | |||
| } | |||
| int CompareOutput(float *output_data, std::string file_path) { | |||
| int CompareOutput(float *output_data, std::string file_path) { | |||
| size_t output_size; | |||
| auto ground_truth = reinterpret_cast<float *>(mindspore::lite::ReadFile(file_path.c_str(), &output_size)); | |||
| size_t output_num = output_size / sizeof(float); | |||
| printf("output num : %zu\n", output_num); | |||
| int res = CompareOutputData(output_data, ground_truth, output_num); | |||
| delete [] ground_truth; | |||
| delete[] ground_truth; | |||
| return res; | |||
| } | |||
| } // namespace lite | |||
| @@ -34,7 +34,7 @@ char *ReadFile(const char *file, size_t *size); | |||
| std::string RealPath(const char *path); | |||
| template <typename T> | |||
| void WriteToTxt(const std::string& file_path, void *data, size_t element_size) { | |||
| void WriteToTxt(const std::string &file_path, void *data, size_t element_size) { | |||
| std::ofstream out_file; | |||
| out_file.open(file_path, std::ios::out); | |||
| auto real_data = reinterpret_cast<T *>(data); | |||
| @@ -44,10 +44,10 @@ void WriteToTxt(const std::string& file_path, void *data, size_t element_size) { | |||
| out_file.close(); | |||
| } | |||
| int WriteToBin(const std::string& file_path, void *data, size_t size); | |||
| int WriteToBin(const std::string &file_path, void *data, size_t size); | |||
| int CompareOutputData(float *output_data, float *correct_data, int data_size); | |||
| int CompareOutput(float *output_data, std::string file_path); | |||
| int CompareOutput(float *output_data, std::string file_path); | |||
| std::string GetAndroidPackageName(); | |||
| std::string GetAndroidPackagePath(); | |||
| @@ -55,4 +55,3 @@ std::string GetAndroidPackagePath(); | |||
| } // namespace mindspore | |||
| #endif // MINDSPORE_LITE_COMMON_FILE_UTILS_H_ | |||
| @@ -48,7 +48,7 @@ int CompareRelativeOutput(float *output_data, std::string file_path) { | |||
| size_t output_num = output_size / sizeof(float); | |||
| std::cout << "output num : " << output_num << "\n"; | |||
| int res = CompareOutputRelativeData(output_data, ground_truth, output_num); | |||
| delete [] ground_truth; | |||
| delete[] ground_truth; | |||
| return res; | |||
| } | |||
| } // namespace lite | |||
| @@ -18,7 +18,6 @@ | |||
| #define MINDSPORE_LITE_COMMON_FILE_UTILS_EXT_H_ | |||
| #include <string> | |||
| namespace mindspore { | |||
| namespace lite { | |||
| int CompareRelativeOutput(float *output_data, std::string file_path); | |||
| @@ -27,7 +27,7 @@ | |||
| #include "ir/visitor.h" | |||
| #include "ir/func_graph.h" | |||
| #include "utils/label.h" | |||
| #include "utils/label.h" | |||
| #include "utils/log_adapter.h" | |||
| #include "src/common/utils.h" | |||
| @@ -147,4 +147,3 @@ std::vector<AnfNodePtr> DeepLinkedGraphSearch(const AnfNodePtr &root, const Incl | |||
| return DeepLinkedGraphSearcher(include).Search(root); | |||
| } | |||
| } // namespace mindspore | |||
| @@ -29,17 +29,12 @@ namespace mindspore { | |||
| constexpr const char *ANDROID_LOG_TAG = "MS_LITE"; | |||
| int EnvToInt(const char *env) { | |||
| if (env == nullptr) | |||
| return -1; | |||
| if (strcmp(env, "DEBUG") == 0) | |||
| return 0; | |||
| if (strcmp(env, "INFO") == 0) | |||
| return 1; | |||
| if (strcmp(env, "WARNING") == 0) | |||
| return 2; | |||
| if (strcmp(env, "ERROR") == 0) | |||
| return 3; | |||
| return -1; | |||
| if (env == nullptr) return -1; | |||
| if (strcmp(env, "DEBUG") == 0) return 0; | |||
| if (strcmp(env, "INFO") == 0) return 1; | |||
| if (strcmp(env, "WARNING") == 0) return 2; | |||
| if (strcmp(env, "ERROR") == 0) return 3; | |||
| return -1; | |||
| } | |||
| bool IsPrint(int level) { | |||
| @@ -55,15 +50,15 @@ bool IsPrint(int level) { | |||
| // convert MsLogLevel to corresponding android level | |||
| static int GetAndroidLogLevel(MsLogLevel level) { | |||
| switch (level) { | |||
| case DEBUG: | |||
| return ANDROID_LOG_DEBUG; | |||
| case INFO: | |||
| return ANDROID_LOG_INFO; | |||
| case WARNING: | |||
| return ANDROID_LOG_WARN; | |||
| case ERROR: | |||
| default: | |||
| return ANDROID_LOG_ERROR; | |||
| case DEBUG: | |||
| return ANDROID_LOG_DEBUG; | |||
| case INFO: | |||
| return ANDROID_LOG_INFO; | |||
| case WARNING: | |||
| return ANDROID_LOG_WARN; | |||
| case ERROR: | |||
| default: | |||
| return ANDROID_LOG_ERROR; | |||
| } | |||
| } | |||
| #endif | |||
| @@ -114,16 +109,20 @@ static std::string ExceptionTypeToString(ExceptionType type) { | |||
| } | |||
| void LogWriter::OutputLog(const std::ostringstream &msg) const { | |||
| if (IsPrint(log_level_)) { | |||
| if (IsPrint(log_level_)) { | |||
| std::string sm = ""; | |||
| if (submodule_ != SM_UNKNOWN) { | |||
| sm = std::to_string(submodule_) + " "; | |||
| } | |||
| // #ifdef USE_ANDROID_LOG | |||
| #ifdef ENABLE_ARM | |||
| __android_log_print(GetAndroidLogLevel(log_level_), ANDROID_LOG_TAG, "[%s:%d] %s] %s", location_.file_, | |||
| location_.line_, location_.func_, msg.str().c_str()); | |||
| __android_log_print(GetAndroidLogLevel(log_level_), ANDROID_LOG_TAG, "[%s:%d] %s] %s%s", location_.file_, | |||
| location_.line_, location_.func_, sm.c_str(), msg.str().c_str()); | |||
| #else | |||
| printf("%s [%s:%d] %s] %s\n", EnumStrForMsLogLevel(log_level_), location_.file_, location_.line_, location_.func_, | |||
| msg.str().c_str()); | |||
| printf("%s [%s:%d] %s] %s%s\n", EnumStrForMsLogLevel(log_level_), location_.file_, location_.line_, location_.func_, | |||
| sm.c_str(), msg.str().c_str()); | |||
| #endif | |||
| } | |||
| } | |||
| } | |||
| void LogWriter::operator<(const LogStream &stream) const noexcept { | |||
| @@ -155,4 +154,3 @@ void LogWriter::operator^(const LogStream &stream) const { | |||
| throw std::runtime_error(oss.str()); | |||
| } | |||
| } // namespace mindspore | |||
| @@ -1,41 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "src/common/ms_tensor_utils.h" | |||
| #include <vector> | |||
| #include "utils/log_adapter.h" | |||
| namespace mindspore { | |||
| namespace tensor { | |||
| using mindspore::lite::tensor::LiteTensor; | |||
| using mindspore::lite::tensor::Tensor; | |||
| std::vector<MSTensor *> PackToMSTensors(const std::vector<Tensor *> &in_tensors) { | |||
| std::vector<MSTensor *> ret; | |||
| for (auto *lite_tensor : in_tensors) { | |||
| MS_ASSERT(lite_tensor != nullptr); | |||
| auto *ms_tensor = new (std::nothrow) LiteTensor(lite_tensor); | |||
| if (ms_tensor == nullptr) { | |||
| MS_LOG(ERROR) << "new LiteTensor failed"; | |||
| return ret; | |||
| } | |||
| ret.emplace_back(ms_tensor); | |||
| } | |||
| return ret; | |||
| } | |||
| } // namespace tensor | |||
| } // namespace mindspore | |||
| @@ -29,4 +29,3 @@ inline std::string GetOpTypeName(const schema::CNode &opDef) { return schema::En | |||
| } // namespace mindspore | |||
| #endif // MINDSPORE_LITE_COMMON_OP_UTILS_H_ | |||
| @@ -21,7 +21,7 @@ | |||
| namespace mindspore { | |||
| namespace lite { | |||
| std::vector<std::string> StringSplit(std::string str, const std::string& pattern) { | |||
| std::vector<std::string> StringSplit(std::string str, const std::string &pattern) { | |||
| std::vector<std::string> result; | |||
| if (str.empty()) { | |||
| return result; | |||
| @@ -259,4 +259,3 @@ uint32_t getHwCap(int hwcap_type) { | |||
| #endif | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| @@ -32,7 +32,7 @@ namespace mindspore { | |||
| namespace lite { | |||
| const int USEC = 1000000; | |||
| const int MSEC = 1000; | |||
| std::vector<std::string> StringSplit(std::string str, const std::string& pattern); | |||
| std::vector<std::string> StringSplit(std::string str, const std::string &pattern); | |||
| uint64_t GetTimeUs(void); | |||
| @@ -190,4 +190,3 @@ inline Option<bool> GenericParseValue(const std::string &value) { | |||
| } // namespace mindspore | |||
| #endif // MINDSPORE_LITE_COMMON_UTILS_H_ | |||
| @@ -1,31 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "include/context.h" | |||
| #include "src/runtime/allocator.h" | |||
| namespace mindspore::lite { | |||
| Context::Context() { allocator = Allocator::Create(); } | |||
| Context::~Context() = default; | |||
| Context::Context(int thread_num, std::shared_ptr<Allocator> allocator, DeviceContext device_ctx) { | |||
| this->allocator = std::move(allocator); | |||
| this->thread_num_ = thread_num; | |||
| this->device_ctx_ = device_ctx; | |||
| } | |||
| } // namespace mindspore::lite | |||
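With the constructor definitions above removed, a Context is now configured by assigning its public fields directly, which is what LiteSession::Init() does further down in this diff. A minimal caller-side sketch under that assumption (the helper function and its parameters are illustrative, not part of the change):

    #include "include/context.h"

    // Sketch (assumption): build a struct-style Context by field assignment instead of
    // the removed Context(thread_num, allocator, device_ctx) constructor.
    mindspore::lite::Context MakeContext(int threads, bool prefer_fp16) {
      mindspore::lite::Context ctx;
      ctx.thread_num_ = threads;           // forwarded to ConfigThreadPool() by LiteSession::Init()
      ctx.float16_priority = prefer_fp16;  // copied verbatim by LiteSession::Init()
      // Setting ctx.device_type_ = DT_GPU additionally triggers OpenCL runtime init in Init().
      // ctx.allocator may stay nullptr; Init() falls back to Allocator::Create() in that case.
      return ctx;
    }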
| @@ -17,10 +17,9 @@ | |||
| #include "mindspore/lite/src/executor.h" | |||
| #include "nnacl/pack.h" | |||
| #include "include/errorcode.h" | |||
| #include "src/common/ms_tensor_utils.h" | |||
| namespace mindspore::lite { | |||
| int Executor::Run(std::vector<tensor::Tensor *> &in_tensors, std::vector<tensor::Tensor *> &out_tensors, | |||
| int Executor::Run(std::vector<Tensor *> &in_tensors, std::vector<Tensor *> &out_tensors, | |||
| std::vector<kernel::LiteKernel *> &kernels, Allocator *allocator, | |||
| const session::KernelCallBack &before, const session::KernelCallBack &after) { | |||
| MS_ASSERT(nullptr != allocator); | |||
| @@ -29,11 +28,11 @@ int Executor::Run(std::vector<tensor::Tensor *> &in_tensors, std::vector<tensor: | |||
| MS_LOG(ERROR) << "Graph input tensor is nullptr"; | |||
| return RET_ERROR; | |||
| } | |||
| if (inTensor->Data() == nullptr) { | |||
| if (inTensor->MutableData() == nullptr) { | |||
| MS_LOG(ERROR) << "Graph input tensor data is nullptr"; | |||
| return RET_ERROR; | |||
| } | |||
| if (inTensor->GetFormat() != schema::Format_NHWC) { | |||
| if (inTensor->GetFormat() != schema::Format::Format_NHWC) { | |||
| MS_LOG(ERROR) << "Model input tensor should be NHWC"; | |||
| return RET_ERROR; | |||
| } | |||
| @@ -47,7 +46,7 @@ int Executor::Run(std::vector<tensor::Tensor *> &in_tensors, std::vector<tensor: | |||
| MS_ASSERT(nullptr != kernel); | |||
| if (before != nullptr) { | |||
| if (!before(PackToMSTensors(kernel->in_tensors()), PackToMSTensors(kernel->out_tensors()), | |||
| if (!before(TensorVectorCast(kernel->in_tensors()), TensorVectorCast(kernel->out_tensors()), | |||
| {kernel->name(), kernel->type_str()})) { | |||
| MS_LOG(ERROR) << "run kernel before_callback failed, name: " << kernel->name(); | |||
| } | |||
| @@ -59,9 +58,8 @@ int Executor::Run(std::vector<tensor::Tensor *> &in_tensors, std::vector<tensor: | |||
| MS_LOG(ERROR) << "run kernel failed, name: " << kernel->name(); | |||
| return ret; | |||
| } | |||
| if (after != nullptr) { | |||
| if (!after(PackToMSTensors(kernel->in_tensors()), PackToMSTensors(kernel->out_tensors()), | |||
| if (!after(TensorVectorCast(kernel->in_tensors()), TensorVectorCast(kernel->out_tensors()), | |||
| {kernel->name(), kernel->type_str()})) { | |||
| MS_LOG(ERROR) << "run kernel after_callback failed, name: " << kernel->name(); | |||
| } | |||
| @@ -80,7 +78,7 @@ int Executor::Run(std::vector<tensor::Tensor *> &in_tensors, std::vector<tensor: | |||
| return RET_OK; | |||
| } | |||
| int Executor::TransformTensorLayout(tensor::Tensor *tensor, schema::Format dst_format, Allocator *allocator) { | |||
| int Executor::TransformTensorLayout(Tensor *tensor, schema::Format dst_format, Allocator *allocator) { | |||
| MS_ASSERT(nullptr != tensor); | |||
| MS_ASSERT(nullptr != allocator); | |||
| MS_ASSERT(4 == tensor->shape().size()); | |||
| @@ -96,13 +94,17 @@ int Executor::TransformTensorLayout(tensor::Tensor *tensor, schema::Format dst_f | |||
| return RET_OK; | |||
| } | |||
| int Executor::TransformTensorLayoutFp32(tensor::Tensor *tensor, schema::Format dst_format, Allocator *allocator) { | |||
| int Executor::TransformTensorLayoutFp32(Tensor *tensor, schema::Format dst_format, Allocator *allocator) { | |||
| MS_ASSERT(nullptr != tensor); | |||
| MS_ASSERT(nullptr != allocator); | |||
| MS_ASSERT(4 == tensor->shape().size()); | |||
| auto src_format = tensor->GetFormat(); | |||
| if (src_format == schema::Format_NC4HW4 && dst_format == schema::Format_NHWC) { | |||
| auto *src_data = tensor->Data(); | |||
| if (src_format == schema::Format::Format_NC4HW4 && dst_format == schema::Format::Format_NHWC) { | |||
| auto *src_data = tensor->MutableData(); | |||
| if (src_data == nullptr) { | |||
| MS_LOG(ERROR) << "MutableData return nullptr"; | |||
| return RET_ERROR; | |||
| } | |||
| auto *dst_data = allocator->Malloc(tensor->Size()); | |||
| if (dst_data == nullptr) { | |||
| MS_LOG(ERROR) << "Malloc data failed"; | |||
| @@ -114,18 +116,18 @@ int Executor::TransformTensorLayoutFp32(tensor::Tensor *tensor, schema::Format d | |||
| allocator->Free(src_data); | |||
| return RET_OK; | |||
| } else { | |||
| MS_LOG(ERROR) << "Unsupported layout transform: " << schema::EnumNameFormat(tensor->GetFormat()) << " to " | |||
| << schema::EnumNameFormat(dst_format) << " in float32"; | |||
| MS_LOG(ERROR) << "Unsupported layout transform: " << EnumNameFormat(tensor->GetFormat()) << " to " | |||
| << EnumNameFormat(dst_format) << " in float32"; | |||
| return RET_ERROR; | |||
| } | |||
| } | |||
| int Executor::TransformTensorLayoutUint8(tensor::Tensor *tensor, schema::Format dst_format, Allocator *allocator) { | |||
| int Executor::TransformTensorLayoutUint8(Tensor *tensor, schema::Format dst_format, Allocator *allocator) { | |||
| MS_ASSERT(nullptr != tensor); | |||
| MS_ASSERT(nullptr != allocator); | |||
| MS_ASSERT(4 == tensor->shape().size()); | |||
| MS_LOG(ERROR) << "Unsupported layout transform: " << schema::EnumNameFormat(tensor->GetFormat()) << " to " | |||
| << schema::EnumNameFormat(dst_format) << " in uint8"; | |||
| MS_LOG(ERROR) << "Unsupported layout transform: " << EnumNameFormat(tensor->GetFormat()) << " to " | |||
| << EnumNameFormat(dst_format) << " in uint8"; | |||
| return RET_ERROR; | |||
| } | |||
| } // namespace mindspore::lite | |||
| @@ -30,16 +30,16 @@ class Executor { | |||
| virtual int Prepare(std::vector<kernel::LiteKernel *> &kernels) { return 0; } | |||
| virtual int Run(std::vector<tensor::Tensor *> &in_tensors, std::vector<tensor::Tensor *> &out_tensors, | |||
| std::vector<kernel::LiteKernel *> &kernels, Allocator *allocator = nullptr, | |||
| const session::KernelCallBack &before = nullptr, const session::KernelCallBack &after = nullptr); | |||
| virtual int Run(std::vector<Tensor *> &in_tensors, std::vector<Tensor *> &out_tensors, | |||
| std::vector<kernel::LiteKernel *> &kernels, Allocator *allocator = nullptr, | |||
| const session::KernelCallBack &before = nullptr, const session::KernelCallBack &after = nullptr); | |||
| protected: | |||
| int TransformTensorLayoutFp32(tensor::Tensor *tensor, schema::Format dst_format, Allocator *allocator = nullptr); | |||
| int TransformTensorLayoutFp32(Tensor *tensor, schema::Format dst_format, Allocator *allocator = nullptr); | |||
| int TransformTensorLayoutUint8(tensor::Tensor *tensor, schema::Format dst_format, Allocator *allocator = nullptr); | |||
| int TransformTensorLayoutUint8(Tensor *tensor, schema::Format dst_format, Allocator *allocator = nullptr); | |||
| int TransformTensorLayout(tensor::Tensor *tensor, schema::Format dst_format, Allocator *allocator = nullptr); | |||
| int TransformTensorLayout(Tensor *tensor, schema::Format dst_format, Allocator *allocator = nullptr); | |||
| }; | |||
| } // namespace mindspore::lite | |||
| @@ -1,28 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "ir/meta_tensor.h" | |||
| namespace mindspore { | |||
| namespace tensor { | |||
| abstract::AbstractBasePtr MetaTensor::ToAbstract() { | |||
| MS_LOG(ERROR) << "MetaTensor ToAbstract is not implemented"; | |||
| return nullptr; | |||
| } | |||
| TypePtr MetaTensor::Dtype() const { return nullptr; } | |||
| } // namespace tensor | |||
| } // namespace mindspore | |||
| @@ -96,10 +96,9 @@ bool KernelRegistry::Merge(const std::unordered_map<KernelKey, KernelCreator> &n | |||
| const kernel::KernelCreator *KernelRegistry::GetCreatorArrays() { return creator_arrays_; } | |||
| kernel::LiteKernel *KernelRegistry::GetKernel(const std::vector<tensor::Tensor *> &in_tensors, | |||
| const std::vector<tensor::Tensor *> &out_tensors, | |||
| const PrimitiveC *primitive, const Context *ctx, | |||
| const kernel::KernelKey &key) { | |||
| kernel::LiteKernel *KernelRegistry::GetKernel(const std::vector<Tensor *> &in_tensors, | |||
| const std::vector<Tensor *> &out_tensors, const PrimitiveC *primitive, | |||
| const Context *ctx, const kernel::KernelKey &key) { | |||
| MS_ASSERT(nullptr != primitive); | |||
| MS_ASSERT(nullptr != ctx); | |||
| auto parameter = kernel::PopulateParameter(primitive); | |||
| @@ -44,9 +44,8 @@ class KernelRegistry { | |||
| void RegKernel(const kernel::KERNEL_ARCH arch, const TypeId data_type, const schema::PrimitiveType type, | |||
| kernel::KernelCreator creator); | |||
| bool Merge(const std::unordered_map<kernel::KernelKey, kernel::KernelCreator> &newCreators); | |||
| kernel::LiteKernel *GetKernel(const std::vector<tensor::Tensor *> &in_tensors, | |||
| const std::vector<tensor::Tensor *> &out_tensors, const PrimitiveC *primitive, | |||
| const Context *ctx, const kernel::KernelKey &key); | |||
| kernel::LiteKernel *GetKernel(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors, | |||
| const PrimitiveC *primitive, const Context *ctx, const kernel::KernelKey &key); | |||
| protected: | |||
| static const int device_type_length_{kKernelArch_MAX - kKernelArch_MIN + 1}; | |||
| @@ -101,10 +101,9 @@ std::vector<kernel::LiteKernel *> LiteKernelUtil::SubgraphOutputKernels( | |||
| return output_kernels; | |||
| } | |||
| std::vector<lite::tensor::Tensor *> LiteKernelUtil::SubgraphInputTensors( | |||
| const std::vector<kernel::LiteKernel *> &kernels) { | |||
| std::vector<lite::tensor::Tensor *> input_tensors; | |||
| std::vector<lite::tensor::Tensor *> all_output_tensors; | |||
| std::vector<lite::Tensor *> LiteKernelUtil::SubgraphInputTensors(const std::vector<kernel::LiteKernel *> &kernels) { | |||
| std::vector<lite::Tensor *> input_tensors; | |||
| std::vector<lite::Tensor *> all_output_tensors; | |||
| for (const auto &kernel : kernels) { | |||
| all_output_tensors.insert(all_output_tensors.end(), kernel->out_tensors().begin(), kernel->out_tensors().end()); | |||
| } | |||
| @@ -112,7 +111,7 @@ std::vector<lite::tensor::Tensor *> LiteKernelUtil::SubgraphInputTensors( | |||
| for (const auto &kernel : input_kernels) { | |||
| for (const auto &tensor : kernel->in_tensors()) { | |||
| auto iter = std::find(all_output_tensors.begin(), all_output_tensors.end(), tensor); | |||
| if (iter == all_output_tensors.end() && tensor->Data() == nullptr) { | |||
| if (iter == all_output_tensors.end() && tensor->data_c() == nullptr) { | |||
| input_tensors.emplace_back(tensor); | |||
| } | |||
| } | |||
| @@ -120,10 +119,9 @@ std::vector<lite::tensor::Tensor *> LiteKernelUtil::SubgraphInputTensors( | |||
| return input_tensors; | |||
| } | |||
| std::vector<lite::tensor::Tensor *> LiteKernelUtil::SubgraphOutputTensors( | |||
| const std::vector<kernel::LiteKernel *> &kernels) { | |||
| std::vector<lite::tensor::Tensor *> output_tensors; | |||
| std::vector<lite::tensor::Tensor *> all_input_tensors; | |||
| std::vector<lite::Tensor *> LiteKernelUtil::SubgraphOutputTensors(const std::vector<kernel::LiteKernel *> &kernels) { | |||
| std::vector<lite::Tensor *> output_tensors; | |||
| std::vector<lite::Tensor *> all_input_tensors; | |||
| for (const auto &kernel : kernels) { | |||
| all_input_tensors.insert(all_input_tensors.end(), kernel->in_tensors().begin(), kernel->in_tensors().end()); | |||
| } | |||
| @@ -165,5 +163,5 @@ void LiteKernelUtil::InitTensorRefCount(std::vector<kernel::LiteKernel *> &kerne | |||
| } | |||
| } | |||
| int LiteKernelUtil::SetInput(LiteKernel &kernelMod, std::vector<lite::tensor::Tensor *> inputs) { return -1; } | |||
| int LiteKernelUtil::SetInput(LiteKernel &kernelMod, std::vector<lite::Tensor *> inputs) { return -1; } | |||
| } // namespace mindspore::kernel | |||
| @@ -24,7 +24,7 @@ | |||
| #include "src/ops/primitive_c.h" | |||
| #include "nnacl/op_base.h" | |||
| #include "include/context.h" | |||
| #include "src/ir/tensor.h" | |||
| #include "src/tensor.h" | |||
| #include "include/errorcode.h" | |||
| // using mindspore::kernel::AddressPtr; | |||
| @@ -52,8 +52,8 @@ class LiteKernel { | |||
| public: | |||
| LiteKernel() = default; | |||
| // parameter should be deleted or freed by the caller, and only after the LiteKernel itself is deleted | |||
| LiteKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &in_tensors, | |||
| const std::vector<lite::tensor::Tensor *> &out_tensors, const lite::Context *ctx, | |||
| LiteKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &in_tensors, | |||
| const std::vector<lite::Tensor *> &out_tensors, const lite::Context *ctx, | |||
| const mindspore::lite::PrimitiveC *primitive) | |||
| : op_parameter_(parameter), | |||
| in_tensors_(in_tensors), | |||
| @@ -105,13 +105,13 @@ class LiteKernel { | |||
| std::string type_str() { return schema::EnumNamePrimitiveType(this->Type()); } | |||
| void set_in_tensors(const std::vector<lite::tensor::Tensor *> &in_tensors) { this->in_tensors_ = in_tensors; } | |||
| void set_in_tensors(const std::vector<lite::Tensor *> &in_tensors) { this->in_tensors_ = in_tensors; } | |||
| void set_out_tensors(const std::vector<lite::tensor::Tensor *> &out_tensors) { this->out_tensors_ = out_tensors; } | |||
| void set_out_tensors(const std::vector<lite::Tensor *> &out_tensors) { this->out_tensors_ = out_tensors; } | |||
| std::vector<lite::tensor::Tensor *> &in_tensors() { return this->in_tensors_; } | |||
| std::vector<lite::Tensor *> &in_tensors() { return this->in_tensors_; } | |||
| std::vector<lite::tensor::Tensor *> &out_tensors() { return this->out_tensors_; } | |||
| std::vector<lite::Tensor *> &out_tensors() { return this->out_tensors_; } | |||
| void AddInKernel(LiteKernel *kernel) { this->in_kernels_.emplace_back(kernel); } | |||
| @@ -142,8 +142,8 @@ class LiteKernel { | |||
| std::string name_; | |||
| OpParameter *op_parameter_ = nullptr; | |||
| // tensors are freed in ~LiteSession() | |||
| std::vector<lite::tensor::Tensor *> in_tensors_; | |||
| std::vector<lite::tensor::Tensor *> out_tensors_; | |||
| std::vector<lite::Tensor *> in_tensors_; | |||
| std::vector<lite::Tensor *> out_tensors_; | |||
| const mindspore::lite::PrimitiveC *primitive_ = nullptr; | |||
| const lite::Context *context_ = nullptr; | |||
| std::vector<LiteKernel *> in_kernels_; | |||
| @@ -154,8 +154,7 @@ class LiteKernel { | |||
| class SubGraphKernel : public LiteKernel { | |||
| public: | |||
| explicit SubGraphKernel(const std::vector<lite::tensor::Tensor *> &inputs, | |||
| const std::vector<lite::tensor::Tensor *> &outputs, | |||
| explicit SubGraphKernel(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, | |||
| const std::vector<kernel::LiteKernel *> &in_kernels, | |||
| const std::vector<kernel::LiteKernel *> &out_kernels, | |||
| const std::vector<kernel::LiteKernel *> &nodes, const lite::Context *ctx, | |||
| @@ -174,8 +173,8 @@ class SubGraphKernel : public LiteKernel { | |||
| std::vector<LiteKernel *> nodes_; | |||
| }; | |||
| typedef LiteKernel *(*KernelCreator)(const std::vector<lite::tensor::Tensor *> &inputs, | |||
| const std::vector<lite::tensor::Tensor *> &outputs, OpParameter *parameter, | |||
| typedef LiteKernel *(*KernelCreator)(const std::vector<lite::Tensor *> &inputs, | |||
| const std::vector<lite::Tensor *> &outputs, OpParameter *parameter, | |||
| const lite::Context *ctx, const KernelKey &desc, | |||
| const mindspore::lite::PrimitiveC *primitive); | |||
| @@ -187,13 +186,13 @@ class LiteKernelUtil { | |||
| static std::vector<kernel::LiteKernel *> SubgraphOutputKernels(const std::vector<kernel::LiteKernel *> &kernels); | |||
| static std::vector<lite::tensor::Tensor *> SubgraphInputTensors(const std::vector<kernel::LiteKernel *> &kernels); | |||
| static std::vector<lite::Tensor *> SubgraphInputTensors(const std::vector<kernel::LiteKernel *> &kernels); | |||
| static std::vector<lite::tensor::Tensor *> SubgraphOutputTensors(const std::vector<kernel::LiteKernel *> &kernels); | |||
| static std::vector<lite::Tensor *> SubgraphOutputTensors(const std::vector<kernel::LiteKernel *> &kernels); | |||
| static void InitTensorRefCount(std::vector<kernel::LiteKernel *> &kernels); | |||
| static int SetInput(LiteKernel &kernelMod, std::vector<lite::tensor::Tensor *> inputs); | |||
| static int SetInput(LiteKernel &kernelMod, std::vector<lite::Tensor *> inputs); | |||
| }; | |||
| } // namespace mindspore::kernel | |||
| @@ -34,9 +34,8 @@ namespace mindspore { | |||
| namespace lite { | |||
| static std::vector<schema::PrimitiveType> packed_op = { | |||
| schema::PrimitiveType_Conv2D, schema::PrimitiveType_DeConv2D, | |||
| schema::PrimitiveType_DepthwiseConv2D, schema::PrimitiveType_DeDepthwiseConv2D, | |||
| schema::PrimitiveType_MatMul}; | |||
| schema::PrimitiveType_Conv2D, schema::PrimitiveType_DeConv2D, schema::PrimitiveType_DepthwiseConv2D, | |||
| schema::PrimitiveType_DeDepthwiseConv2D, schema::PrimitiveType_MatMul}; | |||
| // this method will not check whether tensor_idx is a weight tensor index, caller should ensure this. | |||
| static bool WeightTensorNeedCopy(const lite::Model *model, const uint32_t tensor_idx) { | |||
| @@ -63,7 +62,7 @@ int LiteSession::ConvertTensors(const lite::Model *model) { | |||
| if (srcTensor->dims() == nullptr) { | |||
| MS_LOG(DEBUG) << "Dims of " << i << "th tensor is nullptr"; | |||
| } else { | |||
| if (srcTensor->nodeType() == schema::NodeType_ValueNode) { | |||
| if (TensorCategory(srcTensor) == Tensor::Category::CONST) { | |||
| for (size_t j = 0; j < srcTensor->dims()->size(); j++) { | |||
| shape.push_back(srcTensor->dims()->data()[j]); | |||
| } | |||
| @@ -71,12 +70,12 @@ int LiteSession::ConvertTensors(const lite::Model *model) { | |||
| } | |||
| int dataType = srcTensor->dataType(); | |||
| auto *dstTensor = | |||
| new (std::nothrow) tensor::Tensor(TypeId(dataType), shape, srcTensor->format(), srcTensor->nodeType()); | |||
| new (std::nothrow) Tensor(TypeId(dataType), shape, srcTensor->format(), TensorCategory(srcTensor)); | |||
| if (dstTensor == nullptr) { | |||
| MS_LOG(ERROR) << "new " << i << "th tensor failed"; | |||
| return RET_NULL_PTR; | |||
| } | |||
| if (srcTensor->nodeType() == schema::NodeType_ValueNode && srcTensor->data() != nullptr && | |||
| if (TensorCategory(srcTensor) == Tensor::Category::CONST && srcTensor->data() != nullptr && | |||
| srcTensor->data()->size() > 0) { | |||
| if (shape.empty()) { | |||
| shape.push_back(1); | |||
| @@ -84,12 +83,12 @@ int LiteSession::ConvertTensors(const lite::Model *model) { | |||
| } | |||
| MS_ASSERT(dstTensor->Size() == srcTensor->data()->size()); | |||
| if (WeightTensorNeedCopy(model, i)) { | |||
| auto ret = dstTensor->MallocData(); | |||
| if (ret != RET_OK) { | |||
| MS_LOG(ERROR) << "Malloc data for " << i << "th tensor failed"; | |||
| auto dst_data = dstTensor->MutableData(); | |||
| if (dst_data == nullptr) { | |||
| MS_LOG(ERROR) << "MutableData from " << i << "th tensor is nullptr"; | |||
| return RET_ERROR; | |||
| } | |||
| memcpy(dstTensor->Data(), srcTensor->data()->data(), dstTensor->Size()); | |||
| memcpy(dst_data, srcTensor->data()->data(), dstTensor->Size()); | |||
| copyed_tensor_idxes_.emplace_back(i); | |||
| } else { | |||
| dstTensor->SetData(const_cast<unsigned char *>(srcTensor->data()->data())); | |||
| @@ -98,7 +97,7 @@ int LiteSession::ConvertTensors(const lite::Model *model) { | |||
| auto quant_params = srcTensor->quantParams(); | |||
| if (quant_params != nullptr) { | |||
| for (size_t j = 0; j < quant_params->size(); j++) { | |||
| tensor::QuantArg quant_arg{}; | |||
| QuantArg quant_arg{}; | |||
| quant_arg.scale = quant_params->Get(j)->scale(); | |||
| quant_arg.zeroPoint = quant_params->Get(j)->zeroPoint(); | |||
| dstTensor->AddQuantParam(quant_arg); | |||
| @@ -126,7 +125,7 @@ void LiteSession::InitGraphInputMSTensors() { | |||
| MS_ASSERT(this->input_vec_.empty()); | |||
| for (auto &input_tensor : this->inputs_) { | |||
| MS_ASSERT(input_tensor != nullptr); | |||
| this->input_vec_.emplace_back(new lite::tensor::LiteTensor(input_tensor)); | |||
| this->input_vec_.emplace_back(input_tensor); | |||
| } | |||
| } | |||
| @@ -168,13 +167,11 @@ void LiteSession::InitGraphInputMap(const lite::Model *model) { | |||
| } | |||
| MS_ASSERT(in_tensor_index < this->tensors_.size()); | |||
| auto *in_tensor = this->tensors_.at(in_tensor_index); | |||
| MS_ASSERT(in_tensor != nullptr); | |||
| auto *ms_tensor = new (std::nothrow) tensor::LiteTensor(in_tensor); | |||
| if (ms_tensor == nullptr) { | |||
| MS_LOG(ERROR) << "new lite tensor fail!"; | |||
| if (in_tensor == nullptr) { | |||
| MS_LOG(ERROR) << "in_tensor is null!"; | |||
| return; | |||
| } | |||
| this->input_map_[in_node->name_].emplace_back(ms_tensor); | |||
| this->input_map_[in_node->name_].emplace_back(in_tensor); | |||
| } | |||
| } | |||
| } | |||
| @@ -202,13 +199,11 @@ void LiteSession::InitGraphOutputNodeMap(const lite::Model *model) { | |||
| } | |||
| MS_ASSERT(out_tensor_index < this->tensors_.size()); | |||
| auto *out_tensor = this->tensors_.at(out_tensor_index); | |||
| MS_ASSERT(out_tensor != nullptr); | |||
| auto *ms_tensor = new (std::nothrow) tensor::LiteTensor(out_tensor); | |||
| if (ms_tensor == nullptr) { | |||
| MS_LOG(ERROR) << "new lite tensor fail!"; | |||
| if (out_tensor == nullptr) { | |||
| MS_LOG(ERROR) << "out_tensor is null!"; | |||
| return; | |||
| } | |||
| this->output_node_map_[out_node->name_].emplace_back(ms_tensor); | |||
| this->output_node_map_[out_node->name_].emplace_back(out_tensor); | |||
| } | |||
| } | |||
| } | |||
| @@ -230,13 +225,11 @@ void LiteSession::InitGraphOutputTensorMap(const lite::Model *model) { | |||
| size_t graph_out_index = model->output_indices_[i]; | |||
| MS_ASSERT(graph_out_index < this->tensors_.size()); | |||
| auto *out_tensor = this->tensors_.at(graph_out_index); | |||
| MS_ASSERT(out_tensor != nullptr); | |||
| auto *ms_tensor = new (std::nothrow) tensor::LiteTensor(out_tensor); | |||
| if (ms_tensor == nullptr) { | |||
| MS_LOG(ERROR) << "new lite tensor fail!"; | |||
| if (out_tensor == nullptr) { | |||
| MS_LOG(ERROR) << "out_tensor is null!"; | |||
| return; | |||
| } | |||
| this->output_tensor_map_.insert(std::make_pair(std::to_string(graph_out_index), ms_tensor)); | |||
| this->output_tensor_map_.insert(std::make_pair(std::to_string(graph_out_index), out_tensor)); | |||
| } | |||
| } | |||
| @@ -291,13 +284,20 @@ int LiteSession::RunGraph(const session::KernelCallBack &before, const session:: | |||
| int LiteSession::Init(Context *context) { | |||
| MS_ASSERT(nullptr != context); | |||
| this->context_ = new (std::nothrow) Context(context->thread_num_, context->allocator, context->device_ctx_); | |||
| this->context_ = new (std::nothrow) Context(); | |||
| if (this->context_ == nullptr) { | |||
| MS_LOG(ERROR) << "new context failed"; | |||
| return RET_MEMORY_FAILED; | |||
| } | |||
| // copy the user-provided configuration into the session-owned Context | |||
| this->context_->thread_num_ = context->thread_num_; | |||
| this->context_->allocator = context->allocator; | |||
| this->context_->device_type_ = context->device_type_; | |||
| this->context_->float16_priority = context->float16_priority; | |||
| this->context_->cpu_bind_mode_ = context->cpu_bind_mode_; | |||
| if (context_->allocator == nullptr) { | |||
| context_->allocator = Allocator::Create(); | |||
| } | |||
| ConfigThreadPool(THREAD_POOL_DEFAULT, context->thread_num_, context->cpu_bind_mode_); | |||
| auto ret = KernelRegistry::GetInstance()->Init(); | |||
| if (ret != RET_OK) { | |||
| @@ -305,7 +305,7 @@ int LiteSession::Init(Context *context) { | |||
| return ret; | |||
| } | |||
| #if SUPPORT_GPU | |||
| if (context_->device_ctx_.type == DT_GPU) { | |||
| if (context_->device_type_ == DT_GPU) { | |||
| auto opencl_runtime = lite::opencl::OpenCLRuntime::GetInstance(); | |||
| opencl_runtime->Init(); | |||
| MS_LOG(INFO) << "Init OpenCL runtime."; | |||
| @@ -330,46 +330,22 @@ LiteSession::~LiteSession() { | |||
| auto *tensor = tensors_.at(i); | |||
| MS_ASSERT(tensor != nullptr); | |||
| // data of a weight tensor used by a node in packed_op cannot be freed here; it is freed when the meta_graph is freed | |||
| if (tensor->TensorType() == schema::NodeType_ValueNode && !IsContain(this->inputs_, tensor) && | |||
| if (tensor->category() == Tensor::Category::CONST && !IsContain(this->inputs_, tensor) && | |||
| !IsContain(copyed_tensor_idxes_, i)) { | |||
| tensor->SetData(nullptr); | |||
| } | |||
| delete tensor; | |||
| } | |||
| // tensor::Tensor * in input_map output_map are freed in tensors | |||
| for (auto iter : this->input_map_) { | |||
| for (auto *ms_tensor : iter.second) { | |||
| ((tensor::LiteTensor *)ms_tensor)->SetTensorImpl(nullptr); | |||
| delete ms_tensor; | |||
| } | |||
| iter.second.clear(); | |||
| } | |||
| // Tensor pointers held by input_map_ and the output maps are freed together with tensors_ | |||
| input_map_.clear(); | |||
| for (auto iter : this->output_node_map_) { | |||
| for (auto *ms_tensor : iter.second) { | |||
| ((tensor::LiteTensor *)ms_tensor)->SetTensorImpl(nullptr); | |||
| delete ms_tensor; | |||
| } | |||
| iter.second.clear(); | |||
| } | |||
| output_node_map_.clear(); | |||
| for (auto iter : this->output_tensor_map_) { | |||
| ((tensor::LiteTensor *)(iter.second))->SetTensorImpl(nullptr); | |||
| delete (iter.second); | |||
| } | |||
| output_tensor_map_.clear(); | |||
| input_vec_.clear(); | |||
| for (auto *kernel : kernels_) { | |||
| delete kernel; | |||
| } | |||
| for (auto *ms_tensor : input_vec_) { | |||
| if (ms_tensor != nullptr) { | |||
| ((tensor::LiteTensor *)ms_tensor)->SetTensorImpl(nullptr); | |||
| delete ms_tensor; | |||
| } | |||
| } | |||
| input_vec_.clear(); | |||
| #if SUPPORT_GPU | |||
| if (context_->device_ctx_.type == DT_GPU) { | |||
| if (context_->device_type_ == DT_GPU) { | |||
| lite::opencl::OpenCLRuntime::DeleteInstance(); | |||
| } | |||
| #endif | |||
| @@ -388,10 +364,6 @@ std::vector<mindspore::tensor::MSTensor *> LiteSession::GetInputsByName(const st | |||
| return ret->second; | |||
| } | |||
| std::unordered_map<std::string, std::vector<mindspore::tensor::MSTensor *>> LiteSession::GetOutputMapByNode() const { | |||
| return this->output_node_map_; | |||
| } | |||
| std::vector<mindspore::tensor::MSTensor *> LiteSession::GetOutputsByNodeName(const std::string &node_name) const { | |||
| auto ret = output_node_map_.find(node_name); | |||
| if (ret == output_node_map_.end()) { | |||
| @@ -413,7 +385,7 @@ mindspore::tensor::MSTensor *LiteSession::GetOutputByTensorName(const std::strin | |||
| return ret->second; | |||
| } | |||
| std::unordered_map<std::string, mindspore::tensor::MSTensor *> LiteSession::GetOutputMapByTensor() const { | |||
| std::unordered_map<std::string, mindspore::tensor::MSTensor *> LiteSession::GetOutputs() const { | |||
| return this->output_tensor_map_; | |||
| } | |||
| @@ -434,7 +406,7 @@ int LiteSession::ResizeInputs(const std::vector<mindspore::tensor::MSTensor *> & | |||
| } | |||
| int LiteSession::Resize(const std::vector<mindspore::tensor::MSTensor *> &inputs) { | |||
| std::vector<tensor::Tensor *> inputs_old(inputs_); | |||
| std::vector<Tensor *> inputs_old(inputs_); | |||
| auto ret = ResizeInputs(inputs); | |||
| if (ret != RET_OK) { | |||
| inputs_ = inputs_old; | |||
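The session changes above fold GetOutputMapByTensor() into GetOutputs() and drop the node-keyed output map, leaving GetOutputsByNodeName() for per-node lookups. A minimal consumer-side sketch under those assumptions (the header path and the per-tensor calls mentioned in the comments are assumptions, not shown in this diff):

    #include <string>
    #include <unordered_map>
    #include "include/lite_session.h"  // assumed location of the public session interface

    // Sketch: walk the graph outputs through the renamed accessor.
    void DumpOutputs(mindspore::session::LiteSession *session) {
      auto outputs = session->GetOutputs();  // keyed by the output tensor index rendered as a string
      for (const auto &kv : outputs) {
        const std::string &key = kv.first;
        mindspore::tensor::MSTensor *tensor = kv.second;
        (void)key;
        (void)tensor;  // e.g. inspect the tensor's size or data here
      }
    }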
| @@ -28,6 +28,7 @@ | |||
| #include "include/context.h" | |||
| #include "schema/model_generated.h" | |||
| #include "src/executor.h" | |||
| #include "src/tensor.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| @@ -50,15 +51,13 @@ class LiteSession : public session::LiteSession { | |||
| int RunGraph(const session::KernelCallBack &before = nullptr, | |||
| const session::KernelCallBack &after = nullptr) override; | |||
| std::unordered_map<std::string, std::vector<mindspore::tensor::MSTensor *>> GetOutputMapByNode() const override; | |||
| std::vector<mindspore::tensor::MSTensor *> GetOutputsByNodeName(const std::string &node_name) const override; | |||
| std::vector<std::string> GetOutputTensorNames() const override; | |||
| mindspore::tensor::MSTensor *GetOutputByTensorName(const std::string &tensor_name) const override; | |||
| std::unordered_map<std::string, mindspore::tensor::MSTensor *> GetOutputMapByTensor() const override; | |||
| std::unordered_map<std::string, mindspore::tensor::MSTensor *> GetOutputs() const override; | |||
| int Resize(const std::vector<mindspore::tensor::MSTensor *> &inputs) override; | |||
| @@ -86,12 +85,12 @@ class LiteSession : public session::LiteSession { | |||
| protected: | |||
| Context *context_ = nullptr; | |||
| std::vector<kernel::LiteKernel *> kernels_; | |||
| std::vector<tensor::Tensor *> tensors_; | |||
| std::vector<Tensor *> tensors_; | |||
| std::vector<size_t> copyed_tensor_idxes_; | |||
| // graph input tensors | |||
| std::vector<tensor::Tensor *> inputs_; | |||
| std::vector<Tensor *> inputs_; | |||
| // graph output tensors | |||
| std::vector<tensor::Tensor *> outputs_; | |||
| std::vector<Tensor *> outputs_; | |||
| // graph input MSTensors | |||
| std::vector<mindspore::tensor::MSTensor *> input_vec_; | |||
| // graph input node name -- input tensors | |||
| @@ -30,7 +30,7 @@ bool ConvertNodes(const schema::MetaGraph *meta_graph, Model *model) { | |||
| } | |||
| auto c_node = meta_graph->nodes()->GetAs<schema::CNode>(i); | |||
| auto src_prim = c_node->primitive(); | |||
| node->primitive_ = PrimitiveC::UnPackFromSchemaPrimitive(const_cast<schema::Primitive *>(src_prim)); | |||
| node->primitive_ = PrimitiveC::Create(const_cast<schema::Primitive *>(src_prim)); | |||
| if (node->primitive_ == nullptr) { | |||
| MS_LOG(ERROR) << "unpack primitive == nullptr!"; | |||
| return false; | |||
| @@ -56,7 +56,7 @@ bool ConvertTensors(const schema::MetaGraph *meta_graph, Model *model) { | |||
| for (uint32_t i = 0; i < tensor_count; ++i) { | |||
| auto *tensor = meta_graph->allTensors()->GetAs<schema::Tensor>(i); | |||
| if (tensor == nullptr) { | |||
| MS_LOG(ERROR) << i << "th tensor in model is nullptr"; | |||
| MS_LOG(ERROR) << i << "th tensor in model is nullptr"; | |||
| return false; | |||
| } | |||
| model->all_tensors_.push_back(const_cast<mindspore::schema::Tensor *>(tensor)); | |||
| @@ -44,7 +44,7 @@ int AddN::GetN() const { return this->primitive_->value_as_AddN()->N(); } | |||
| namespace { | |||
| constexpr int kLeastInputNum = 2; | |||
| } | |||
| int AddN::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) { | |||
| int AddN::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) { | |||
| MS_ASSERT(this->primitive_ != nullptr); | |||
| auto input = inputs.front(); | |||
| MS_ASSERT(input != nullptr); | |||
| @@ -37,7 +37,7 @@ class AddN : public PrimitiveC { | |||
| int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; | |||
| #endif | |||
| int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; | |||
| int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; | |||
| int GetN() const; | |||
| }; | |||
| } // namespace lite | |||
| @@ -17,7 +17,6 @@ | |||
| namespace mindspore { | |||
| namespace lite { | |||
| #ifdef PRIMITIVE_WRITEABLE | |||
| #else | |||
| @@ -36,7 +35,7 @@ int ApplyMomentum::UnPackToFlatBuilder(const schema::Primitive *primitive, flatb | |||
| } | |||
| #endif | |||
| int ApplyMomentum::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) { | |||
| int ApplyMomentum::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) { | |||
| if (5 != inputs.size()) { | |||
| MS_LOG(ERROR) << "ApplyMomentum should have at 5 input tensors"; | |||
| return RET_ERROR; | |||
| @@ -36,7 +36,7 @@ class ApplyMomentum : public PrimitiveC { | |||
| int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; | |||
| #endif | |||
| int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; | |||
| int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; | |||
| }; | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| @@ -54,7 +54,7 @@ int ArgMax::GetAxisType() const { return this->primitive_->value_as_ArgMax()->ax | |||
| #endif | |||
| int ArgMax::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) { | |||
| int ArgMax::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { | |||
| MS_ASSERT(this->primitive_ != nullptr); | |||
| auto input = inputs_.front(); | |||
| MS_ASSERT(input != nullptr); | |||
| @@ -41,7 +41,7 @@ class ArgMax : public PrimitiveC { | |||
| int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; | |||
| #endif | |||
| int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; | |||
| int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; | |||
| int GetAxis() const; | |||
| bool GetOutMaxValue() const; | |||
| int GetTopK() const; | |||
| @@ -54,7 +54,7 @@ int ArgMin::GetAxisType() const { return this->primitive_->value_as_ArgMin()->ax | |||
| #endif | |||
| int ArgMin::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) { | |||
| int ArgMin::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) { | |||
| MS_ASSERT(this->primitive_ != nullptr); | |||
| auto input = inputs_.front(); | |||
| MS_ASSERT(input != nullptr); | |||
| @@ -41,7 +41,7 @@ class ArgMin : public PrimitiveC { | |||
| int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; | |||
| #endif | |||
| int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; | |||
| int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; | |||
| int GetAxis() const; | |||
| bool GetOutMaxValue() const; | |||
| int GetTopK() const; | |||
| @@ -17,11 +17,11 @@ | |||
| #include "src/ops/arithmetic.h" | |||
| #include "include/errorcode.h" | |||
| #include "utils/log_adapter.h" | |||
| #include "src/ir/tensor.h" | |||
| #include "src/tensor.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| int Arithmetic::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) { | |||
| int Arithmetic::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) { | |||
| MS_ASSERT(this->primitive_ != nullptr); | |||
| if (inputs_.size() != kDoubleNum) { | |||
| MS_LOG(ERROR) << "The number of input must be " << kDoubleNum; | |||
| @@ -38,7 +38,7 @@ class Arithmetic : public PrimitiveC { | |||
| return RET_ERROR; | |||
| } | |||
| #endif | |||
| int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; | |||
| int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; | |||
| bool Broadcasting() { return this->broadcasting_; } | |||
| int NDims() { return this->ndim_; } | |||
| std::vector<int> InShape0() { return this->in_shape0_; } | |||
| @@ -17,12 +17,11 @@ | |||
| #include "src/ops/arithmetic_grad.h" | |||
| #include "include/errorcode.h" | |||
| #include "utils/log_adapter.h" | |||
| #include "src/ir/tensor.h" | |||
| #include "src/tensor.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| int ArithmeticGrad::InferShape(std::vector<lite::tensor::Tensor *> inputs_, | |||
| std::vector<lite::tensor::Tensor *> outputs_) { | |||
| int ArithmeticGrad::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) { | |||
| if (inputs_.size() != 3) { | |||
| MS_LOG(ERROR) << "The number of input must be 3"; | |||
| return RET_ERROR; | |||
| @@ -38,7 +38,7 @@ class ArithmeticGrad : public PrimitiveC { | |||
| return RET_ERROR; | |||
| } | |||
| #endif | |||
| int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; | |||
| int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; | |||
| bool Broadcasting() { return this->broadcasting_; } | |||
| int NDims() { return this->ndim_; } | |||
| std::vector<int> dyShape() { return this->dy_shape_; } | |||
| @@ -21,7 +21,7 @@ | |||
| namespace mindspore { | |||
| namespace lite { | |||
| int ArithmeticSelf::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) { | |||
| int ArithmeticSelf::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { | |||
| MS_ASSERT(this->primitive_ != nullptr); | |||
| auto input = inputs_.front(); | |||
| MS_ASSERT(input != nullptr); | |||
| @@ -35,7 +35,7 @@ class ArithmeticSelf : public PrimitiveC { | |||
| return RET_ERROR; | |||
| } | |||
| #endif | |||
| int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; | |||
| int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; | |||
| }; | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| @@ -18,7 +18,7 @@ | |||
| #include "src/common/common.h" | |||
| #include "include/errorcode.h" | |||
| #include "utils/log_adapter.h" | |||
| #include "src/ir/tensor.h" | |||
| #include "src/tensor.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| @@ -74,7 +74,7 @@ constexpr int kBlockShapeSize = 2; | |||
| constexpr int kCropsSize = 4; | |||
| } // namespace | |||
| int BatchToSpace::InferShape(std::vector<lite::tensor::Tensor *> inputs, std::vector<lite::tensor::Tensor *> outputs) { | |||
| int BatchToSpace::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs) { | |||
| MS_ASSERT(this->primitive_ != nullptr); | |||
| if (outputs.size() != kBatchToSpaceOutputNum || inputs.size() != kBatchToSpaceInputNum) { | |||
| MS_LOG(ERROR) << "Invalid output/input size! output size: " << outputs.size() << ",input size: " << inputs.size(); | |||
| @@ -82,7 +82,7 @@ int BatchToSpace::InferShape(std::vector<lite::tensor::Tensor *> inputs, std::ve | |||
| } | |||
| auto input = inputs.at(0); | |||
| if (input->GetFormat() != schema::Format_NHWC) { | |||
| if (input->GetFormat() != schema::Format::Format_NHWC) { | |||
| MS_LOG(ERROR) << "batch_to_space only support NHWC now!"; | |||
| return RET_FORMAT_ERR; | |||
| } | |||
| @@ -39,7 +39,7 @@ class BatchToSpace : public PrimitiveC { | |||
| int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; | |||
| #endif | |||
| int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; | |||
| int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; | |||
| std::vector<int> GetBlockShape() const; | |||
| std::vector<int> GetCrops() const; | |||
| }; | |||
| @@ -75,7 +75,7 @@ std::vector<int> BiasGrad::GetAxis() const { | |||
| return std::vector<int>(fb_vector->begin(), fb_vector->end()); | |||
| } | |||
| int BiasGrad::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) { | |||
| int BiasGrad::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) { | |||
| if (1 != inputs.size()) { | |||
| MS_LOG(ERROR) << "BiasGrad should have one input"; | |||
| return RET_ERROR; | |||
| @@ -100,7 +100,6 @@ int BiasGrad::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tenso | |||
| return RET_OK; | |||
| } | |||
| #endif | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| @@ -38,7 +38,7 @@ class BiasGrad : public PrimitiveC { | |||
| BiasGrad() = default; | |||
| int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; | |||
| int InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) override; | |||
| int InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) override; | |||
| #endif | |||
| std::vector<int> GetAxis() const; | |||
| }; | |||
| @@ -56,7 +56,7 @@ constexpr int kBroadcastToInputNum = 1; | |||
| constexpr int kBroadcastToOutputNum = 1; | |||
| } // namespace | |||
| int BroadcastTo::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) { | |||
| int BroadcastTo::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) { | |||
| if (inputs.size() != kBroadcastToInputNum || outputs.size() != kBroadcastToOutputNum) { | |||
| MS_LOG(ERROR) << "input size:" << inputs.size() << ", output size:" << outputs.size(); | |||
| return RET_PARAM_INVALID; | |||
| @@ -39,7 +39,7 @@ class BroadcastTo : public PrimitiveC { | |||
| int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; | |||
| #endif | |||
| int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; | |||
| int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; | |||
| std::vector<int> GetDstShape() const; | |||
| }; | |||
| } // namespace lite | |||
| @@ -44,7 +44,7 @@ int Cast::GetDstT() const { return this->primitive_->value_as_Cast()->dstT(); } | |||
| #endif | |||
| int Cast::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) { | |||
| int Cast::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { | |||
| MS_ASSERT(this->primitive_ != nullptr); | |||
| auto input = inputs_.front(); | |||
| MS_ASSERT(input != nullptr); | |||
| @@ -38,7 +38,7 @@ class Cast : public PrimitiveC { | |||
| int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; | |||
| #endif | |||
| int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; | |||
| int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; | |||
| int GetSrcT() const; | |||
| int GetDstT() const; | |||
| }; | |||
| @@ -18,7 +18,7 @@ | |||
| #include <memory> | |||
| #include "include/errorcode.h" | |||
| #include "utils/log_adapter.h" | |||
| #include "src/ir/tensor.h" | |||
| #include "src/tensor.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| @@ -81,7 +81,7 @@ int Concat::GetN() const { return this->primitive_->value_as_Concat()->n(); } | |||
| namespace { | |||
| constexpr int kConcatOutputNum = 1; | |||
| } | |||
| int Concat::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) { | |||
| int Concat::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { | |||
| if (this->primitive_ == nullptr) { | |||
| MS_LOG(ERROR) << "primitive is nullptr!"; | |||
| return RET_PARAM_INVALID; | |||
| @@ -39,7 +39,7 @@ class Concat : public PrimitiveC { | |||
| int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; | |||
| #endif | |||
| int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; | |||
| int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; | |||
| int GetAxis() const; | |||
| int GetN() const; | |||
| }; | |||
| @@ -17,7 +17,7 @@ | |||
| #include "src/ops/constant_of_shape.h" | |||
| #include "include/errorcode.h" | |||
| #include "utils/log_adapter.h" | |||
| #include "src/ir/tensor.h" | |||
| #include "src/tensor.h" | |||
| namespace mindspore::lite { | |||
| namespace { | |||
| @@ -47,7 +47,7 @@ float ConstantOfShape::GetValue() const { return this->primitive_->value_as_Cons | |||
| #endif | |||
| int ConstantOfShape::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) { | |||
| int ConstantOfShape::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { | |||
| if (inputs_.size() != kShapeInputNum) { | |||
| MS_LOG(ERROR) << "inputs to ConstantOfShape operator should be 1, but " << inputs_.size() << " is given."; | |||
| return RET_ERROR; | |||
| @@ -67,7 +67,7 @@ int ConstantOfShape::InferShape(std::vector<tensor::Tensor *> inputs_, std::vect | |||
| if (!GetInferFlag()) { | |||
| return RET_OK; | |||
| } | |||
| auto in_data = reinterpret_cast<int *>(in_tensor->Data()); | |||
| auto in_data = reinterpret_cast<int *>(in_tensor->MutableData()); | |||
| int size = in_tensor->ElementsNum(); | |||
| std::vector<int> out_shape(size); | |||
| for (int i = 0; i < size; ++i) { | |||
| @@ -37,7 +37,7 @@ class ConstantOfShape : public PrimitiveC { | |||
| int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; | |||
| #endif | |||
| int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; | |||
| int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; | |||
| float GetValue() const; | |||
| }; | |||
| } // namespace lite | |||
| @@ -128,11 +128,11 @@ void Conv2D::PopulaterConv2DMultiGroup(const Primitive &prim, schema::PrimitiveT | |||
| auto attr = std::make_unique<schema::DepthwiseConv2DT>(); | |||
| auto format = GetValue<std::string>(prim.GetAttr("data_format")); | |||
| if (format == "NCHW") { | |||
| attr->format = schema::Format_NCHW; | |||
| attr->format = schema::Format::Format_NCHW; | |||
| } else if (format == "NHWC") { | |||
| attr->format = schema::Format_NHWC; | |||
| attr->format = schema::Format::Format_NHWC; | |||
| } else { | |||
| attr->format = schema::Format_NUM_OF_FORMAT; | |||
| attr->format = schema::Format::Format_NUM_OF_FORMAT; | |||
| } | |||
| auto pad_list = GetValue<std::vector<int>>(prim.GetAttr("pad_list")); | |||
| attr->padUp = pad_list[0]; | |||
| @@ -191,11 +191,11 @@ void Conv2D::PopulaterConv2DSingleGroup(const Primitive &prim, schema::Primitive | |||
| attr->group = group; | |||
| auto format = GetValue<std::string>(prim.GetAttr("data_format")); | |||
| if (format == "NCHW") { | |||
| attr->format = schema::Format_NCHW; | |||
| attr->format = schema::Format::Format_NCHW; | |||
| } else if (format == "NHWC") { | |||
| attr->format = schema::Format_NHWC; | |||
| attr->format = schema::Format::Format_NHWC; | |||
| } else { | |||
| attr->format = schema::Format_NUM_OF_FORMAT; | |||
| attr->format = schema::Format::Format_NUM_OF_FORMAT; | |||
| } | |||
| auto pad_list = GetValue<std::vector<int>>(prim.GetAttr("pad_list")); | |||
| attr->padUp = pad_list[0]; | |||
| @@ -346,7 +346,7 @@ void Conv2D::ConvInferShape(int input_h, int input_w, int *output_h, int *output | |||
| } | |||
| } | |||
| int Conv2D::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) { | |||
| int Conv2D::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { | |||
| if (inputs_.size() != 2 && inputs_.size() != 3) { | |||
| MS_LOG(ERROR) << "Add should has two or three inputs"; | |||
| return RET_ERROR; | |||
| @@ -66,7 +66,7 @@ class Conv2D : public PrimitiveC { | |||
| #endif | |||
| public: | |||
| int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; | |||
| int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; | |||
| int PadUp() const; | |||
| int PadDown() const; | |||
| int PadLeft() const; | |||
| @@ -232,7 +232,7 @@ int Conv2DGradFilter::GetActivationType() const { | |||
| #endif | |||
| int Conv2DGradFilter::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) { | |||
| int Conv2DGradFilter::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) { | |||
| if (3 != inputs.size()) { | |||
| MS_LOG(ERROR) << "Conv2d Grad Filter should have 3 inputs"; | |||
| return RET_ERROR; | |||
| @@ -247,7 +247,7 @@ int Conv2DGradFilter::InferShape(std::vector<tensor::Tensor *> inputs, std::vect | |||
| MS_ASSERT(out != nullptr); | |||
| std::vector<int> output_shape; | |||
| int *out_shape = reinterpret_cast<int *>(in->Data()); | |||
| int *out_shape = reinterpret_cast<int *>(in->MutableData()); | |||
| int new_size = in->ElementsNum(); | |||
| if (in0->GetFormat() == in->GetFormat()) { | |||
| for (int i = 0; i < new_size; i++) output_shape.push_back(out_shape[i]); | |||
| @@ -59,7 +59,7 @@ class Conv2DGradFilter : public PrimitiveC { | |||
| int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; | |||
| #endif | |||
| int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; | |||
| int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; | |||
| int GetFormat() const; | |||
| int GetGroup() const; | |||
| int GetChannelIn() const; | |||
| @@ -230,7 +230,7 @@ int Conv2DGradInput::GetActivationType() const { | |||
| #endif | |||
| int Conv2DGradInput::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) { | |||
| int Conv2DGradInput::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) { | |||
| if (3 != inputs.size()) { | |||
| MS_LOG(ERROR) << "Conv2d Grad Input should have 3 inputs"; | |||
| return RET_ERROR; | |||
| @@ -245,7 +245,7 @@ int Conv2DGradInput::InferShape(std::vector<tensor::Tensor *> inputs, std::vecto | |||
| MS_ASSERT(out != nullptr); | |||
| std::vector<int> output_shape; | |||
| int *out_shape = reinterpret_cast<int *>(in->Data()); | |||
| int *out_shape = reinterpret_cast<int *>(in->MutableData()); | |||
| int new_size = in->ElementsNum(); | |||
| if (in0->GetFormat() == in->GetFormat()) { | |||
| for (int i = 0; i < new_size; i++) output_shape.push_back(out_shape[i]); | |||
| @@ -59,7 +59,7 @@ class Conv2DGradInput : public PrimitiveC { | |||
| int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; | |||
| #endif | |||
| int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; | |||
| int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; | |||
| int GetFormat() const; | |||
| int GetGroup() const; | |||
| int GetChannelIn() const; | |||
| @@ -56,7 +56,7 @@ namespace { | |||
| constexpr int kCropOutputNum = 1; | |||
| constexpr int kCropInputNum = 2; | |||
| } // namespace | |||
| int Crop::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) { | |||
| int Crop::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) { | |||
| if (outputs.size() != kCropOutputNum || inputs.size() != kCropInputNum) { | |||
| MS_LOG(ERROR) << "Invalid output/input size! output size: " << outputs.size() << ",input size: " << inputs.size(); | |||
| return RET_PARAM_INVALID; | |||
| @@ -39,7 +39,7 @@ class Crop : public PrimitiveC { | |||
| int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; | |||
| #endif | |||
| int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; | |||
| int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; | |||
| int64_t GetAxis() const; | |||
| std::vector<int64_t> GetOffsets() const; | |||
| }; | |||
| @@ -93,7 +93,7 @@ bool DeConv2D::GetHasBias() const { return this->primitive_->value_as_DeConv2D() | |||
| int DeConv2D::GetActivationType() const { return this->primitive_->value_as_DeConv2D()->activationType(); } | |||
| #endif | |||
| int DeConv2D::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) { | |||
| int DeConv2D::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) { | |||
| MS_ASSERT(this->primitive_ != nullptr); | |||
| auto input = inputs_.front(); | |||
| MS_ASSERT(input != nullptr); | |||
| @@ -53,7 +53,7 @@ class DeConv2D : public PrimitiveC { | |||
| int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; | |||
| #endif | |||
| int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; | |||
| int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; | |||
| int GetFormat() const; | |||
| int GetGroup() const; | |||
| int GetChannelIn() const; | |||
@@ -110,8 +110,7 @@ int DeDepthwiseConv2D::GetActivationType() const {
 }
 #endif
-int DeDepthwiseConv2D::InferShape(std::vector<lite::tensor::Tensor *> inputs_,
-std::vector<lite::tensor::Tensor *> outputs_) {
+int DeDepthwiseConv2D::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) {
 if (inputs_.size() != kDoubleNum && inputs_.size() != kMultiNum) {
 MS_LOG(ERROR) << "inputs number is invalid";
 return 1;
@@ -52,7 +52,7 @@ class DeDepthwiseConv2D : public PrimitiveC {
 int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
 int GetFormat() const;
 int GetChannelIn() const;
 int GetChannelMultiplier() const;
@@ -48,7 +48,7 @@ constexpr int kDepthToSpaceOutputNum = 1;
 constexpr int kDepthToSpaceInputNum = 1;
 } // namespace
-int DepthToSpace::InferShape(std::vector<lite::tensor::Tensor *> inputs, std::vector<lite::tensor::Tensor *> outputs) {
+int DepthToSpace::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs) {
 MS_ASSERT(this->primitive_ != nullptr);
 if (outputs.size() != kDepthToSpaceOutputNum || inputs.size() != kDepthToSpaceInputNum) {
 MS_LOG(ERROR) << "Invalid output/input size! output size: " << outputs.size() << ",input size: " << inputs.size();
@@ -56,7 +56,7 @@ int DepthToSpace::InferShape(std::vector<lite::tensor::Tensor *> inputs, std::ve
 }
 auto input = inputs.at(0);
-if (input->GetFormat() != schema::Format_NHWC) {
+if (input->GetFormat() != schema::Format::Format_NHWC) {
 MS_LOG(ERROR) << "depth_to_space only support NHWC now!";
 return RET_FORMAT_ERR;
 }
@@ -38,7 +38,7 @@ class DepthToSpace : public PrimitiveC {
 int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
 int GetBlockSize() const;
 int GetFormat() const;
 };
@@ -76,11 +76,11 @@ int DepthwiseConv2D::UnPackAttr(const Primitive &prim, const std::vector<AnfNode
 auto format = GetValue<std::string>(prim.GetAttr("data_format"));
 if (format == "NCHW") {
-attr->format = schema::Format_NCHW;
+attr->format = schema::Format::Format_NCHW;
 } else if (format == "NHWC") {
-attr->format = schema::Format_NHWC;
+attr->format = schema::Format::Format_NHWC;
 } else {
-attr->format = schema::Format_NUM_OF_FORMAT;
+attr->format = schema::Format::Format_NUM_OF_FORMAT;
 }
 auto pad_list = GetValue<std::vector<int>>(prim.GetAttr("pads"));
 attr->padUp = pad_list[0];
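The hunk above changes every format constant from the flat name (`schema::Format_NCHW`) to the scope-qualified name (`schema::Format::Format_NCHW`), which suggests the generated `schema::Format` type is now referenced through the enum's own scope. A toy sketch of the same string-to-format mapping follows; the local `enum class Format` is only a stand-in so the qualified names compile, not the real flatbuffer-generated header.

```cpp
#include <string>

namespace schema {
// Hypothetical stand-in for the generated schema::Format type; modelled as a
// scoped enum so the qualified Format::Format_* names below resolve.
enum class Format { Format_NCHW, Format_NHWC, Format_NUM_OF_FORMAT };
}  // namespace schema

// Map the "data_format" attribute string to a Format value, mirroring the
// if/else chain in DepthwiseConv2D::UnPackAttr above.
schema::Format FormatFromString(const std::string &format) {
  if (format == "NCHW") {
    return schema::Format::Format_NCHW;  // qualified name; bare schema::Format_NCHW no longer resolves
  } else if (format == "NHWC") {
    return schema::Format::Format_NHWC;
  }
  return schema::Format::Format_NUM_OF_FORMAT;
}
```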
@@ -188,8 +188,7 @@ int DepthwiseConv2D::GetActivationType() const {
 }
 #endif
-int DepthwiseConv2D::InferShape(std::vector<lite::tensor::Tensor *> inputs_,
-std::vector<lite::tensor::Tensor *> outputs_) {
+int DepthwiseConv2D::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) {
 if (inputs_.size() != kDoubleNum && inputs_.size() != kMultiNum) {
 MS_LOG(ERROR) << "inputs number is invalid";
 return 1;
@@ -60,7 +60,7 @@ class DepthwiseConv2D : public PrimitiveC {
 #endif
 public:
-int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
 int GetFormat() const;
 int GetChannelIn() const;
 int GetChannelMultiplier() const;
@@ -144,8 +144,7 @@ namespace {
 constexpr int kDetectionPostProcessOutputNum = 4;
 constexpr int kDetectionPostProcessInputNum = 3;
 } // namespace
-int DetectionPostProcess::InferShape(std::vector<lite::tensor::Tensor *> inputs_,
-std::vector<lite::tensor::Tensor *> outputs_) {
+int DetectionPostProcess::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) {
 if (outputs_.size() != kDetectionPostProcessOutputNum || inputs_.size() != kDetectionPostProcessInputNum) {
 MS_LOG(ERROR) << "Invalid output/input size! output size: " << outputs_.size() << ",input size: " << inputs_.size();
 return RET_PARAM_INVALID;
@@ -50,7 +50,7 @@ class DetectionPostProcess : public PrimitiveC {
 int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
 int GetFormat() const;
 int GetInputSize() const;
 float GetHScale() const;
@@ -43,7 +43,7 @@ float EmbeddingLookup::GetMaxNorm() const { return this->primitive_->value_as_Em
 #endif
-int EmbeddingLookup::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int EmbeddingLookup::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
 MS_ASSERT(this->primitive_ != nullptr);
 if (inputs_.size() < kDoubleNum) {
 MS_LOG(ERROR) << "Embedding Lookup should have at least two inputs";
@@ -38,7 +38,7 @@ class EmbeddingLookup : public PrimitiveC {
 int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
 float GetMaxNorm() const;
 };
 } // namespace lite
@@ -42,7 +42,7 @@ int ExpandDims::GetDim() const { return this->primitive_->value_as_ExpandDims()-
 #endif
-int ExpandDims::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int ExpandDims::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
 MS_ASSERT(this->primitive_ != nullptr);
 auto input = inputs_.front();
 MS_ASSERT(input != nullptr);
@@ -38,7 +38,7 @@ class ExpandDims : public PrimitiveC {
 int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
 int GetDim() const;
 };
 } // namespace lite
@@ -50,7 +50,7 @@ std::vector<int> Fill::GetDims() const {
 #endif
-int Fill::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int Fill::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
 MS_ASSERT(this->primitive_ != nullptr);
 auto input = inputs_.front();
 auto output = outputs_.front();