diff --git a/mindspore/lite/micro/example/mnist_stm32f746/Core/Inc/main.h b/mindspore/lite/micro/example/mnist_stm32f746/Core/Inc/main.h
new file mode 100755
index 0000000000..ea74fe9961
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/Core/Inc/main.h
@@ -0,0 +1,71 @@
+/* USER CODE BEGIN Header */
+/**
+ ******************************************************************************
+ * @file : main.h
+ * @brief : Header for main.c file.
+ * This file contains the common defines of the application.
+ ******************************************************************************
+ * @attention
+ *
+ *
+  * © Copyright (c) 2021 STMicroelectronics.
+ * All rights reserved.
+ *
+ * This software component is licensed by ST under BSD 3-Clause license,
+ * the "License"; You may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at:
+ * opensource.org/licenses/BSD-3-Clause
+ *
+ ******************************************************************************
+ */
+/* USER CODE END Header */
+
+/* Define to prevent recursive inclusion -------------------------------------*/
+#ifndef __MAIN_H
+#define __MAIN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Includes ------------------------------------------------------------------*/
+#include "stm32f7xx_hal.h"
+
+/* Private includes ----------------------------------------------------------*/
+/* USER CODE BEGIN Includes */
+
+/* USER CODE END Includes */
+
+/* Exported types ------------------------------------------------------------*/
+/* USER CODE BEGIN ET */
+
+/* USER CODE END ET */
+
+/* Exported constants --------------------------------------------------------*/
+/* USER CODE BEGIN EC */
+
+/* USER CODE END EC */
+
+/* Exported macro ------------------------------------------------------------*/
+/* USER CODE BEGIN EM */
+
+/* USER CODE END EM */
+
+/* Exported functions prototypes ---------------------------------------------*/
+void Error_Handler(void);
+
+/* USER CODE BEGIN EFP */
+
+/* USER CODE END EFP */
+
+/* Private defines -----------------------------------------------------------*/
+/* USER CODE BEGIN Private defines */
+
+/* USER CODE END Private defines */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MAIN_H */
+
+/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/Core/Src/main.c b/mindspore/lite/micro/example/mnist_stm32f746/Core/Src/main.c
new file mode 100755
index 0000000000..dc619ddae5
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/Core/Src/main.c
@@ -0,0 +1,214 @@
+/* USER CODE BEGIN Header */
+/**
+ ******************************************************************************
+ * @file : main.c
+ * @brief : Main program body
+ ******************************************************************************
+ * @attention
+ *
+ * © Copyright (c) 2021 STMicroelectronics.
+ * All rights reserved.
+ *
+ * This software component is licensed by ST under BSD 3-Clause license,
+ * the "License"; You may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at:
+ * opensource.org/licenses/BSD-3-Clause
+ *
+ ******************************************************************************
+ */
+/* USER CODE END Header */
+/* Includes ------------------------------------------------------------------*/
+#include "main.h"
+#include "SEGGER_RTT.h"
+#include "include/errorcode.h"
+#include "include/lite_session.h"
+#include "include/ms_tensor.h"
+#include "mnist_input_data.h"
+// #include
+
+using namespace mindspore;
+/* Private includes ----------------------------------------------------------*/
+/* USER CODE BEGIN Includes */
+
+/* USER CODE END Includes */
+
+/* Private typedef -----------------------------------------------------------*/
+/* USER CODE BEGIN PTD */
+
+/* USER CODE END PTD */
+
+/* Private define ------------------------------------------------------------*/
+/* USER CODE BEGIN PD */
+/* USER CODE END PD */
+
+/* Private macro -------------------------------------------------------------*/
+/* USER CODE BEGIN PM */
+
+/* USER CODE END PM */
+
+/* Private variables ---------------------------------------------------------*/
+
+/* USER CODE BEGIN PV */
+
+/* USER CODE END PV */
+
+/* Private function prototypes -----------------------------------------------*/
+void SystemClock_Config(void);
+/* USER CODE BEGIN PFP */
+
+/* USER CODE END PFP */
+
+/* Private user code ---------------------------------------------------------*/
+/* USER CODE BEGIN 0 */
+
+/* USER CODE END 0 */
+
+/**
+ * @brief The application entry point.
+ * @retval int
+ */
+int main(void) {
+ /* USER CODE BEGIN 1 */
+
+ /* USER CODE END 1 */
+
+ /* MCU Configuration--------------------------------------------------------*/
+
+ /* Reset of all peripherals, Initializes the Flash interface and the Systick. */
+ HAL_Init();
+
+ /* USER CODE BEGIN Init */
+
+ /* USER CODE END Init */
+
+ /* Configure the system clock */
+ SystemClock_Config();
+
+ /* USER CODE BEGIN SysInit */
+
+ /* USER CODE END SysInit */
+
+ /* Initialize all configured peripherals */
+ /* USER CODE BEGIN 2 */
+
+ /* USER CODE END 2 */
+
+ /* Infinite loop */
+ /* USER CODE BEGIN WHILE */
+ // float inputs_binbuf[784] = {0};
+ while (1) {
+ /* USER CODE END WHILE */
+ SEGGER_RTT_printf(0, "***********mnist test start***********\n");
+ const char *model_buffer = nullptr;
+ int model_size = 0;
+ session::LiteSession *session = mindspore::session::LiteSession::CreateSession(model_buffer, model_size, nullptr);
+    Vector<tensor::MSTensor *> inputs = session->GetInputs();
+ size_t inputs_num = inputs.size();
+ void *inputs_binbuf[inputs_num];
+ int inputs_size[inputs_num];
+ for (size_t i = 0; i < inputs_num; ++i) {
+ inputs_size[i] = inputs[i]->Size();
+ }
+ // here mnist only have one input data,just hard code to it's array;
+ inputs_binbuf[0] = mnist_inputs_data;
+ for (size_t i = 0; i < inputs_num; ++i) {
+ void *input_data = inputs[i]->MutableData();
+ memcpy(input_data, inputs_binbuf[i], inputs_size[i]);
+ }
+ int ret = session->RunGraph();
+ if (ret != lite::RET_OK) {
+ return lite::RET_ERROR;
+ }
+    Vector<String> outputs_name = session->GetOutputTensorNames();
+ for (int i = 0; i < outputs_name.size(); ++i) {
+ tensor::MSTensor *output_tensor = session->GetOutputByTensorName(outputs_name[i]);
+ if (output_tensor == nullptr) {
+ return -1;
+ }
+ SEGGER_RTT_printf(0, "***********mnist test start5.2***********\n");
+      float *casted_data = static_cast<float *>(output_tensor->MutableData());
+ if (casted_data == nullptr) {
+ return -1;
+ }
+ SEGGER_RTT_printf(0, "***********mnist test start5.3***********\n");
+ for (size_t j = 0; j < 10 && j < output_tensor->ElementsNum(); j++) {
+        SEGGER_RTT_printf(0, "output: [%d] is : [%d]/100\n", j, (int)(casted_data[j] * 100));
+ }
+ }
+ delete session;
+ SEGGER_RTT_printf(0, "***********mnist test end***********\n");
+ /* USER CODE BEGIN 3 */
+ }
+ /* USER CODE END 3 */
+}
+
+/**
+ * @brief System Clock Configuration
+ * @retval None
+ */
+void SystemClock_Config(void) {
+ RCC_OscInitTypeDef RCC_OscInitStruct = {0};
+ RCC_ClkInitTypeDef RCC_ClkInitStruct = {0};
+
+ /** Configure the main internal regulator output voltage
+ */
+ __HAL_RCC_PWR_CLK_ENABLE();
+ __HAL_PWR_VOLTAGESCALING_CONFIG(PWR_REGULATOR_VOLTAGE_SCALE3);
+ /** Initializes the RCC Oscillators according to the specified parameters
+ * in the RCC_OscInitTypeDef structure.
+ */
+ RCC_OscInitStruct.OscillatorType = RCC_OSCILLATORTYPE_HSI;
+ RCC_OscInitStruct.HSIState = RCC_HSI_ON;
+ RCC_OscInitStruct.HSICalibrationValue = RCC_HSICALIBRATION_DEFAULT;
+ RCC_OscInitStruct.PLL.PLLState = RCC_PLL_NONE;
+ if (HAL_RCC_OscConfig(&RCC_OscInitStruct) != HAL_OK) {
+ Error_Handler();
+ }
+ /** Initializes the CPU, AHB and APB buses clocks
+ */
+ RCC_ClkInitStruct.ClockType = RCC_CLOCKTYPE_HCLK | RCC_CLOCKTYPE_SYSCLK
+ | RCC_CLOCKTYPE_PCLK1 | RCC_CLOCKTYPE_PCLK2;
+ RCC_ClkInitStruct.SYSCLKSource = RCC_SYSCLKSOURCE_HSI;
+ RCC_ClkInitStruct.AHBCLKDivider = RCC_SYSCLK_DIV1;
+ RCC_ClkInitStruct.APB1CLKDivider = RCC_HCLK_DIV1;
+ RCC_ClkInitStruct.APB2CLKDivider = RCC_HCLK_DIV1;
+
+ if (HAL_RCC_ClockConfig(&RCC_ClkInitStruct, FLASH_LATENCY_0) != HAL_OK) {
+ Error_Handler();
+ }
+}
+
+/* USER CODE BEGIN 4 */
+
+/* USER CODE END 4 */
+
+/**
+ * @brief This function is executed in case of error occurrence.
+ * @retval None
+ */
+void Error_Handler(void) {
+ /* USER CODE BEGIN Error_Handler_Debug */
+ /* User can add his own implementation to report the HAL error return state */
+ __disable_irq();
+ while (1) {
+ }
+ /* USER CODE END Error_Handler_Debug */
+}
+
+#ifdef USE_FULL_ASSERT
+/**
+ * @brief Reports the name of the source file and the source line number
+ * where the assert_param error has occurred.
+ * @param file: pointer to the source file name
+ * @param line: assert_param error line source number
+ * @retval None
+ */
+void assert_failed(uint8_t *file, uint32_t line) {
+ /* USER CODE BEGIN 6 */
+ /* User can add his own implementation to report the file name and line number,
+ ex: printf("Wrong parameters value: file %s on line %d\r\n", file, line) */
+ /* USER CODE END 6 */
+}
+#endif /* USE_FULL_ASSERT */
+
+/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/README.md b/mindspore/lite/micro/example/mnist_stm32f746/README.md
new file mode 100644
index 0000000000..654af301df
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/README.md
@@ -0,0 +1,174 @@
+# Arm Cortex-M编译部署
+
+ `Linux` `Cortex-M` `IOT` `C/C++` `全流程` `模型编译` `模型代码生成` `模型部署` `推理应用` `初级` `中级` `高级`
+
+
+
+- Arm Cortex-M编译部署
+ - [STM32F746编译依赖](#STM32F746编译依赖)
+ - [STM32F746构建](#STM32F746构建)
+ - [STM32F746工程部署](#STM32F746工程部署)
+ - [更多详情](#更多详情)
+    - [Linux x86_64平台编译部署](#linux-x86_64平台编译部署)
+    - [Android平台编译部署](#android平台编译部署)
+
+
+
+## Arm Cortex-M编译部署
+
+本教程以在STM32F746单板上编译部署生成模型代码为例,演示了codegen编译模型在Cortex-M平台的使用。更多关于Arm Cortex-M的详情可参见其[官网](https://developer.arm.com/ip-products/processors/cortex-m)。
+
+### STM32F746编译依赖
+
+模型推理代码的编译部署需要在windows上安装[JLink](https://www.segger.com/)、[STM32CubeMX](https://www.st.com/content/st_com/en.html)、[gcc-arm-none-eabi](https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/gnu-rm)等工具来进行交叉编译。
+
+- [STM32CubeMX-Win](https://www.st.com/content/ccc/resource/technical/software/sw_development_suite/group0/0b/05/f0/25/c7/2b/42/9d/stm32cubemx_v6-1-1/files/stm32cubemx_v6-1-1.zip/jcr:content/translations/en.stm32cubemx_v6-1-1.zip) >= 6.0.1
+
+- [gcc-arm-none-eabi](https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/gnu-rm/downloads) >= 9-2019-q4-major-win32
+
+- [JLink-windows](https://www.segger.com/downloads/jlink/) >= 6.56
+- [GCC](https://gcc.gnu.org/releases.html) >= 7.3.0
+- [CMake](https://cmake.org/download/) >= 3.18.3
+
+### STM32F746构建与运行
+
+首先使用codegen编译LeNet模型,生成对应的STM32F746推理代码。具体命令如下:
+
+```bash
+./codegen --codePath=. --modelPath=LeNet.ms --moduleName=LeNet --target=ARM32M
+```
+
+#### 代码工程说明
+
+```bash
+├── LeNet
+└── operator_library
+```
+
+##### 算子静态库目录说明
+
+在编译此工程之前需要预先获取Cortex-M 平台对应的[算子库]()。
+
+预置算子静态库的目录如下:
+
+```bash
+├── operator_library # 对应平台算子库目录
+ ├── include # 对应平台算子库头文件目录
+ └── lib # 对应平台算子库静态库目录
+```
+
+生成代码工程目录如下:
+
+```bash
+├── LeNet # 生成代码的根目录
+ ├── benchmark # 生成代码的benchmark目录
+ ├── include # 模型推理代码对外暴露头文件目录
+ └── src # 模型推理代码目录
+```
+
+#### 代码工程编译
+
+##### 环境测试
+
+安装好交叉编译所需环境后,需要在windows环境中依次将其加入到环境变量中
+
+```bash
+gcc -v # 查看GCC版本
+arm-none-eabi-gdb -v # 查看交叉编译环境
+jlink -v # 查看jlink版本
+make -v # 查看make版本
+```
+
+以上的命令均有成功返回值时,表明环境准备ok,可以继续进入下一步,否则先安装上述环境!!!
+
+##### 生成STM32F746单板初始化代码([详情示例代码]())
+
+1. 启动 STM32CubeMX,新建project,选择单板STM32F746IG
+
+2. 成功以后,选择`Makefile` ,`generator code`
+
+3. 在生成的工程目录下打开`cmd`,执行`make`,测试初始代码是否成功编译。
+
+ ```bash
+ # make成功结果
+ arm-none-eabi-size build/test_stm32f746.elf
+ text data bss dec hex filename
+ 3660 20 1572 5252 1484 build/test_stm32f746.elf
+ arm-none-eabi-objcopy -O ihex build/test_stm32f746.elf build/test_stm32f746.hex
+ arm-none-eabi-objcopy -O binary -S build/test_stm32f746.elf build/test_stm32f746.bin
+ ```
+
+##### 编译生成模型静态库
+
+1. 拷贝mindspore团队提供的cortex-m7的算子静态库以及对应头文件到STM32CubeMX生成的工程目录中。
+
+2. 拷贝codegen生成模型推理代码到 STM32CubeMX生成的代码工程目录中
+
+ ```bash
+ ├── .mxproject
+ └── build # 工程编译目录最终的elf文件存在于此
+ └── Core
+ └── Drivers
+ └── LeNet # codegen生成的cortex-m7 模型推理代码
+ └── Makefile # 组织工程makefile文件需要用户自己修改组织lenet && operator_library到工程目录中
+ └── operator_library # mindspore团队提供的对应平台算子库
+ └── startup_stm32f746xx.s
+ └── STM32F746IGKx_FLASH.ld
+ └── test_stm32f746.ioc
+ ```
+
+3. 修改makefile文件,组织算子静态库以及模型推理代码
+
+ ```bash
+ # C includes
+ C_INCLUDES = \
+ -ICore/Inc \
+ -IDrivers/STM32F7xx_HAL_Driver/Inc \
+ -IDrivers/STM32F7xx_HAL_Driver/Inc/Legacy \
+ -IDrivers/CMSIS/Device/ST/STM32F7xx/Include \
+ -Ioperator_library/include \ # 新增,指定算子库头文件目录
+ -ILeNet/include \ # 新增,指定模型推理代码头文件
+ -ILeNet/src # 新增,指定模型推理代码头文件
+ # libraries
+ LIBS = -lc -lm -lnosys -lops # 修改,导入mindspore团队提供算子库
+   LIBDIR = -Loperator_library/lib/arm32m # 新增,指定算子库所在路径
+ ```
+
+4. 在工程目录的Core/Src的main.c编写模型调用代码,具体代码新增如下:
+
+ ```cpp
+ ```
+
+5. 在工程根目录中使用管理员权限打开`cmd` 执行 `make`进行编译
+
+ ```bash
+ make
+ ```
+
+### STM32F746工程部署
+
+使用jlink 将可执行文件拷贝到单板上并做推理
+
+```bash
+jlinkgdbserver # 启动jlinkgdbserver 选定target device为STM32F746IG
+jlinkRTTViewer # 启动jlinkRTTViewer 选定target devices为STM32F746IG
+arm-none-eabi-gdb # 启动arm-gcc gdb服务
+file build/target.elf # 打开调测文件
+target remote 127.0.0.1 # 连接jlink服务器
+monitor reset # 重置单板
+monitor halt # 挂起单板
+load # 加载可执行文件到单板
+c # 执行模型推理
+```
+
+#### 执行结果
+
+```bash
+```
+
+## 更多详情
+
+### [Linux x86_64平台编译部署]()
+
+### [Android平台编译部署]()
+
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/CMakeLists.txt b/mindspore/lite/micro/example/mnist_stm32f746/mnist/CMakeLists.txt
new file mode 100755
index 0000000000..4f38570e27
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/CMakeLists.txt
@@ -0,0 +1,59 @@
+
+
+cmake_minimum_required(VERSION 3.14)
+project(benchmark)
+
+if(NOT DEFINED PKG_PATH)
+ message(FATAL_ERROR "PKG_PATH not set")
+endif()
+
+get_filename_component(PKG_PATH ${PKG_PATH} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR})
+
+set(HEADER_PATH ${PKG_PATH}/inference)
+
+option(MICRO_BUILD_ARM64 "build android arm64" OFF)
+option(MICRO_BUILD_ARM32A "build android arm32" OFF)
+
+add_compile_definitions(NOT_USE_STL)
+
+if(MICRO_BUILD_ARM64 OR MICRO_BUILD_ARM32A)
+ add_compile_definitions(ENABLE_NEON)
+ add_compile_definitions(ENABLE_ARM)
+endif()
+
+if(MICRO_BUILD_ARM64)
+ add_compile_definitions(ENABLE_ARM64)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8.2-a+dotprod")
+endif()
+
+if(MICRO_BUILD_ARM32A)
+ add_compile_definitions(ENABLE_ARM32)
+ add_definitions(-mfloat-abi=softfp -mfpu=neon)
+endif()
+
+set(CMAKE_C_FLAGS "${CMAKE_ENABLE_C99} ${CMAKE_C_FLAGS}")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
+if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
+ message(STATUS "build benchmark with debug info")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDebug -g")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDebug -g")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default")
+else()
+ set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \
+ -Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}")
+ set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \
+ -Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}")
+endif()
+
+add_subdirectory(src)
+include_directories(${CMAKE_CURRENT_SOURCE_DIR})
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../src/)
+include_directories(${HEADER_PATH})
+set(SRC_FILES
+ benchmark/benchmark.cc
+ benchmark/load_input.c
+)
+add_executable(benchmark ${SRC_FILES})
+target_link_libraries(benchmark net -lm -pthread)
+
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/CMakeLists.txt b/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/CMakeLists.txt
new file mode 100755
index 0000000000..be96a1343f
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/CMakeLists.txt
@@ -0,0 +1,65 @@
+
+cmake_minimum_required(VERSION 3.14)
+project(benchmark)
+
+if(NOT DEFINED MODEL_LIB)
+ message(FATAL_ERROR "MODEL_LIB not set")
+endif()
+
+if(NOT DEFINED HEADER_PATH)
+ message(FATAL_ERROR "HEADER_PATH not set")
+endif()
+
+get_filename_component(MODEL_LIB ${MODEL_LIB} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR})
+get_filename_component(HEADER_PATH ${HEADER_PATH} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR})
+
+function(parse_lib_info lib_full_path lib_name lib_path)
+ string(FIND "${lib_full_path}" "/" POS REVERSE)
+ math(EXPR POS "${POS} + 1")
+ string(SUBSTRING ${lib_full_path} 0 ${POS} path)
+ set(${lib_path} ${path} PARENT_SCOPE)
+ string(SUBSTRING ${lib_full_path} "${POS}" "-1" name)
+ set(${lib_name} ${name} PARENT_SCOPE)
+endfunction(parse_lib_info)
+
+parse_lib_info(${MODEL_LIB} MODEL_LIB_NAME MODEL_LIB_PATH)
+
+message("project name: ${MODEL_LIB_NAME}")
+
+option(MICRO_BUILD_ARM64 "build android arm64" OFF)
+option(MICRO_BUILD_ARM32A "build android arm32" OFF)
+
+if(MICRO_BUILD_ARM64 OR MICRO_BUILD_ARM32A)
+ add_compile_definitions(ENABLE_NEON)
+ add_compile_definitions(ENABLE_ARM)
+endif()
+
+if(MICRO_BUILD_ARM64)
+ add_compile_definitions(ENABLE_ARM64)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8.2-a+dotprod")
+endif()
+
+if(MICRO_BUILD_ARM32A)
+ add_compile_definitions(ENABLE_ARM32)
+ add_definitions(-mfloat-abi=softfp -mfpu=neon)
+endif()
+
+set(CMAKE_C_FLAGS "${CMAKE_ENABLE_C99} ${CMAKE_C_FLAGS}")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
+if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
+ message(STATUS "build benchmark with debug info")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDebug -g")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDebug -g")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default")
+else()
+ set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \
+ -Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}")
+ set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \
+ -Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}")
+endif()
+link_directories(${MODEL_LIB_PATH})
+include(benchmark.cmake)
+add_executable(benchmark ${SRC_FILES})
+target_link_libraries(benchmark ${MODEL_LIB_NAME} -lm -pthread)
+
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/benchmark.cc b/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/benchmark.cc
new file mode 100755
index 0000000000..df5ac0d7c9
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/benchmark.cc
@@ -0,0 +1,136 @@
+
+
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+#include <cstring>
+#include <cstdio>
+
+#include "include/lite_session.h"
+#include "include/ms_tensor.h"
+#include "include/errorcode.h"
+
+#include "load_input.h"
+
+using namespace mindspore;
+
+void usage() {
+ printf(
+ "-- mindspore benchmark params usage:\n"
+ "args[0]: executable file\n"
+ "args[1]: inputs binary file\n"
+ "args[2]: model weight binary file\n"
+ "args[3]: loop count for performance test\n"
+ "args[4]: runtime thread num\n"
+ "args[5]: runtime thread bind mode\n\n");
+}
+
+template <typename T>
+void PrintData(void *data, size_t data_number) {
+ if (data == nullptr) {
+ return;
+ }
+  auto casted_data = static_cast<T *>(data);
+ for (size_t i = 0; i < 10 && i < data_number; i++) {
+ std::cout << std::to_string(casted_data[i]) << ", ";
+ }
+ std::cout << std::endl;
+}
+
+void TensorToString(tensor::MSTensor *tensor) {
+ std::cout << ", DataType: " << tensor->data_type();
+ std::cout << ", Size: " << tensor->Size();
+ std::cout << ", Shape:";
+ for (auto &dim : tensor->shape()) {
+ std::cout << " " << dim;
+ }
+ std::cout << ", Data:" << std::endl;
+ switch (tensor->data_type()) {
+ case kNumberTypeFloat32: {
+      PrintData<float>(tensor->MutableData(), tensor->ElementsNum());
+ } break;
+ case kNumberTypeFloat16: {
+      PrintData<uint16_t>(tensor->MutableData(), tensor->ElementsNum());
+ } break;
+ case kNumberTypeInt32: {
+      PrintData<int32_t>(tensor->MutableData(), tensor->ElementsNum());
+ } break;
+ case kNumberTypeInt16: {
+      PrintData<int16_t>(tensor->MutableData(), tensor->ElementsNum());
+ } break;
+ case kNumberTypeInt8: {
+      PrintData<int8_t>(tensor->MutableData(), tensor->ElementsNum());
+ } break;
+ case kNumberTypeUInt8: {
+      PrintData<uint8_t>(tensor->MutableData(), tensor->ElementsNum());
+ } break;
+ default:
+ std::cout << "Unsupported data type to print" << std::endl;
+ break;
+ }
+}
+
+int main(int argc, const char **argv) {
+ if (argc < 2) {
+ std::cout << "input command is invalid\n" << std::endl;
+ usage();
+ return lite::RET_ERROR;
+ }
+ std::cout << "start run benchmark" << std::endl;
+
+ const char *model_buffer = nullptr;
+ int model_size = 0;
+ // read .bin file by ReadBinaryFile;
+ if (argc >= 3) {
+    model_buffer = static_cast<const char *>(ReadInputData(argv[2], &model_size));
+ }
+ session::LiteSession *session = mindspore::session::LiteSession::CreateSession(model_buffer, model_size, nullptr);
+ if (session == nullptr) {
+ std::cerr << "create lite session failed" << std::endl;
+ return lite::RET_ERROR;
+ }
+
+ // set model inputs tensor data
+  Vector<tensor::MSTensor *> inputs = session->GetInputs();
+ size_t inputs_num = inputs.size();
+ void *inputs_binbuf[inputs_num];
+ int inputs_size[inputs_num];
+ for (size_t i = 0; i < inputs_num; ++i) {
+ inputs_size[i] = inputs[i]->Size();
+ }
+  int ret = ReadInputsFile(const_cast<char *>(argv[1]), inputs_binbuf, inputs_size, inputs_num);
+ if (ret != lite::RET_OK) {
+ return lite::RET_ERROR;
+ }
+ for (size_t i = 0; i < inputs_num; ++i) {
+ void *input_data = inputs[i]->MutableData();
+ memcpy(input_data, inputs_binbuf[i], inputs_size[i]);
+ }
+
+ ret = session->RunGraph();
+ if (ret != lite::RET_OK) {
+ return lite::RET_ERROR;
+ }
+
+ std::cout << "run benchmark success" << std::endl;
+ delete session;
+ for (size_t i = 0; i < inputs_num; ++i) {
+ free(inputs_binbuf[i]);
+ }
+ return lite::RET_OK;
+}
+
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/benchmark.cmake b/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/benchmark.cmake
new file mode 100755
index 0000000000..92461e9a56
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/benchmark.cmake
@@ -0,0 +1,7 @@
+include_directories(${CMAKE_CURRENT_SOURCE_DIR})
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../src/)
+include_directories(${HEADER_PATH})
+set(SRC_FILES
+ benchmark.cc
+ load_input.c
+)
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/load_input.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/load_input.c
new file mode 100755
index 0000000000..f0baa78f7d
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/load_input.c
@@ -0,0 +1,95 @@
+
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "load_input.h"
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+void *ReadInputData(const char *real_input_path, int *size) {
+ if (real_input_path == NULL) {
+ return NULL;
+ }
+ if (strstr(real_input_path, ".bin") || strstr(real_input_path, ".net")) {
+ FILE *file;
+ file = fopen(real_input_path, "rb+");
+ if (!file) {
+ printf("Can't find %s\n", real_input_path);
+ return NULL;
+ }
+ int curr_file_posi = ftell(file);
+ fseek(file, 0, SEEK_END);
+ *size = ftell(file);
+ unsigned char *buf = malloc((*size));
+ (void)memset(buf, 0, (*size));
+ fseek(file, curr_file_posi, SEEK_SET);
+ int read_size = (int)(fread(buf, 1, *size, file));
+ if (read_size != (*size)) {
+ printf("read file failed, total file size: %d, read_size: %d\n", (*size), read_size);
+ fclose(file);
+ free(buf);
+ return NULL;
+ }
+ fclose(file);
+ return (void *)buf;
+ } else {
+ printf("input data file should be .bin , .net");
+ return NULL;
+ }
+}
+
+void SaveOutputData(char *final_name, unsigned char *output_data, unsigned int out_size) {
+ FILE *output_file;
+ output_file = fopen(final_name, "w");
+ if (output_file == NULL) {
+ printf("fopen output file: %s failed\n", final_name);
+ return;
+ }
+ unsigned char str[out_size];
+ for (unsigned int i = 0; i < out_size; ++i) {
+ str[i] = output_data[i];
+ fprintf(output_file, "%d\t", str[i]);
+ }
+ fclose(output_file);
+}
+
+int ReadInputsFile(char *path, void **buffers, const int *inputs_size, int inputs_num) {
+ char *inputs_path[inputs_num];
+ char *delim = ",";
+ char *token;
+ int i = 0;
+ while ((token = strtok_r(path, delim, &path))) {
+ if (i >= inputs_num) {
+ printf("inputs num is error, need: %d\n", inputs_num);
+ return -1;
+ }
+ inputs_path[i] = token;
+ printf("input %d: %s\n", i, inputs_path[i]);
+ i++;
+ }
+
+ for (i = 0; i < inputs_num; ++i) {
+ int size = 0;
+ buffers[i] = ReadInputData(inputs_path[i], &size);
+ if (size != inputs_size[i] || buffers[i] == NULL) {
+ printf("size mismatch, %s, input: %d, needed: %d\n", inputs_path[i], size, inputs_size[i]);
+ return -1;
+ }
+ }
+ return 0;
+}
+
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/load_input.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/load_input.h
new file mode 100755
index 0000000000..909a4ac16b
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/load_input.h
@@ -0,0 +1,36 @@
+
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MICRO_EXAMPLE_LOAD_INPUT_LOAD_INPUT_H_
+#define MICRO_EXAMPLE_LOAD_INPUT_LOAD_INPUT_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void *ReadInputData(const char *real_input_path, int *size);
+
+void SaveOutputData(char *final_name, unsigned char *output_data, unsigned int out_size);
+
+int ReadInputsFile(char *path, void **buffers, const int *inputs_size, int inputs_num);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // MICRO_EXAMPLE_LOAD_INPUT_LOAD_INPUT_H_
+
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/cell.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/cell.h
new file mode 100755
index 0000000000..3039fa816b
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/cell.h
@@ -0,0 +1,133 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_INCLUDE_API_CELL_H
+#define MINDSPORE_INCLUDE_API_CELL_H
+#include <functional>
+#include <string>
+#include <vector>
+#include <memory>
+#include "include/api/status.h"
+#include "include/api/types.h"
+#include "include/api/graph.h"
+
+namespace mindspore {
+class InputAndOutput;
+using Input = InputAndOutput;
+using Output = InputAndOutput;
+
+class MS_API CellBase {
+ public:
+ CellBase() = default;
+ virtual ~CellBase() = default;
+  virtual std::vector<Output> Construct(const std::vector<Input> &inputs) { return {}; }
+ virtual std::shared_ptr Clone() const = 0;
+  virtual Status Run(const std::vector<Input> &inputs, std::vector<Output> *outputs) { return kSuccess; }
+  std::vector<Output> operator()(const std::vector<Input> &inputs) const;
+};
+
+template <class T>
+class MS_API Cell : public CellBase {
+ public:
+ virtual ~Cell() = default;
+  std::shared_ptr<CellBase> Clone() const override { return std::make_shared<T>(static_cast<const T &>(*this)); }
+};
+
+class MS_API ParameterCell final : public Cell {
+ public:
+ ParameterCell() = default;
+ ~ParameterCell() override = default;
+
+ ParameterCell(const ParameterCell &);
+ ParameterCell &operator=(const ParameterCell &);
+
+ ParameterCell(ParameterCell &&);
+ ParameterCell &operator=(ParameterCell &&);
+
+ explicit ParameterCell(const MSTensor &);
+ ParameterCell &operator=(const MSTensor &);
+
+ explicit ParameterCell(MSTensor &&);
+ ParameterCell &operator=(MSTensor &&);
+
+ MSTensor GetTensor() const { return tensor_; }
+
+ private:
+ MSTensor tensor_;
+};
+
+class MS_API OpCellBase : public CellBase {
+ public:
+ explicit OpCellBase(const std::string &name) : name_(name) {}
+ ~OpCellBase() override = default;
+ const std::string &GetOpType() const { return name_; }
+
+ protected:
+ std::string name_;
+};
+
+template <class T>
+class MS_API OpCell : public OpCellBase, public std::enable_shared_from_this<T> {
+ public:
+ explicit OpCell(const std::string &name) : OpCellBase(name) {}
+ ~OpCell() override = default;
+  std::shared_ptr<CellBase> Clone() const override { return std::make_shared<T>(static_cast<const T &>(*this)); }
+};
+
+class MS_API GraphCell final : public Cell {
+ public:
+ class GraphImpl;
+
+ GraphCell() = default;
+ ~GraphCell() override = default;
+
+ explicit GraphCell(const Graph &);
+ explicit GraphCell(Graph &&);
+  explicit GraphCell(const std::shared_ptr<Graph> &);
+
+  const std::shared_ptr<Graph> &GetGraph() const { return graph_; }
+  Status Run(const std::vector<Input> &inputs, std::vector<Output> *outputs) override;
+  std::vector<MSTensor> GetInputs();
+  std::vector<MSTensor> GetOutputs();
+
+ private:
+ friend class ModelImpl;
+ Status Load();
+
+  std::shared_ptr<Graph> graph_;
+  std::shared_ptr<GraphImpl> executor_;
+};
+
+class MS_API InputAndOutput {
+ public:
+ InputAndOutput();
+ ~InputAndOutput() = default;
+
+ // no explicit
+ InputAndOutput(const MSTensor &); // NOLINT(runtime/explicit)
+ InputAndOutput(MSTensor &&); // NOLINT(runtime/explicit)
+
+  InputAndOutput(const std::shared_ptr<CellBase> &cell, const std::vector<InputAndOutput> &prev, int32_t index);
+
+ int32_t GetIndex() const { return index_; }
+ void SetIndex(int32_t index) { index_ = index; }
+
+ private:
+  std::shared_ptr<CellBase> cell_;
+  std::vector<InputAndOutput> prev_;
+ int32_t index_;
+};
+} // namespace mindspore
+#endif // MINDSPORE_INCLUDE_API_CELL_H
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/context.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/context.h
new file mode 100755
index 0000000000..3f52d7ae9d
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/context.h
@@ -0,0 +1,185 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_INCLUDE_API_CONTEXT_H
+#define MINDSPORE_INCLUDE_API_CONTEXT_H
+
+#include <string>
+#include <memory>
+#include <map>
+#include <vector>
+#include "include/api/types.h"
+#include "include/api/dual_abi_helper.h"
+
+namespace mindspore {
+constexpr auto kDeviceTypeAscend310 = "Ascend310";
+constexpr auto kDeviceTypeAscend910 = "Ascend910";
+constexpr auto kDeviceTypeGPU = "GPU";
+
+struct MS_API Context {
+ public:
+ Context();
+ virtual ~Context() = default;
+ struct Data;
+ std::shared_ptr<Data> data;
+};
+
+struct MS_API GlobalContext : public Context {
+ public:
+ static std::shared_ptr<Context> GetGlobalContext();
+
+ static inline void SetGlobalDeviceTarget(const std::string &device_target);
+ static inline std::string GetGlobalDeviceTarget();
+
+ static void SetGlobalDeviceID(const uint32_t &device_id);
+ static uint32_t GetGlobalDeviceID();
+
+ static inline void SetGlobalDumpConfigPath(const std::string &cfg_path);
+ static inline std::string GetGlobalDumpConfigPath();
+
+ private:
+ // api without std::string
+ static void SetGlobalDeviceTarget(const std::vector<char> &device_target);
+ static std::vector<char> GetGlobalDeviceTargetChar();
+
+ static void SetGlobalDumpConfigPath(const std::vector<char> &cfg_path);
+ static std::vector<char> GetGlobalDumpConfigPathChar();
+};
+
+struct MS_API ModelContext : public Context {
+ public:
+ static inline void SetInsertOpConfigPath(const std::shared_ptr<Context> &context, const std::string &cfg_path);
+ static inline std::string GetInsertOpConfigPath(const std::shared_ptr<Context> &context);
+
+ static inline void SetInputFormat(const std::shared_ptr<Context> &context, const std::string &format);
+ static inline std::string GetInputFormat(const std::shared_ptr<Context> &context);
+
+ static inline void SetInputShape(const std::shared_ptr<Context> &context, const std::string &shape);
+ static inline std::string GetInputShape(const std::shared_ptr<Context> &context);
+
+ static void SetInputShapeMap(const std::shared_ptr<Context> &context, const std::map<int, std::vector<int>> &shape);
+ static std::map<int, std::vector<int>> GetInputShapeMap(const std::shared_ptr<Context> &context);
+
+ static void SetDynamicBatchSize(const std::shared_ptr<Context> &context,
+ const std::vector<size_t> &dynamic_batch_size);
+ static inline std::string GetDynamicBatchSize(const std::shared_ptr<Context> &context);
+
+ static void SetOutputType(const std::shared_ptr<Context> &context, enum DataType output_type);
+ static enum DataType GetOutputType(const std::shared_ptr<Context> &context);
+
+ static inline void SetPrecisionMode(const std::shared_ptr<Context> &context, const std::string &precision_mode);
+ static inline std::string GetPrecisionMode(const std::shared_ptr<Context> &context);
+
+ static inline void SetOpSelectImplMode(const std::shared_ptr<Context> &context,
+ const std::string &op_select_impl_mode);
+ static inline std::string GetOpSelectImplMode(const std::shared_ptr<Context> &context);
+
+ static inline void SetFusionSwitchConfigPath(const std::shared_ptr<Context> &context, const std::string &cfg_path);
+ static inline std::string GetFusionSwitchConfigPath(const std::shared_ptr<Context> &context);
+
+ static inline void SetGpuTrtInferMode(const std::shared_ptr<Context> &context, const std::string &gpu_trt_infer_mode);
+ static inline std::string GetGpuTrtInferMode(const std::shared_ptr<Context> &context);
+
+ private:
+ // api without std::string
+ static void SetInsertOpConfigPath(const std::shared_ptr<Context> &context, const std::vector<char> &cfg_path);
+ static std::vector<char> GetInsertOpConfigPathChar(const std::shared_ptr<Context> &context);
+
+ static void SetInputFormat(const std::shared_ptr<Context> &context, const std::vector<char> &format);
+ static std::vector<char> GetInputFormatChar(const std::shared_ptr<Context> &context);
+
+ static void SetInputShape(const std::shared_ptr<Context> &context, const std::vector<char> &shape);
+ static std::vector<char> GetInputShapeChar(const std::shared_ptr<Context> &context);
+
+ static void SetPrecisionMode(const std::shared_ptr<Context> &context, const std::vector<char> &precision_mode);
+ static std::vector<char> GetPrecisionModeChar(const std::shared_ptr<Context> &context);
+
+ static void SetOpSelectImplMode(const std::shared_ptr<Context> &context,
+ const std::vector<char> &op_select_impl_mode);
+ static std::vector<char> GetOpSelectImplModeChar(const std::shared_ptr<Context> &context);
+
+ static void SetFusionSwitchConfigPath(const std::shared_ptr<Context> &context, const std::vector<char> &cfg_path);
+ static std::vector<char> GetFusionSwitchConfigPathChar(const std::shared_ptr<Context> &context);
+
+ static void SetGpuTrtInferMode(const std::shared_ptr<Context> &context, const std::vector<char> &gpu_trt_infer_mode);
+ static std::vector<char> GetGpuTrtInferModeChar(const std::shared_ptr<Context> &context);
+ static std::vector<char> GetDynamicBatchSizeChar(const std::shared_ptr<Context> &context);
+};
+
+void GlobalContext::SetGlobalDeviceTarget(const std::string &device_target) {
+ SetGlobalDeviceTarget(StringToChar(device_target));
+}
+std::string GlobalContext::GetGlobalDeviceTarget() { return CharToString(GetGlobalDeviceTargetChar()); }
+
+void GlobalContext::SetGlobalDumpConfigPath(const std::string &cfg_path) {
+ SetGlobalDumpConfigPath(StringToChar(cfg_path));
+}
+std::string GlobalContext::GetGlobalDumpConfigPath() { return CharToString(GetGlobalDumpConfigPathChar()); }
+
+void ModelContext::SetInsertOpConfigPath(const std::shared_ptr<Context> &context, const std::string &cfg_path) {
+ SetInsertOpConfigPath(context, StringToChar(cfg_path));
+}
+std::string ModelContext::GetInsertOpConfigPath(const std::shared_ptr<Context> &context) {
+ return CharToString(GetInsertOpConfigPathChar(context));
+}
+
+void ModelContext::SetInputFormat(const std::shared_ptr<Context> &context, const std::string &format) {
+ SetInputFormat(context, StringToChar(format));
+}
+std::string ModelContext::GetInputFormat(const std::shared_ptr<Context> &context) {
+ return CharToString(GetInputFormatChar(context));
+}
+
+void ModelContext::SetInputShape(const std::shared_ptr<Context> &context, const std::string &shape) {
+ SetInputShape(context, StringToChar(shape));
+}
+std::string ModelContext::GetInputShape(const std::shared_ptr<Context> &context) {
+ return CharToString(GetInputShapeChar(context));
+}
+
+void ModelContext::SetPrecisionMode(const std::shared_ptr<Context> &context, const std::string &precision_mode) {
+ SetPrecisionMode(context, StringToChar(precision_mode));
+}
+std::string ModelContext::GetPrecisionMode(const std::shared_ptr<Context> &context) {
+ return CharToString(GetPrecisionModeChar(context));
+}
+
+void ModelContext::SetOpSelectImplMode(const std::shared_ptr<Context> &context,
+ const std::string &op_select_impl_mode) {
+ SetOpSelectImplMode(context, StringToChar(op_select_impl_mode));
+}
+std::string ModelContext::GetOpSelectImplMode(const std::shared_ptr<Context> &context) {
+ return CharToString(GetOpSelectImplModeChar(context));
+}
+
+void ModelContext::SetFusionSwitchConfigPath(const std::shared_ptr<Context> &context, const std::string &cfg_path) {
+ SetFusionSwitchConfigPath(context, StringToChar(cfg_path));
+}
+std::string ModelContext::GetFusionSwitchConfigPath(const std::shared_ptr<Context> &context) {
+ return CharToString(GetFusionSwitchConfigPathChar(context));
+}
+
+std::string ModelContext::GetDynamicBatchSize(const std::shared_ptr<Context> &context) {
+ return CharToString(GetDynamicBatchSizeChar(context));
+}
+
+void ModelContext::SetGpuTrtInferMode(const std::shared_ptr<Context> &context, const std::string &gpu_trt_infer_mode) {
+ SetGpuTrtInferMode(context, StringToChar(gpu_trt_infer_mode));
+}
+std::string ModelContext::GetGpuTrtInferMode(const std::shared_ptr<Context> &context) {
+ return CharToString(GetGpuTrtInferModeChar(context));
+}
+} // namespace mindspore
+#endif // MINDSPORE_INCLUDE_API_CONTEXT_H
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/data_type.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/data_type.h
new file mode 100755
index 0000000000..a39488a83d
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/data_type.h
@@ -0,0 +1,43 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_INCLUDE_API_DATA_TYPE_H_
+#define MINDSPORE_INCLUDE_API_DATA_TYPE_H_
+
+namespace mindspore {
+enum class DataType : int {
+ kTypeUnknown = 0,
+ kObjectTypeString = 12,
+ kObjectTypeList = 13,
+ kObjectTypeTuple = 14,
+ kObjectTypeTensorType = 17,
+ kNumberTypeBool = 30,
+ kNumberTypeInt8 = 32,
+ kNumberTypeInt16 = 33,
+ kNumberTypeInt32 = 34,
+ kNumberTypeInt64 = 35,
+ kNumberTypeUInt8 = 37,
+ kNumberTypeUInt16 = 38,
+ kNumberTypeUInt32 = 39,
+ kNumberTypeUInt64 = 40,
+ kNumberTypeFloat16 = 42,
+ kNumberTypeFloat32 = 43,
+ kNumberTypeFloat64 = 44,
+ kNumberTypeEnd = 46,
+ // add new enum here
+ kInvalidType = INT32_MAX,
+};
+} // namespace mindspore
+#endif // MINDSPORE_INCLUDE_API_DATA_TYPE_H_
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/dual_abi_helper.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/dual_abi_helper.h
new file mode 100755
index 0000000000..7d56d5ac72
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/dual_abi_helper.h
@@ -0,0 +1,164 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_
+#define MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_
+
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <optional>
+#include <set>
+#include <string>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+namespace mindspore {
+inline std::vector<char> StringToChar(const std::string &s) { return std::vector<char>(s.begin(), s.end()); }
+
+inline std::string CharToString(const std::vector<char> &c) { return std::string(c.begin(), c.end()); }
+
+inline std::optional<std::vector<char>> OptionalStringToChar(const std::optional<std::string> &s) {
+ if (s == std::nullopt) return std::nullopt;
+ std::optional<std::vector<char>> ret = std::vector<char>(s->begin(), s->end());
+ return ret;
+}
+
+inline std::optional<std::string> OptionalCharToString(const std::optional<std::vector<char>> &c) {
+ if (c == std::nullopt) return std::nullopt;
+ std::optional<std::string> ret = std::string(c->begin(), c->end());
+ return ret;
+}
+
+inline std::pair<std::vector<char>, int32_t> PairStringToChar(const std::pair<std::string, int32_t> &s) {
+ return std::pair<std::vector<char>, int32_t>(std::vector<char>(s.first.begin(), s.first.end()), s.second);
+}
+
+inline std::pair<std::string, int32_t> PairCharToString(const std::pair<std::vector<char>, int32_t> &c) {
+ return std::pair<std::string, int32_t>(std::string(c.first.begin(), c.first.end()), c.second);
+}
+
+inline std::vector<std::vector<char>> VectorStringToChar(const std::vector<std::string> &s) {
+ std::vector<std::vector<char>> ret;
+ std::transform(s.begin(), s.end(), std::back_inserter(ret),
+ [](auto str) { return std::vector<char>(str.begin(), str.end()); });
+ return ret;
+}
+
+inline std::vector<std::string> VectorCharToString(const std::vector<std::vector<char>> &c) {
+ std::vector<std::string> ret;
+ std::transform(c.begin(), c.end(), std::back_inserter(ret),
+ [](auto ch) { return std::string(ch.begin(), ch.end()); });
+ return ret;
+}
+
+inline std::set<std::vector<char>> SetStringToChar(const std::set<std::string> &s) {
+ std::set<std::vector<char>> ret;
+ std::transform(s.begin(), s.end(), std::inserter(ret, ret.begin()),
+ [](auto str) { return std::vector<char>(str.begin(), str.end()); });
+ return ret;
+}
+
+inline std::set<std::string> SetCharToString(const std::set<std::vector<char>> &c) {
+ std::set<std::string> ret;
+ std::transform(c.begin(), c.end(), std::inserter(ret, ret.begin()),
+ [](auto ch) { return std::string(ch.begin(), ch.end()); });
+ return ret;
+}
+
+inline std::map<std::vector<char>, int32_t> MapStringToChar(const std::map<std::string, int32_t> &s) {
+ std::map<std::vector<char>, int32_t> ret;
+ std::transform(s.begin(), s.end(), std::inserter(ret, ret.begin()), [](auto str) {
+ return std::pair<std::vector<char>, int32_t>(std::vector<char>(str.first.begin(), str.first.end()), str.second);
+ });
+ return ret;
+}
+
+inline std::map<std::string, int32_t> MapCharToString(const std::map<std::vector<char>, int32_t> &c) {
+ std::map<std::string, int32_t> ret;
+ std::transform(c.begin(), c.end(), std::inserter(ret, ret.begin()), [](auto ch) {
+ return std::pair<std::string, int32_t>(std::string(ch.first.begin(), ch.first.end()), ch.second);
+ });
+ return ret;
+}
+
+inline std::map<std::vector<char>, std::vector<char>> UnorderedMapStringToChar(
+ const std::unordered_map<std::string, std::string> &s) {
+ std::map<std::vector<char>, std::vector<char>> ret;
+ std::transform(s.begin(), s.end(), std::inserter(ret, ret.begin()), [](auto str) {
+ return std::pair<std::vector<char>, std::vector<char>>(std::vector<char>(str.first.begin(), str.first.end()),
+ std::vector<char>(str.second.begin(), str.second.end()));
+ });
+ return ret;
+}
+
+inline std::unordered_map<std::string, std::string> UnorderedMapCharToString(
+ const std::map<std::vector<char>, std::vector<char>> &c) {
+ std::unordered_map<std::string, std::string> ret;
+ std::transform(c.begin(), c.end(), std::inserter(ret, ret.begin()), [](auto ch) {
+ return std::pair<std::string, std::string>(std::string(ch.first.begin(), ch.first.end()),
+ std::string(ch.second.begin(), ch.second.end()));
+ });
+ return ret;
+}
+
+inline std::vector<std::pair<std::vector<char>, std::vector<int32_t>>> ClassIndexStringToChar(
+ const std::vector<std::pair<std::string, std::vector<int32_t>>> &s) {
+ std::vector<std::pair<std::vector<char>, std::vector<int32_t>>> ret;
+ std::transform(s.begin(), s.end(), std::back_inserter(ret), [](auto str) {
+ return std::pair<std::vector<char>, std::vector<int32_t>>(std::vector<char>(str.first.begin(), str.first.end()),
+ str.second);
+ });
+ return ret;
+}
+
+inline std::vector<std::pair<std::string, std::vector<int32_t>>> ClassIndexCharToString(
+ const std::vector<std::pair<std::vector<char>, std::vector<int32_t>>> &c) {
+ std::vector<std::pair<std::string, std::vector<int32_t>>> ret;
+ std::transform(c.begin(), c.end(), std::back_inserter(ret), [](auto ch) {
+ return std::pair<std::string, std::vector<int32_t>>(std::string(ch.first.begin(), ch.first.end()), ch.second);
+ });
+ return ret;
+}
+
+template <typename T>
+inline std::map<std::vector<char>, T> PadInfoStringToChar(const std::map<std::string, T> &s_pad_info) {
+ std::map<std::vector<char>, T> ret;
+ std::transform(s_pad_info.begin(), s_pad_info.end(), std::inserter(ret, ret.begin()), [](auto str) {
+ return std::pair<std::vector<char>, T>(std::vector<char>(str.first.begin(), str.first.end()), str.second);
+ });
+ return ret;
+}
+
+template <typename T>
+inline std::map<std::string, T> PadInfoCharToString(const std::map<std::vector<char>, T> &c_pad_info) {
+ std::map<std::string, T> ret;
+ std::transform(c_pad_info.begin(), c_pad_info.end(), std::inserter(ret, ret.begin()), [](auto ch) {
+ return std::pair<std::string, T>(std::string(ch.first.begin(), ch.first.end()), ch.second);
+ });
+ return ret;
+}
+
+template <typename T>
+inline void TensorMapCharToString(const std::map<std::vector<char>, T> *c, std::unordered_map<std::string, T> *s) {
+ for (auto ch : *c) {
+ auto key = std::string(ch.first.begin(), ch.first.end());
+ auto val = ch.second;
+ s->insert(std::pair<std::string, T>(key, val));
+ }
+}
+} // namespace mindspore
+#endif // MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/graph.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/graph.h
new file mode 100755
index 0000000000..892f60495a
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/graph.h
@@ -0,0 +1,44 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_INCLUDE_API_GRAPH_H
+#define MINDSPORE_INCLUDE_API_GRAPH_H
+
+#include <cstddef>
+#include <memory>
+#include <string>
+#include <vector>
+#include "include/api/status.h"
+#include "include/api/types.h"
+
+namespace mindspore {
+class MS_API Graph {
+ public:
+ class GraphData;
+ explicit Graph(const std::shared_ptr<GraphData> &graph_data);
+ explicit Graph(std::shared_ptr<GraphData> &&graph_data);
+ explicit Graph(std::nullptr_t);
+ ~Graph();
+
+ enum ModelType ModelType() const;
+ bool operator==(std::nullptr_t) const;
+
+ private:
+ friend class GraphCell;
+ friend class ModelImpl;
+ std::shared_ptr<GraphData> graph_data_;
+};
+} // namespace mindspore
+#endif // MINDSPORE_INCLUDE_API_GRAPH_H
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/lite_context.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/lite_context.h
new file mode 100755
index 0000000000..bb06cff782
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/lite_context.h
@@ -0,0 +1,71 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_INCLUDE_API_LITE_CONTEXT_H
+#define MINDSPORE_INCLUDE_API_LITE_CONTEXT_H
+
+#include <string>
+#include <memory>
+#include <map>
+#include <any>
+#include "include/api/types.h"
+#include "include/lite_types.h"
+
+namespace mindspore {
+namespace lite {
+class Allocator;
+} // namespace lite
+
+struct MS_API Context {
+ public:
+ static void Clear(const std::shared_ptr<Context> &context);
+
+ static void SetAsDefault(const std::shared_ptr<Context> &context);
+
+ static void SetVendorName(const std::shared_ptr<Context> &context, const std::string &name);
+ static std::string GetVendorName(const std::shared_ptr<Context> &context);
+
+ static void SetThreadNum(const std::shared_ptr<Context> &context, int num);
+ static int GetThreadNum(const std::shared_ptr<Context> &context);
+
+ static void SetAllocator(const std::shared_ptr<Context> &context, std::shared_ptr<lite::Allocator> alloc);
+ static std::shared_ptr<lite::Allocator> GetAllocator(const std::shared_ptr<Context> &context);
+
+ static void ConfigCPU(const std::shared_ptr<Context> &context, bool config);
+ static bool IfCPUEnabled(const std::shared_ptr<Context> &context);
+
+ static void ConfigCPUFp16(const std::shared_ptr<Context> &context, bool config);
+ static bool IfCPUFp16Enabled(const std::shared_ptr<Context> &context);
+
+ static void SetCPUBindMode(const std::shared_ptr<Context> &context, lite::CpuBindMode mode);
+ static lite::CpuBindMode GetCPUBindMode(const std::shared_ptr<Context> &context);
+
+ static void ConfigGPU(const std::shared_ptr<Context> &context, bool config);
+ static bool IfGPUEnabled(const std::shared_ptr<Context> &context);
+
+ static void ConfigGPUFp16(const std::shared_ptr<Context> &context, bool config);
+ static bool IfGPUFp16Enabled(const std::shared_ptr<Context> &context);
+
+ static void ConfigNPU(const std::shared_ptr