Compare commits

...

34 Commits
master ... r1.2

Author SHA1 Message Date
  mindspore-ci-bot cd3c5f8506 !233 fix bug, in some cases, failed to find the user's context info 5 years ago
  xuyongfei 3fb5c4b79e Serving, fix bug, in some cases, failed to find the user's context information, resulting in no response 5 years ago
  mindspore-ci-bot 4b6d49f91a !231 update mindspore submodule in branch r1.2 5 years ago
  qinzheng c02e94dde2 update mindspore submodule in branch r1.2 5 years ago
  mindspore-ci-bot 90c0680e61 !228 update RELEASE.md version number to r1.2 5 years ago
  徐永飞 aae4d5d705 update RELEASE.md. 5 years ago
  徐永飞 2b5e5fc35e !227 update setup.py version number to 1.2 5 years ago
  徐永飞 f5907fd284 update setup.py. 5 years ago
  mindspore-ci-bot 93710fa1ed !225 update mindspore commit id 5 years ago
  zhangyinxia 79d757e054 undate commit id 5 years ago
  mindspore-ci-bot c446026566 !224 fix docs 5 years ago
  zhangyinxia aea1929044 fix docs 5 years ago
  mindspore-ci-bot 4a106f6a53 !222 Serving, update mindspore commit id 5 years ago
  xuyongfei 118049306b Serving, update mindspore commit id 5 years ago
  mindspore-ci-bot 7d3402bcfd !219 Update mindspore commit && update build.sh help 5 years ago
  xuyongfei 99bcde5309 Update mindspore commit && update build.sh help 5 years ago
  mindspore-ci-bot a8c20313ef !218 Serving, ci warning clear 5 years ago
  xuyongfei 249cf80a98 Serving, ci warning clear 5 years ago
  mindspore-ci-bot f90a968eb2 !216 Serving, update set_max_enqueued_requests api name 5 years ago
  xuyongfei bb6abccf2e Serving, update set_max_enqueued_requests api name 5 years ago
  mindspore-ci-bot 3b6a40345f !214 Serving, update load model failed info 5 years ago
  xuyongfei f054ce9e23 Serving, update mindspore and release note 5 years ago
  mindspore-ci-bot 6ad3199664 !212 Serving, fix bug on check servable failed, not notify agents to exit 5 years ago
  xuyongfei 63ba55c374 Serving, fix bug on check servable failed, not notify agents to exit 5 years ago
  mindspore-ci-bot 621e71dc82 !211 Serving, output build exception info 5 years ago
  xuyongfei b4fa42e0e6 Serving, output build exception info 5 years ago
  mindspore-ci-bot b2e4a1ccff !207 Serving, add master and worker async handle time cost 5 years ago
  mindspore-ci-bot 68903729af !209 fix README link error 5 years ago
  xuyongfei c14e50e086 fix README link error 5 years ago
  xuyongfei a8c6b08c01 Serving, add master and worker async handle time cost 5 years ago
  mindspore-ci-bot 5a82e36078 !206 Serving, fix libevent_core.so link, update cxx inferface 5 years ago
  xuyongfei a52f647757 Serving, fix libevent_core.so link, update cxx inferface 5 years ago
  徐永飞 29206b1fa9 !204 Serving r1.2 update version number in docs 5 years ago
  xuyongfei f3c5bf8918 Serving r1.2 update version number in docs 5 years ago
62 changed files with 1350 additions and 622 deletions
Unified View
  1. +16
    -15
      CMakeLists.txt
  2. +8
    -8
      README.md
  3. +8
    -8
      README_CN.md
  4. +47
    -8
      RELEASE.md
  5. +7
    -6
      build.sh
  6. +10
    -10
      cmake/dependency_securec.cmake
  7. +20
    -19
      cmake/dependency_utils.cmake
  8. +4
    -2
      cmake/external_libs/gtest.cmake
  9. +2
    -1
      cmake/external_libs/libevent.cmake
  10. +14
    -13
      cmake/options.cmake
  11. +20
    -20
      cmake/package.cmake
  12. +102
    -101
      cmake/utils.cmake
  13. +1
    -0
      example/matmul_distributed/export_model/export_model.sh
  14. +4
    -1
      mindspore_serving/CMakeLists.txt
  15. +1
    -0
      mindspore_serving/ccsrc/common/file_system_operation.cc
  16. +7
    -8
      mindspore_serving/ccsrc/master/dispacther.cc
  17. +1
    -1
      mindspore_serving/ccsrc/master/dispacther.h
  18. +6
    -1
      mindspore_serving/ccsrc/master/grpc/grpc_server.h
  19. +3
    -3
      mindspore_serving/ccsrc/master/master_context.cc
  20. +3
    -3
      mindspore_serving/ccsrc/master/master_context.h
  21. +1
    -0
      mindspore_serving/ccsrc/master/restful/http_process.cc
  22. +1
    -1
      mindspore_serving/ccsrc/python/serving_py.cc
  23. +16
    -0
      mindspore_serving/ccsrc/python/worker/worker_py.cc
  24. +1
    -0
      mindspore_serving/ccsrc/worker/context.cc
  25. +0
    -2
      mindspore_serving/ccsrc/worker/grpc/worker_process.cc
  26. +1
    -0
      mindspore_serving/ccsrc/worker/grpc/worker_process.h
  27. +5
    -1
      mindspore_serving/ccsrc/worker/grpc/worker_server.h
  28. +91
    -43
      mindspore_serving/ccsrc/worker/inference/mindspore_model_wrap.cc
  29. +13
    -4
      mindspore_serving/ccsrc/worker/inference/mindspore_model_wrap.h
  30. +4
    -1
      mindspore_serving/ccsrc/worker/work_executor.cc
  31. +1
    -2
      mindspore_serving/master/__init__.py
  32. +6
    -4
      mindspore_serving/master/context.py
  33. +2
    -2
      mindspore_serving/worker/_worker.py
  34. +2
    -0
      mindspore_serving/worker/distributed/agent_startup.py
  35. +2
    -2
      mindspore_serving/worker/distributed/distributed_worker.py
  36. +2
    -4
      mindspore_serving/worker/register/method.py
  37. +4
    -4
      scripts/check_clang_format.sh
  38. +1
    -1
      tests/ut/python/tests/common.py
  39. +6
    -6
      tests/ut/python/tests/test_mater_worker_client.py
  40. +59
    -33
      tests/ut/stub/cxx_api/cell.cc
  41. +195
    -116
      tests/ut/stub/cxx_api/context.cc
  42. +2
    -0
      tests/ut/stub/cxx_api/factory.h
  43. +1
    -1
      tests/ut/stub/cxx_api/graph/ascend/ascend_graph_impl.cc
  44. +1
    -2
      tests/ut/stub/cxx_api/graph/ascend/ascend_graph_impl.h
  45. +4
    -0
      tests/ut/stub/cxx_api/graph/graph.cc
  46. +2
    -2
      tests/ut/stub/cxx_api/graph/graph_impl.h
  47. +92
    -22
      tests/ut/stub/cxx_api/model/model.cc
  48. +2
    -2
      tests/ut/stub/cxx_api/model/model_impl.h
  49. +25
    -2
      tests/ut/stub/cxx_api/model/ms/ms_model.cc
  50. +1
    -0
      tests/ut/stub/cxx_api/model/ms/ms_model.h
  51. +28
    -9
      tests/ut/stub/cxx_api/serialization.cc
  52. +130
    -14
      tests/ut/stub/cxx_api/types.cc
  53. +8
    -2
      tests/ut/stub/graph_impl_stub.cc
  54. +1
    -1
      tests/ut/stub/graph_impl_stub.h
  55. +2
    -1
      tests/ut/stub/include/api/cell.h
  56. +158
    -82
      tests/ut/stub/include/api/context.h
  57. +138
    -0
      tests/ut/stub/include/api/dual_abi_helper.h
  58. +2
    -0
      tests/ut/stub/include/api/graph.h
  59. +21
    -8
      tests/ut/stub/include/api/model.h
  60. +5
    -5
      tests/ut/stub/include/api/serialization.h
  61. +29
    -14
      tests/ut/stub/include/api/types.h
  62. +1
    -1
      third_party/mindspore

+ 16
- 15
CMakeLists.txt View File

@@ -1,22 +1,25 @@
cmake_minimum_required(VERSION 3.14.1) cmake_minimum_required(VERSION 3.14.1)
project (MindSpore_Serving)
project(MindSpore_Serving)


if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.3.0)
message(FATAL_ERROR "GCC vesion ${CMAKE_CXX_COMPILER_VERSION} must not be less than 7.3.0")
endif ()
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.3.0)
message(FATAL_ERROR "GCC version ${CMAKE_CXX_COMPILER_VERSION} must not be less than 7.3.0")
endif()


include(${CMAKE_SOURCE_DIR}/cmake/options.cmake) # set compile options include(${CMAKE_SOURCE_DIR}/cmake/options.cmake) # set compile options
include(${CMAKE_SOURCE_DIR}/cmake/check_requirements.cmake) # check require party, like OpenSSL include(${CMAKE_SOURCE_DIR}/cmake/check_requirements.cmake) # check require party, like OpenSSL
set(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O2 -Wl,--allow-shlib-undefined -DHALF_ENABLE_CPP11_USER_LITERALS=0 -D_FORTIFY_SOURCE=2")
if (NOT CMAKE_SYSTEM_NAME MATCHES "Windows")
set(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O2 -Wl,--allow-shlib-undefined -DHALF_ENABLE_CPP11_USER_LITERALS=0 \
-D_FORTIFY_SOURCE=2")
if(NOT CMAKE_SYSTEM_NAME MATCHES "Windows")
add_compile_definitions(_GLIBCXX_USE_CXX11_ABI=0) add_compile_definitions(_GLIBCXX_USE_CXX11_ABI=0)
endif ()
endif()


if (ENABLE_PYTHON)
if(ENABLE_PYTHON)
add_compile_definitions(ENABLE_PYTHON) add_compile_definitions(ENABLE_PYTHON)
endif() endif()


set(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -g2 -ggdb -fno-inline-functions -fno-omit-frame-pointer -Wl,--allow-shlib-undefined -D_LIBCPP_INLINE_VISIBILITY='' -D_LIBCPP_DISABLE_EXTERN_TEMPLATE=1 -DHALF_ENABLE_CPP11_USER_LITERALS=0 -D_FORTIFY_SOURCE=2 -Wno-cpp")
set(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -g2 -ggdb -fno-inline-functions -fno-omit-frame-pointer \
-Wl,--allow-shlib-undefined -D_LIBCPP_INLINE_VISIBILITY='' -D_LIBCPP_DISABLE_EXTERN_TEMPLATE=1 \
-DHALF_ENABLE_CPP11_USER_LITERALS=0 -D_FORTIFY_SOURCE=2 -Wno-cpp")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -I/usr/local/include -std=c++17 -Werror -Wall -fPIC") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -I/usr/local/include -std=c++17 -Werror -Wall -fPIC")
set(CMAKE_EXPORT_COMPILE_COMMANDS ON) set(CMAKE_EXPORT_COMPILE_COMMANDS ON)


@@ -34,13 +37,13 @@ find_package(Python3 3.7 COMPONENTS Interpreter Development)
if(Python3_FOUND) if(Python3_FOUND)
set(PYTHON_INCLUDE_DIRS "${Python3_INCLUDE_DIRS}") set(PYTHON_INCLUDE_DIRS "${Python3_INCLUDE_DIRS}")
set(PYTHON_LIBRARIES "${Python3_LIBRARIES}") set(PYTHON_LIBRARIES "${Python3_LIBRARIES}")
if (WIN32)
if (Python3_DIR)
if(WIN32)
if(Python3_DIR)
message("Python3_DIR set already: " ${Python3_DIR}) message("Python3_DIR set already: " ${Python3_DIR})
else() else()
string(LENGTH ${PYTHON_LIBRARIES} PYTHON_LIBRARIES_LEN) string(LENGTH ${PYTHON_LIBRARIES} PYTHON_LIBRARIES_LEN)
string(LENGTH "libpythonxx.a" Python3_NAME_LEN) string(LENGTH "libpythonxx.a" Python3_NAME_LEN)
math(EXPR Python3_DIR_LEN ${PYTHON_LIBRARIES_LEN}-${Python3_NAME_LEN})
math(EXPR Python3_DIR_LEN ${PYTHON_LIBRARIES_LEN}-${Python3_NAME_LEN})
string(SUBSTRING ${Python3_LIBRARIES} 0 ${Python3_DIR_LEN} Python3_DIR) string(SUBSTRING ${Python3_LIBRARIES} 0 ${Python3_DIR_LEN} Python3_DIR)
message("Python3_DIR: " ${Python3_DIR}) message("Python3_DIR: " ${Python3_DIR})
endif() endif()
@@ -63,8 +66,6 @@ if(MS_WHL_LIB_PATH OR MS_BACKEND)
include(cmake/package.cmake) include(cmake/package.cmake)
endif() endif()


if (ENABLE_TESTCASES)
if(ENABLE_TESTCASES)
add_subdirectory(tests) add_subdirectory(tests)
endif() endif()



+ 8
- 8
README.md View File

@@ -40,7 +40,7 @@ MindSpore Serving provides the following functions:


## Installation ## Installation


MindSpore Serving depends on the MindSpore training and inference framework. Therefore, install [MindSpore](https://gitee.com/mindspore/mindspore/blob/master/README.md#installation) and then MindSpore Serving.
MindSpore Serving depends on the MindSpore training and inference framework. Therefore, install [MindSpore](https://gitee.com/mindspore/mindspore/blob/r1.2/README.md#installation) and then MindSpore Serving.


### Installing Serving ### Installing Serving


@@ -67,7 +67,7 @@ Perform the following steps to install Serving:


In the preceding information, `build.sh` is the build script file in the `serving` directory, and `$MINDSPORE_LIB_PATH` is the `lib` directory in the installation path of the MindSpore software package, for example, `softwarepath/mindspore/lib`. This path contains the library files on which MindSpore depends. In the preceding information, `build.sh` is the build script file in the `serving` directory, and `$MINDSPORE_LIB_PATH` is the `lib` directory in the installation path of the MindSpore software package, for example, `softwarepath/mindspore/lib`. This path contains the library files on which MindSpore depends.


Method 2: Directly build Serving. The MindSpore package is built together with Serving. You need to configure the [environment variables](https://gitee.com/mindspore/docs/blob/master/install/mindspore_ascend_install_source_en.md#configuring-environment-variables) for MindSpore building.
Method 2: Directly build Serving. The MindSpore package is built together with Serving. You need to configure the [environment variables](https://gitee.com/mindspore/docs/blob/r1.2/install/mindspore_ascend_install_source_en.md#configuring-environment-variables) for MindSpore building.


```shell ```shell
# GPU # GPU
@@ -101,21 +101,21 @@ from mindspore_serving import worker


To run MindSpore Serving, configure the following environment variables: To run MindSpore Serving, configure the following environment variables:


- MindSpore Serving depends on MindSpore. You need to configure [environment variables](https://gitee.com/mindspore/docs/blob/master/install/mindspore_ascend_install_source_en.md#configuring-environment-variables) to run MindSpore.
- MindSpore Serving depends on MindSpore. You need to configure [environment variables](https://gitee.com/mindspore/docs/blob/r1.2/install/mindspore_ascend_install_source_en.md#configuring-environment-variables) to run MindSpore.


## Quick Start ## Quick Start


[MindSpore-based Inference Service Deployment](https://www.mindspore.cn/tutorial/inference/en/master/serving_example.html) is used to demonstrate how to use MindSpore Serving.
[MindSpore-based Inference Service Deployment](https://www.mindspore.cn/tutorial/inference/en/r1.2/serving_example.html) is used to demonstrate how to use MindSpore Serving.


## Documents ## Documents


### Developer Guide ### Developer Guide


- [gRPC-based MindSpore Serving Access](https://www.mindspore.cn/tutorial/inference/en/master/serving_grpc.html)
- [RESTful-based MindSpore Serving Access](https://www.mindspore.cn/tutorial/inference/en/master/serving_restful.html)
- [Servable Provided Through Model Configuration](https://www.mindspore.cn/tutorial/inference/en/master/serving_model.html)
- [gRPC-based MindSpore Serving Access](https://www.mindspore.cn/tutorial/inference/en/r1.2/serving_grpc.html)
- [RESTful-based MindSpore Serving Access](https://www.mindspore.cn/tutorial/inference/en/r1.2/serving_restful.html)
- [Servable Provided Through Model Configuration](https://www.mindspore.cn/tutorial/inference/en/r1.2/serving_model.html)


For more details about the installation guide, tutorials, and APIs, see [MindSpore Python API](https://www.mindspore.cn/doc/api_python/en/master/index.html).
For more details about the installation guide, tutorials, and APIs, see [MindSpore Python API](https://www.mindspore.cn/doc/api_python/en/r1.2/index.html).


## Community ## Community




+ 8
- 8
README_CN.md View File

@@ -67,7 +67,7 @@ MindSpore Serving依赖MindSpore训练推理框架,安装完[MindSpore](https:


其中,`build.sh`为`serving`目录下的编译脚本文件,`$MINDSPORE_LIB_PATH`为MindSpore软件包的安装路径下的`lib`路径,例如,`softwarepath/mindspore/lib`,该路径包含MindSpore运行依赖的库文件。 其中,`build.sh`为`serving`目录下的编译脚本文件,`$MINDSPORE_LIB_PATH`为MindSpore软件包的安装路径下的`lib`路径,例如,`softwarepath/mindspore/lib`,该路径包含MindSpore运行依赖的库文件。


方式二,直接编译Serving,编译时会配套编译MindSpore的包,需要配置MindSpore编译时的[环境变量](https://gitee.com/mindspore/docs/blob/master/install/mindspore_ascend_install_source.md#配置环境变量) :
方式二,直接编译Serving,编译时会配套编译MindSpore的包,需要配置MindSpore编译时的[环境变量](https://gitee.com/mindspore/docs/blob/r1.2/install/mindspore_ascend_install_source.md#配置环境变量) :


```shell ```shell
# GPU # GPU
@@ -101,22 +101,22 @@ from mindspore_serving import worker


MindSpore Serving运行需要配置以下环境变量: MindSpore Serving运行需要配置以下环境变量:


- MindSpore Serving依赖MindSpore正确运行,运行MindSpore需要完成[环境变量配置](https://gitee.com/mindspore/docs/blob/master/install/mindspore_ascend_install_pip.md#%E9%85%8D%E7%BD%AE%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F)。
- MindSpore Serving依赖MindSpore正确运行,运行MindSpore需要完成[环境变量配置](https://gitee.com/mindspore/docs/blob/r1.2/install/mindspore_ascend_install_pip.md#%E9%85%8D%E7%BD%AE%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F)。


## 快速入门 ## 快速入门


以一个简单的[Add网络示例](https://www.mindspore.cn/tutorial/inference/zh-CN/master/serving_example.html),演示MindSpore Serving如何使用。
以一个简单的[Add网络示例](https://www.mindspore.cn/tutorial/inference/zh-CN/r1.2/serving_example.html),演示MindSpore Serving如何使用。


## 文档 ## 文档


### 开发者教程 ### 开发者教程


- [基于gRPC接口访问MindSpore Serving服务](https://www.mindspore.cn/tutorial/inference/zh-CN/master/serving_grpc.html)
- [基于RESTful接口访问MindSpore Serving服务](https://www.mindspore.cn/tutorial/inference/zh-CN/master/serving_restful.html)
- [通过配置模型提供Servable](https://www.mindspore.cn/tutorial/inference/zh-CN/master/serving_model.html)
- [基于MindSpore Serving部署分布式推理服务](https://www.mindspore.cn/tutorial/inference/zh-CN/master/serving_distributed_example.html)
- [基于gRPC接口访问MindSpore Serving服务](https://www.mindspore.cn/tutorial/inference/zh-CN/r1.2/serving_grpc.html)
- [基于RESTful接口访问MindSpore Serving服务](https://www.mindspore.cn/tutorial/inference/zh-CN/r1.2/serving_restful.html)
- [通过配置模型提供Servable](https://www.mindspore.cn/tutorial/inference/zh-CN/r1.2/serving_model.html)
- [基于MindSpore Serving部署分布式推理服务](https://www.mindspore.cn/tutorial/inference/zh-CN/r1.2/serving_distributed_example.html)


有关安装指南、教程和API的更多详细信息,请参阅[用户文档](https://www.mindspore.cn/doc/api_python/zh-CN/master/index.html)。
有关安装指南、教程和API的更多详细信息,请参阅[用户文档](https://www.mindspore.cn/doc/api_python/zh-CN/r1.2/index.html)。


## 社区 ## 社区




+ 47
- 8
RELEASE.md View File

@@ -1,21 +1,60 @@
# MindSpore Serving 1.2.0


# 1. MindSpore Serving 1.2.0 Release Notes
## MindSpore Serving 1.2.0 Release Notes


## 1.1. Major Features and Improvements
### Major Features and Improvements


### 1.1.1. Serving Framework
- [STABLE] Support distributed inference, it needs to cooperate with distributed training to export distributed models for super-large-scale neural network parameters(Ascend 910).
- [STABLE] Support GPU platform, Serving worker nodes can be deployer on Nvidia GPU, Ascend 310 and Ascend 910.
- This release is based on MindSpore version 1.2.0
- Support Python 3.8 and 3.9.


- [STABLE] Support distributed inference, it needs to cooperate with distributed training to export distributed models for super-large-scale neural network parameters.
- [STABLE] Support GPU platform, Serving worker nodes can be deployer on GPU, Ascend 310 and Ascend 910.
### API Change


# 2. MindSpore Serving 1.1.0 Release Notes
#### API Incompatible Change


## 2.1. Major Features and Improvements
##### Python API


### 2.1.1. Ascend 310 & Ascend 910 Serving Framework
Support deployment of distributed model, refer to [distributed inference tutorial](https://www.mindspore.cn/tutorial/inference/en/r1.2/serving_distributed_example.html) for related API.

#### Deprecations

##### Python API

### Bug Fixes

## Contributors

Thanks goes to these wonderful people:

chenweifeng, qinzheng, xujincai, xuyongfei, zhangyinxia, zhoufeng.

Contributions of any kind are welcome!

## MindSpore Serving 1.1.1 Release Notes

## Major Features and Improvements

- Adapts new C++ inference interface for MindSpore version 1.1.1.

## Bug fixes

- [BUGFIX] Fix bug in transforming result of type int16 in python Client.
- [BUGFIX] Fix bytes type misidentified as str type after python preprocess and postprocess.
- [BUGFIX] Fix bug releasing C++ tensor data when it's wrapped as numpy object sometimes.
- [BUGFIX] Update RuntimeError to warning log when check Ascend environment failed.

## MindSpore Serving 1.1.0 Release Notes

### Major Features and Improvements


- [STABLE] Support gRPC and RESTful API. - [STABLE] Support gRPC and RESTful API.
- [STABLE] Support simple Python API for Client and Server. - [STABLE] Support simple Python API for Client and Server.
- [STABLE] Support Model configuration,User can customize preprocessing & postprocessing for model. - [STABLE] Support Model configuration,User can customize preprocessing & postprocessing for model.
- [STABLE] Support multiple models,Multiple models can run simultaneously. - [STABLE] Support multiple models,Multiple models can run simultaneously.
- [STABLE] Support Model batching,Multiple instances will be split and combined to meet the batch size requirements of the model. - [STABLE] Support Model batching,Multiple instances will be split and combined to meet the batch size requirements of the model.
- This release is based on MindSpore version 1.1.0

### Bug Fixes

### Contributors

+ 7
- 6
build.sh View File

@@ -7,15 +7,16 @@ export BUILD_PATH="${PROJECTPATH}/build/"
usage() usage()
{ {
echo "Usage:" echo "Usage:"
echo "bash build.sh [-d] [-v] [-c on|off] [-a on|off] [-j[n]] [-p]"
echo " bash build.sh [-p {mindspore_shared_lib}] [-j[n]] [-d]"
echo " bash build.sh [-e gpu|ascend|cpu|npu] [-V 9.2|10.1|310|910] [-j[n]] [-d] "
echo " bash build.sh [-t on|off] [-j[n]] [-d]"
echo "" echo ""
echo "Options:" echo "Options:"
echo " -d Debug model"
echo " -p MindSpore lib {mindspore_shared_lib} path."
echo " -e Use cpu, gpu, npu or ascend"
echo " -V Specify the device version, if -e gpu, default CUDA 10.1, if -e ascend, default Ascend 910"
echo " -j[n] Set the threads when building (Default: -j8)" echo " -j[n] Set the threads when building (Default: -j8)"
echo " -v Display build command. Run cpack with verbose output."
echo " -c Enable code coverage, default off"
echo " -a Enable ASAN, default off. Memory error detection tool."
echo " -p MindSpore lib [mindspore_shared_lib] path."
echo " -d Debug model"
echo " -t Run testcases, default off." echo " -t Run testcases, default off."


} }


+ 10
- 10
cmake/dependency_securec.cmake View File

@@ -4,17 +4,17 @@
# SECUREC_LIBRARY # SECUREC_LIBRARY
# #


if (NOT TARGET securec)
set(_ms_tmp_CMAKE_POSITION_INDEPENDENT_CODE ${CMAKE_POSITION_INDEPENDENT_CODE})
set(_ms_tmp_CMAKE_C_FLAGS ${CMAKE_C_FLAGS})
if(NOT TARGET securec)
set(_ms_tmp_CMAKE_POSITION_INDEPENDENT_CODE ${CMAKE_POSITION_INDEPENDENT_CODE})
set(_ms_tmp_CMAKE_C_FLAGS ${CMAKE_C_FLAGS})


set(CMAKE_C_FLAGS "${SECURE_CXX_FLAGS}")
if (CMAKE_SYSTEM_NAME MATCHES "Windows")
add_compile_definitions(SECUREC_ONLY_DECLARE_MEMSET)
endif()
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../third_party/securec ${CMAKE_BINARY_DIR}/securec)
set(CMAKE_POSITION_INDEPENDENT_CODE ${_ms_tmp_CMAKE_POSITION_INDEPENDENT_CODE})
set(CMAKE_C_FLAGS ${_ms_tmp_CMAKE_C_FLAGS})
set(CMAKE_C_FLAGS "${SECURE_CXX_FLAGS}")
if(CMAKE_SYSTEM_NAME MATCHES "Windows")
add_compile_definitions(SECUREC_ONLY_DECLARE_MEMSET)
endif()
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../third_party/securec ${CMAKE_BINARY_DIR}/securec)
set(CMAKE_POSITION_INDEPENDENT_CODE ${_ms_tmp_CMAKE_POSITION_INDEPENDENT_CODE})
set(CMAKE_C_FLAGS ${_ms_tmp_CMAKE_C_FLAGS})
endif() endif()


include_directories(${CMAKE_CURRENT_LIST_DIR}/../third_party/securec/include) include_directories(${CMAKE_CURRENT_LIST_DIR}/../third_party/securec/include)


+ 20
- 19
cmake/dependency_utils.cmake View File

@@ -2,24 +2,25 @@
# #


function(find_python_package out_inc out_lib) function(find_python_package out_inc out_lib)
# Use PYTHON_EXECUTABLE if it is defined, otherwise default to python
if ("${PYTHON_EXECUTABLE}" STREQUAL "")
set(PYTHON_EXECUTABLE "python3")
else()
set(PYTHON_EXECUTABLE "${PYTHON_EXECUTABLE}")
endif()
# Use PYTHON_EXECUTABLE if it is defined, otherwise default to python
if("${PYTHON_EXECUTABLE}" STREQUAL "")
set(PYTHON_EXECUTABLE "python3")
else()
set(PYTHON_EXECUTABLE "${PYTHON_EXECUTABLE}")
endif()


execute_process(
COMMAND "${PYTHON_EXECUTABLE}" -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())"
RESULT_VARIABLE result
OUTPUT_VARIABLE inc)
string(STRIP "${inc}" inc)
set(${out_inc} ${inc} PARENT_SCOPE)
execute_process(
COMMAND "${PYTHON_EXECUTABLE}" -c "import distutils.sysconfig as sysconfig; import os; print(os.path.join(sysconfig.get_config_var('LIBDIR'), sysconfig.get_config_var('LDLIBRARY')))"
RESULT_VARIABLE result
OUTPUT_VARIABLE lib)
string(STRIP "${lib}" lib)
set(${out_lib} ${lib} PARENT_SCOPE)
execute_process(
COMMAND "${PYTHON_EXECUTABLE}" -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())"
RESULT_VARIABLE result
OUTPUT_VARIABLE inc)
string(STRIP "${inc}" inc)
set(${out_inc} ${inc} PARENT_SCOPE)

execute_process(
COMMAND "${PYTHON_EXECUTABLE}" -c "import distutils.sysconfig as sysconfig; import os; \
print(os.path.join(sysconfig.get_config_var('LIBDIR'), sysconfig.get_config_var('LDLIBRARY')))"
RESULT_VARIABLE result
OUTPUT_VARIABLE lib)
string(STRIP "${lib}" lib)
set(${out_lib} ${lib} PARENT_SCOPE)
endfunction() endfunction()

+ 4
- 2
cmake/external_libs/gtest.cmake View File

@@ -9,5 +9,7 @@ mindspore_add_pkg(gtest
-DCMAKE_MACOSX_RPATH=TRUE -Dgtest_disable_pthreads=ON) -DCMAKE_MACOSX_RPATH=TRUE -Dgtest_disable_pthreads=ON)
include_directories(${gtest_INC}) include_directories(${gtest_INC})
add_library(mindspore_serving::gtest ALIAS gtest::gtest) add_library(mindspore_serving::gtest ALIAS gtest::gtest)
file(COPY ${gtest_LIBPATH}/libgtest${CMAKE_SHARED_LIBRARY_SUFFIX} DESTINATION ${CMAKE_BINARY_DIR}/googletest/googlemock/gtest)
file(COPY ${gtest_LIBPATH}/libgtest_main${CMAKE_SHARED_LIBRARY_SUFFIX} DESTINATION ${CMAKE_BINARY_DIR}/googletest/googlemock/gtest)
file(COPY ${gtest_LIBPATH}/libgtest${CMAKE_SHARED_LIBRARY_SUFFIX} DESTINATION
${CMAKE_BINARY_DIR}/googletest/googlemock/gtest)
file(COPY ${gtest_LIBPATH}/libgtest_main${CMAKE_SHARED_LIBRARY_SUFFIX} DESTINATION
${CMAKE_BINARY_DIR}/googletest/googlemock/gtest)

+ 2
- 1
cmake/external_libs/libevent.cmake View File

@@ -2,7 +2,7 @@ set(libevent_CFLAGS "-fstack-protector-all -D_FORTIFY_SOURCE=2 -O2")
set(libevent_LDFLAGS "-Wl,-z,now") set(libevent_LDFLAGS "-Wl,-z,now")
mindspore_add_pkg(libevent mindspore_add_pkg(libevent
VER 2.1.12 VER 2.1.12
LIBS event event_pthreads
LIBS event event_pthreads event_core
URL https://github.com/libevent/libevent/releases/download/release-2.1.12-stable/libevent-2.1.12-stable.tar.gz URL https://github.com/libevent/libevent/releases/download/release-2.1.12-stable/libevent-2.1.12-stable.tar.gz
MD5 b5333f021f880fe76490d8a799cd79f4 MD5 b5333f021f880fe76490d8a799cd79f4
CMAKE_OPTION -DCMAKE_BUILD_TYPE:STRING=Release -DBUILD_TESTING=OFF) CMAKE_OPTION -DCMAKE_BUILD_TYPE:STRING=Release -DBUILD_TESTING=OFF)
@@ -11,3 +11,4 @@ include_directories(${libevent_INC}) # 将指定目录添加到编译器的头


add_library(mindspore_serving::event ALIAS libevent::event) add_library(mindspore_serving::event ALIAS libevent::event)
add_library(mindspore_serving::event_pthreads ALIAS libevent::event_pthreads) add_library(mindspore_serving::event_pthreads ALIAS libevent::event_pthreads)
add_library(mindspore_serving::event_core ALIAS libevent::event_core)

+ 14
- 13
cmake/options.cmake View File

@@ -10,47 +10,48 @@ if(MS_WHL_LIB_PATH)
message("MindSpore whl lib path:" ${MS_WHL_LIB_PATH}) message("MindSpore whl lib path:" ${MS_WHL_LIB_PATH})
elseif(MS_BACKEND) elseif(MS_BACKEND)
message("MindSpore backend method:" ${MS_BACKEND}) message("MindSpore backend method:" ${MS_BACKEND})
elseif (RUN_TESTCASES)
elseif(RUN_TESTCASES)
message("MindSpore Serving Compile UT:" ${RUN_TESTCASES}) message("MindSpore Serving Compile UT:" ${RUN_TESTCASES})
elseif ()
elseif()
message(FATAL_ERROR "Please confirm how to use MindSpore.") message(FATAL_ERROR "Please confirm how to use MindSpore.")
endif() endif()


if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND Linux)
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND Linux)
set(OPTION_CXX_FLAGS "${OPTION_CXX_FLAGS} -fstack-protector-all -Wl,-z,relro,-z,now,-z,noexecstack") set(OPTION_CXX_FLAGS "${OPTION_CXX_FLAGS} -fstack-protector-all -Wl,-z,relro,-z,now,-z,noexecstack")
endif() endif()


if (ENABLE_COVERAGE)
if(ENABLE_COVERAGE)
set(COVERAGE_COMPILER_FLAGS "-g --coverage -fprofile-arcs -ftest-coverage") set(COVERAGE_COMPILER_FLAGS "-g --coverage -fprofile-arcs -ftest-coverage")
set(OPTION_CXX_FLAGS "${OPTION_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}") set(OPTION_CXX_FLAGS "${OPTION_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}")
endif() endif()


if (ENABLE_ASAN)
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set(OPTION_CXX_FLAGS "${OPTION_CXX_FLAGS} -fsanitize=address -fsanitize-recover=address -fno-omit-frame-pointer -fsanitize=undefined")
if(ENABLE_ASAN)
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set(OPTION_CXX_FLAGS "${OPTION_CXX_FLAGS} -fsanitize=address -fsanitize-recover=address \
-fno-omit-frame-pointer -fsanitize=undefined")
else() else()
set(OPTION_CXX_FLAGS "${OPTION_CXX_FLAGS} -fsanitize=address -fno-omit-frame-pointer -static-libsan -fsanitize=undefined")
set(OPTION_CXX_FLAGS "${OPTION_CXX_FLAGS} -fsanitize=address -fno-omit-frame-pointer -static-libsan \
-fsanitize=undefined")
endif() endif()
endif() endif()


if (DEBUG_MODE)
if(DEBUG_MODE)
set(CMAKE_BUILD_TYPE "Debug") set(CMAKE_BUILD_TYPE "Debug")
add_compile_definitions(MEM_REUSE_DEBUG) add_compile_definitions(MEM_REUSE_DEBUG)
else() else()
set(CMAKE_BUILD_TYPE "Release") set(CMAKE_BUILD_TYPE "Release")
endif() endif()


if ((CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64") OR (CMAKE_BUILD_TYPE STREQUAL Release))
if((CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64") OR (CMAKE_BUILD_TYPE STREQUAL Release))
set(PYBIND11_LTO_CXX_FLAGS FALSE) set(PYBIND11_LTO_CXX_FLAGS FALSE)
endif() endif()


if (NOT BUILD_PATH)
if(NOT BUILD_PATH)
set(BUILD_PATH "${CMAKE_SOURCE_DIR}/build") set(BUILD_PATH "${CMAKE_SOURCE_DIR}/build")
endif() endif()


if (NOT CMAKE_SYSTEM_NAME MATCHES "Windows")
if(NOT CMAKE_SYSTEM_NAME MATCHES "Windows")
set(MS_BUILD_GRPC ON) set(MS_BUILD_GRPC ON)
endif() endif()


add_compile_definitions(USE_GLOG) add_compile_definitions(USE_GLOG)


+ 20
- 20
cmake/package.cmake View File

@@ -39,20 +39,20 @@ install(
# set python files # set python files
file(GLOB MS_PY_LIST ${CMAKE_SOURCE_DIR}/mindspore_serving/*.py) file(GLOB MS_PY_LIST ${CMAKE_SOURCE_DIR}/mindspore_serving/*.py)
install( install(
FILES ${MS_PY_LIST}
DESTINATION ${INSTALL_PY_DIR}
COMPONENT mindspore_serving
FILES ${MS_PY_LIST}
DESTINATION ${INSTALL_PY_DIR}
COMPONENT mindspore_serving
) )


install( install(
TARGETS _mindspore_serving
DESTINATION ${INSTALL_BASE_DIR}
COMPONENT mindspore_serving
TARGETS _mindspore_serving
DESTINATION ${INSTALL_BASE_DIR}
COMPONENT mindspore_serving
) )
install( install(
TARGETS serving_common
DESTINATION ${INSTALL_LIB_DIR}
COMPONENT mindspore_serving
TARGETS serving_common
DESTINATION ${INSTALL_LIB_DIR}
COMPONENT mindspore_serving
) )
install( install(
TARGETS serving_ascend TARGETS serving_ascend
@@ -60,17 +60,17 @@ install(
COMPONENT mindspore_serving COMPONENT mindspore_serving
) )
install( install(
DIRECTORY
${CMAKE_SOURCE_DIR}/mindspore_serving/master
${CMAKE_SOURCE_DIR}/mindspore_serving/worker
${CMAKE_SOURCE_DIR}/mindspore_serving/common
${CMAKE_SOURCE_DIR}/mindspore_serving/client
DESTINATION ${INSTALL_PY_DIR}
COMPONENT mindspore_serving
DIRECTORY
${CMAKE_SOURCE_DIR}/mindspore_serving/master
${CMAKE_SOURCE_DIR}/mindspore_serving/worker
${CMAKE_SOURCE_DIR}/mindspore_serving/common
${CMAKE_SOURCE_DIR}/mindspore_serving/client
DESTINATION ${INSTALL_PY_DIR}
COMPONENT mindspore_serving
) )
install( install(
FILES ${CMAKE_SOURCE_DIR}/build/mindspore_serving/mindspore_serving/mindspore_serving/proto/ms_service_pb2.py
${CMAKE_SOURCE_DIR}/build/mindspore_serving/mindspore_serving/mindspore_serving/proto/ms_service_pb2_grpc.py
DESTINATION ${INSTALL_PY_DIR}/proto
COMPONENT mindspore_serving
FILES ${CMAKE_SOURCE_DIR}/build/mindspore_serving/mindspore_serving/mindspore_serving/proto/ms_service_pb2.py
${CMAKE_SOURCE_DIR}/build/mindspore_serving/mindspore_serving/mindspore_serving/proto/ms_service_pb2_grpc.py
DESTINATION ${INSTALL_PY_DIR}/proto
COMPONENT mindspore_serving
) )

+ 102
- 101
cmake/utils.cmake View File

@@ -16,45 +16,45 @@ function(mindspore_add_submodule_obj des_submodule_objs sub_dir submodule_name_o


endfunction() endfunction()


if (DEFINED ENV{MSLIBS_CACHE_PATH})
set(_MS_LIB_CACHE $ENV{MSLIBS_CACHE_PATH})
if(DEFINED ENV{MSLIBS_CACHE_PATH})
set(_MS_LIB_CACHE $ENV{MSLIBS_CACHE_PATH})
else() else()
set(_MS_LIB_CACHE ${CMAKE_BINARY_DIR}/.mslib) set(_MS_LIB_CACHE ${CMAKE_BINARY_DIR}/.mslib)
endif ()
endif()
message("MS LIBS CACHE PATH: ${_MS_LIB_CACHE}") message("MS LIBS CACHE PATH: ${_MS_LIB_CACHE}")


if (NOT EXISTS ${_MS_LIB_CACHE})
if(NOT EXISTS ${_MS_LIB_CACHE})
file(MAKE_DIRECTORY ${_MS_LIB_CACHE}) file(MAKE_DIRECTORY ${_MS_LIB_CACHE})
endif ()
endif()


if (DEFINED ENV{MSLIBS_SERVER}) # export MSLIBS_SERVER=49.4.0.74
set(LOCAL_LIBS_SERVER $ENV{MSLIBS_SERVER})
if(DEFINED ENV{MSLIBS_SERVER}) # export MSLIBS_SERVER=49.4.0.74
set(LOCAL_LIBS_SERVER $ENV{MSLIBS_SERVER})
message("LOCAL_LIBS_SERVER: ${LOCAL_LIBS_SERVER}") message("LOCAL_LIBS_SERVER: ${LOCAL_LIBS_SERVER}")
endif ()
endif()


include(ProcessorCount) # 确定处理器/核的数量并将值保存在${var}中 include(ProcessorCount) # 确定处理器/核的数量并将值保存在${var}中
ProcessorCount(N) ProcessorCount(N)
if (JOBS)
if(JOBS)
set(THNUM ${JOBS}) set(THNUM ${JOBS})
else() else()
set(JOBS 8) set(JOBS 8)
if (${JOBS} GREATER ${N})
if(${JOBS} GREATER ${N})
set(THNUM ${N}) set(THNUM ${N})
else() else()
set(THNUM ${JOBS}) set(THNUM ${JOBS})
endif() endif()
endif ()
endif()
message("set make thread num: ${THNUM}") message("set make thread num: ${THNUM}")


if(LOCAL_LIBS_SERVER) if(LOCAL_LIBS_SERVER)
if (NOT ENV{no_proxy})
if(NOT ENV{no_proxy})
set(ENV{no_proxy} "${LOCAL_LIBS_SERVER}") set(ENV{no_proxy} "${LOCAL_LIBS_SERVER}")
else() else()
string(FIND $ENV{no_proxy} ${LOCAL_LIBS_SERVER} IP_POS) string(FIND $ENV{no_proxy} ${LOCAL_LIBS_SERVER} IP_POS)
if (${IP_POS} EQUAL -1)
if(${IP_POS} EQUAL -1)
set(ENV{no_proxy} "$ENV{no_proxy},${LOCAL_LIBS_SERVER}") set(ENV{no_proxy} "$ENV{no_proxy},${LOCAL_LIBS_SERVER}")
endif ()
endif ()
endif()
endif()
endif() endif()


function(__download_pkg pkg_name pkg_url pkg_md5) function(__download_pkg pkg_name pkg_url pkg_md5)
@@ -66,9 +66,9 @@ function(__download_pkg pkg_name pkg_url pkg_md5)


FetchContent_Declare( # 获取项目。可以是一个URL也可以是一个Git仓库。 FetchContent_Declare( # 获取项目。可以是一个URL也可以是一个Git仓库。
${pkg_name} ${pkg_name}
URL ${pkg_url}
URL ${pkg_url}
URL_HASH MD5=${pkg_md5} URL_HASH MD5=${pkg_md5}
)
)
FetchContent_GetProperties(${pkg_name}) # 获取我们需要的变量MyName_*。 FetchContent_GetProperties(${pkg_name}) # 获取我们需要的变量MyName_*。
message("download: ${${pkg_name}_SOURCE_DIR} , ${pkg_name} , ${pkg_url}") message("download: ${${pkg_name}_SOURCE_DIR} , ${pkg_name} , ${pkg_url}")
if(NOT ${pkg_name}_POPULATED) if(NOT ${pkg_name}_POPULATED)
@@ -84,14 +84,14 @@ function(__download_pkg_with_git pkg_name pkg_url pkg_git_commit pkg_md5)
set(pkg_url "http://${LOCAL_LIBS_SERVER}:8081/libs/${pkg_name}/${pkg_git_commit}") set(pkg_url "http://${LOCAL_LIBS_SERVER}:8081/libs/${pkg_name}/${pkg_git_commit}")
FetchContent_Declare( FetchContent_Declare(
${pkg_name} ${pkg_name}
URL ${pkg_url}
URL ${pkg_url}
URL_HASH MD5=${pkg_md5} URL_HASH MD5=${pkg_md5}
)
)
else() else()
FetchContent_Declare(
${pkg_name}
GIT_REPOSITORY ${pkg_url}
GIT_TAG ${pkg_git_commit})
FetchContent_Declare(
${pkg_name}
GIT_REPOSITORY ${pkg_url}
GIT_TAG ${pkg_git_commit})
endif() endif()
FetchContent_GetProperties(${pkg_name}) FetchContent_GetProperties(${pkg_name})
message("download: ${${pkg_name}_SOURCE_DIR} , ${pkg_name} , ${pkg_url}") message("download: ${${pkg_name}_SOURCE_DIR} , ${pkg_name} , ${pkg_url}")
@@ -128,46 +128,46 @@ function(__find_pkg_then_add_target pkg_name pkg_exe lib_path)
foreach(_LIB_NAME ${ARGN}) foreach(_LIB_NAME ${ARGN})
set(_LIB_SEARCH_NAME ${_LIB_NAME}) set(_LIB_SEARCH_NAME ${_LIB_NAME})
set(_LIB_TYPE SHARED) set(_LIB_TYPE SHARED)
if (${pkg_name}_USE_STATIC_LIBS)
if(${pkg_name}_USE_STATIC_LIBS)
set(_LIB_SEARCH_NAME "${CMAKE_STATIC_LIBRARY_PREFIX}${_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}") set(_LIB_SEARCH_NAME "${CMAKE_STATIC_LIBRARY_PREFIX}${_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}")
set(_LIB_TYPE STATIC) set(_LIB_TYPE STATIC)
endif ()
endif()
set(${_LIB_NAME}_LIB ${_LIB_NAME}_LIB-NOTFOUND) set(${_LIB_NAME}_LIB ${_LIB_NAME}_LIB-NOTFOUND)
find_library(${_LIB_NAME}_LIB ${_LIB_SEARCH_NAME} PATHS ${${pkg_name}_BASE_DIR}/${lib_path} NO_DEFAULT_PATH) find_library(${_LIB_NAME}_LIB ${_LIB_SEARCH_NAME} PATHS ${${pkg_name}_BASE_DIR}/${lib_path} NO_DEFAULT_PATH)
if (NOT ${_LIB_NAME}_LIB AND BUILD_LITE AND PLATFORM_ARM)
if(NOT ${_LIB_NAME}_LIB AND BUILD_LITE AND PLATFORM_ARM)
set(${_LIB_NAME}_LIB "${${pkg_name}_BASE_DIR}/${lib_path}/lib${_LIB_SEARCH_NAME}.so") set(${_LIB_NAME}_LIB "${${pkg_name}_BASE_DIR}/${lib_path}/lib${_LIB_SEARCH_NAME}.so")
endif(NOT ${_LIB_NAME}_LIB AND BUILD_LITE AND PLATFORM_ARM)
endif()
if(NOT ${_LIB_NAME}_LIB) if(NOT ${_LIB_NAME}_LIB)
return() return()
endif() endif()


add_library(${pkg_name}::${_LIB_NAME} ${_LIB_TYPE} IMPORTED GLOBAL) add_library(${pkg_name}::${_LIB_NAME} ${_LIB_TYPE} IMPORTED GLOBAL)
if (WIN32 AND ${_LIB_TYPE} STREQUAL "SHARED")
if(WIN32 AND ${_LIB_TYPE} STREQUAL "SHARED")
set_target_properties(${pkg_name}::${_LIB_NAME} PROPERTIES IMPORTED_IMPLIB_RELEASE ${${_LIB_NAME}_LIB}) set_target_properties(${pkg_name}::${_LIB_NAME} PROPERTIES IMPORTED_IMPLIB_RELEASE ${${_LIB_NAME}_LIB})
else() else()
set_target_properties(${pkg_name}::${_LIB_NAME} PROPERTIES IMPORTED_LOCATION ${${_LIB_NAME}_LIB}) set_target_properties(${pkg_name}::${_LIB_NAME} PROPERTIES IMPORTED_LOCATION ${${_LIB_NAME}_LIB})
endif() endif()


if (EXISTS ${${pkg_name}_BASE_DIR}/include)
set_target_properties(${pkg_name}::${_LIB_NAME} PROPERTIES
INTERFACE_INCLUDE_DIRECTORIES "${${pkg_name}_BASE_DIR}/include")
endif ()
if(EXISTS ${${pkg_name}_BASE_DIR}/include)
set_target_properties(${pkg_name}::${_LIB_NAME} PROPERTIES
INTERFACE_INCLUDE_DIRECTORIES "${${pkg_name}_BASE_DIR}/include")
endif()


list(APPEND ${pkg_name}_LIBS ${pkg_name}::${_LIB_NAME}) list(APPEND ${pkg_name}_LIBS ${pkg_name}::${_LIB_NAME})
message("found ${${_LIB_NAME}_LIB}") message("found ${${_LIB_NAME}_LIB}")
STRING( REGEX REPLACE "(.+)/(.+)" "\\1" LIBPATH ${${_LIB_NAME}_LIB})
STRING(REGEX REPLACE "(.+)/(.+)" "\\1" LIBPATH ${${_LIB_NAME}_LIB})
set(${pkg_name}_LIBPATH ${LIBPATH} CACHE STRING INTERNAL) set(${pkg_name}_LIBPATH ${LIBPATH} CACHE STRING INTERNAL)
endforeach(_LIB_NAME)
endforeach()


set(${pkg_name}_LIBS ${${pkg_name}_LIBS} PARENT_SCOPE) set(${pkg_name}_LIBS ${${pkg_name}_LIBS} PARENT_SCOPE)
endfunction() endfunction()


function(__exec_cmd) function(__exec_cmd)
set(options )
set(options)
set(oneValueArgs WORKING_DIRECTORY) set(oneValueArgs WORKING_DIRECTORY)
set(multiValueArgs COMMAND) set(multiValueArgs COMMAND)


cmake_parse_arguments(EXEC "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} )
cmake_parse_arguments(EXEC "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})


execute_process(COMMAND ${EXEC_COMMAND} execute_process(COMMAND ${EXEC_COMMAND}
WORKING_DIRECTORY ${EXEC_WORKING_DIRECTORY} WORKING_DIRECTORY ${EXEC_WORKING_DIRECTORY}
@@ -179,42 +179,43 @@ endfunction()


function(__check_patches pkg_patches) function(__check_patches pkg_patches)
# check patches # check patches
if (PKG_PATCHES)
if(PKG_PATCHES)
file(TOUCH ${_MS_LIB_CACHE}/${pkg_name}_patch.md5) file(TOUCH ${_MS_LIB_CACHE}/${pkg_name}_patch.md5)
file(READ ${_MS_LIB_CACHE}/${pkg_name}_patch.md5 ${pkg_name}_PATCHES_MD5) file(READ ${_MS_LIB_CACHE}/${pkg_name}_patch.md5 ${pkg_name}_PATCHES_MD5)


message("patches md5:${${pkg_name}_PATCHES_MD5}") message("patches md5:${${pkg_name}_PATCHES_MD5}")


set(${pkg_name}_PATCHES_NEW_MD5 )
set(${pkg_name}_PATCHES_NEW_MD5)
foreach(_PATCH ${PKG_PATCHES}) foreach(_PATCH ${PKG_PATCHES})
file(MD5 ${_PATCH} _PF_MD5) file(MD5 ${_PATCH} _PF_MD5)
set(${pkg_name}_PATCHES_NEW_MD5 "${${pkg_name}_PATCHES_NEW_MD5},${_PF_MD5}") set(${pkg_name}_PATCHES_NEW_MD5 "${${pkg_name}_PATCHES_NEW_MD5},${_PF_MD5}")
endforeach(_PATCH)
endforeach()


if (NOT ${pkg_name}_PATCHES_MD5 STREQUAL ${pkg_name}_PATCHES_NEW_MD5)
if(NOT ${pkg_name}_PATCHES_MD5 STREQUAL ${pkg_name}_PATCHES_NEW_MD5)
set(${pkg_name}_PATCHES ${PKG_PATCHES}) set(${pkg_name}_PATCHES ${PKG_PATCHES})
file(REMOVE_RECURSE "${_MS_LIB_CACHE}/${pkg_name}-subbuild") file(REMOVE_RECURSE "${_MS_LIB_CACHE}/${pkg_name}-subbuild")
file(WRITE ${_MS_LIB_CACHE}/${pkg_name}_patch.md5 ${${pkg_name}_PATCHES_NEW_MD5}) file(WRITE ${_MS_LIB_CACHE}/${pkg_name}_patch.md5 ${${pkg_name}_PATCHES_NEW_MD5})
message("patches changed : ${${pkg_name}_PATCHES_NEW_MD5}") message("patches changed : ${${pkg_name}_PATCHES_NEW_MD5}")
endif ()
endif ()
endif()
endif()
endfunction() endfunction()


set(MS_FIND_NO_DEFAULT_PATH NO_CMAKE_PATH NO_CMAKE_ENVIRONMENT_PATH NO_SYSTEM_ENVIRONMENT_PATH set(MS_FIND_NO_DEFAULT_PATH NO_CMAKE_PATH NO_CMAKE_ENVIRONMENT_PATH NO_SYSTEM_ENVIRONMENT_PATH
NO_CMAKE_BUILDS_PATH NO_CMAKE_PACKAGE_REGISTRY NO_CMAKE_SYSTEM_PATH
NO_CMAKE_SYSTEM_PACKAGE_REGISTRY)
NO_CMAKE_BUILDS_PATH NO_CMAKE_PACKAGE_REGISTRY NO_CMAKE_SYSTEM_PATH
NO_CMAKE_SYSTEM_PACKAGE_REGISTRY)
set(MS_FIND_NO_DEFAULT_PATH ${MS_FIND_NO_DEFAULT_PATH} PARENT_SCOPE) set(MS_FIND_NO_DEFAULT_PATH ${MS_FIND_NO_DEFAULT_PATH} PARENT_SCOPE)
function(mindspore_add_pkg pkg_name )
function(mindspore_add_pkg pkg_name)


message("---------add pkg: " ${pkg_name} "---------") message("---------add pkg: " ${pkg_name} "---------")
set(options )
set(options)
set(oneValueArgs URL MD5 GIT_REPOSITORY GIT_TAG VER EXE DIR HEAD_ONLY CMAKE_PATH RELEASE LIB_PATH CUSTOM_CMAKE) set(oneValueArgs URL MD5 GIT_REPOSITORY GIT_TAG VER EXE DIR HEAD_ONLY CMAKE_PATH RELEASE LIB_PATH CUSTOM_CMAKE)
set(multiValueArgs CMAKE_OPTION LIBS PRE_CONFIGURE_COMMAND CONFIGURE_COMMAND BUILD_OPTION INSTALL_INCS INSTALL_LIBS PATCHES SUBMODULES SOURCEMODULES ONLY_MAKE ONLY_MAKE_INCS ONLY_MAKE_LIBS)
cmake_parse_arguments(PKG "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} )
set(multiValueArgs CMAKE_OPTION LIBS PRE_CONFIGURE_COMMAND CONFIGURE_COMMAND BUILD_OPTION INSTALL_INCS INSTALL_LIBS
PATCHES SUBMODULES SOURCEMODULES ONLY_MAKE ONLY_MAKE_INCS ONLY_MAKE_LIBS)
cmake_parse_arguments(PKG "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})


if (NOT PKG_LIB_PATH)
if(NOT PKG_LIB_PATH)
set(PKG_LIB_PATH lib) set(PKG_LIB_PATH lib)
endif ()
endif()


if(NOT PKG_EXE) if(NOT PKG_EXE)
set(PKG_EXE 0) set(PKG_EXE 0)
@@ -224,11 +225,11 @@ function(mindspore_add_pkg pkg_name )
string(TOLOWER ${pkg_name} pkg_name) string(TOLOWER ${pkg_name} pkg_name)
message("pkg name:${__FIND_PKG_NAME},${pkg_name}") message("pkg name:${__FIND_PKG_NAME},${pkg_name}")


set(${pkg_name}_PATCHES_HASH )
set(${pkg_name}_PATCHES_HASH)
foreach(_PATCH ${PKG_PATCHES}) foreach(_PATCH ${PKG_PATCHES})
file(MD5 ${_PATCH} _PF_MD5) file(MD5 ${_PATCH} _PF_MD5)
set(${pkg_name}_PATCHES_HASH "${${pkg_name}_PATCHES_HASH},${_PF_MD5}") set(${pkg_name}_PATCHES_HASH "${${pkg_name}_PATCHES_HASH},${_PF_MD5}")
endforeach(_PATCH)
endforeach()


# check options # check options
set(${pkg_name}_CONFIG_TXT set(${pkg_name}_CONFIG_TXT
@@ -247,16 +248,16 @@ function(mindspore_add_pkg pkg_name )
set(${pkg_name}_INC ${${pkg_name}_BASE_DIR}/${PKG_HEAD_ONLY} PARENT_SCOPE) set(${pkg_name}_INC ${${pkg_name}_BASE_DIR}/${PKG_HEAD_ONLY} PARENT_SCOPE)
add_library(${pkg_name} INTERFACE) add_library(${pkg_name} INTERFACE)
target_include_directories(${pkg_name} INTERFACE ${${pkg_name}_INC}) target_include_directories(${pkg_name} INTERFACE ${${pkg_name}_INC})
if (${PKG_RELEASE})
if(${PKG_RELEASE})
__find_pkg_then_add_target(${pkg_name} ${PKG_EXE} ${PKG_LIB_PATH} ${PKG_LIBS}) __find_pkg_then_add_target(${pkg_name} ${PKG_EXE} ${PKG_LIB_PATH} ${PKG_LIBS})
endif ()
endif()
return() return()
endif ()
endif()


set(${__FIND_PKG_NAME}_ROOT ${${pkg_name}_BASE_DIR}) set(${__FIND_PKG_NAME}_ROOT ${${pkg_name}_BASE_DIR})
set(${__FIND_PKG_NAME}_ROOT ${${pkg_name}_BASE_DIR} PARENT_SCOPE) set(${__FIND_PKG_NAME}_ROOT ${${pkg_name}_BASE_DIR} PARENT_SCOPE)


if (PKG_LIBS)
if(PKG_LIBS)
__find_pkg_then_add_target(${pkg_name} ${PKG_EXE} ${PKG_LIB_PATH} ${PKG_LIBS}) __find_pkg_then_add_target(${pkg_name} ${PKG_EXE} ${PKG_LIB_PATH} ${PKG_LIBS})
if(${pkg_name}_LIBS) if(${pkg_name}_LIBS)
set(${pkg_name}_INC ${${pkg_name}_BASE_DIR}/include PARENT_SCOPE) set(${pkg_name}_INC ${${pkg_name}_BASE_DIR}/include PARENT_SCOPE)
@@ -265,28 +266,28 @@ function(mindspore_add_pkg pkg_name )
endif() endif()
elseif(NOT PKG_HEAD_ONLY) elseif(NOT PKG_HEAD_ONLY)
find_package(${__FIND_PKG_NAME} ${PKG_VER} ${MS_FIND_NO_DEFAULT_PATH}) find_package(${__FIND_PKG_NAME} ${PKG_VER} ${MS_FIND_NO_DEFAULT_PATH})
if (${__FIND_PKG_NAME}_FOUND)
if(${__FIND_PKG_NAME}_FOUND)
set(${pkg_name}_INC ${${pkg_name}_BASE_DIR}/include PARENT_SCOPE) set(${pkg_name}_INC ${${pkg_name}_BASE_DIR}/include PARENT_SCOPE)
message("Found pkg: ${__FIND_PKG_NAME}") message("Found pkg: ${__FIND_PKG_NAME}")
return() return()
endif ()
endif ()
endif()
endif()


if (NOT PKG_DIR)
if (PKG_GIT_REPOSITORY)
if(NOT PKG_DIR)
if(PKG_GIT_REPOSITORY)
__download_pkg_with_git(${pkg_name} ${PKG_GIT_REPOSITORY} ${PKG_GIT_TAG} ${PKG_MD5}) __download_pkg_with_git(${pkg_name} ${PKG_GIT_REPOSITORY} ${PKG_GIT_TAG} ${PKG_MD5})
else() else()
__download_pkg(${pkg_name} ${PKG_URL} ${PKG_MD5}) __download_pkg(${pkg_name} ${PKG_URL} ${PKG_MD5})
endif() endif()
foreach(_SUBMODULE_FILE ${PKG_SUBMODULES}) foreach(_SUBMODULE_FILE ${PKG_SUBMODULES})
STRING( REGEX REPLACE "(.+)_(.+)" "\\1" _SUBMODEPATH ${_SUBMODULE_FILE})
STRING( REGEX REPLACE "(.+)/(.+)" "\\2" _SUBMODENAME ${_SUBMODEPATH})
STRING(REGEX REPLACE "(.+)_(.+)" "\\1" _SUBMODEPATH ${_SUBMODULE_FILE})
STRING(REGEX REPLACE "(.+)/(.+)" "\\2" _SUBMODENAME ${_SUBMODEPATH})
file(GLOB ${pkg_name}_INSTALL_SUBMODULE ${_SUBMODULE_FILE}/*) file(GLOB ${pkg_name}_INSTALL_SUBMODULE ${_SUBMODULE_FILE}/*)
file(COPY ${${pkg_name}_INSTALL_SUBMODULE} DESTINATION ${${pkg_name}_SOURCE_DIR}/3rdparty/${_SUBMODENAME}) file(COPY ${${pkg_name}_INSTALL_SUBMODULE} DESTINATION ${${pkg_name}_SOURCE_DIR}/3rdparty/${_SUBMODENAME})
endforeach (_SUBMODULE_FILE)
endforeach()
else() else()
set(${pkg_name}_SOURCE_DIR ${PKG_DIR}) set(${pkg_name}_SOURCE_DIR ${PKG_DIR})
endif ()
endif()
file(WRITE ${${pkg_name}_BASE_DIR}/options.txt ${${pkg_name}_CONFIG_TXT}) file(WRITE ${${pkg_name}_BASE_DIR}/options.txt ${${pkg_name}_CONFIG_TXT})
message("${pkg_name}_SOURCE_DIR : ${${pkg_name}_SOURCE_DIR}") message("${pkg_name}_SOURCE_DIR : ${${pkg_name}_SOURCE_DIR}")


@@ -302,32 +303,32 @@ function(mindspore_add_pkg pkg_name )
if(NOT Result EQUAL "0") if(NOT Result EQUAL "0")
message(FATAL_ERROR "Failed patch: ${_LF_PATCH_FILE}") message(FATAL_ERROR "Failed patch: ${_LF_PATCH_FILE}")
endif() endif()
endforeach(_PATCH_FILE)
endforeach()
foreach(_SOURCE_DIR ${PKG_SOURCEMODULES}) foreach(_SOURCE_DIR ${PKG_SOURCEMODULES})
file(GLOB ${pkg_name}_INSTALL_SOURCE ${${pkg_name}_SOURCE_DIR}/${_SOURCE_DIR}/*) file(GLOB ${pkg_name}_INSTALL_SOURCE ${${pkg_name}_SOURCE_DIR}/${_SOURCE_DIR}/*)
file(COPY ${${pkg_name}_INSTALL_SOURCE} DESTINATION ${${pkg_name}_BASE_DIR}/${_SOURCE_DIR}/) file(COPY ${${pkg_name}_INSTALL_SOURCE} DESTINATION ${${pkg_name}_BASE_DIR}/${_SOURCE_DIR}/)
endforeach (_SUBMODULE_FILE)
endforeach()
file(LOCK ${${pkg_name}_BASE_DIR} DIRECTORY GUARD FUNCTION RESULT_VARIABLE ${pkg_name}_LOCK_RET TIMEOUT 600) file(LOCK ${${pkg_name}_BASE_DIR} DIRECTORY GUARD FUNCTION RESULT_VARIABLE ${pkg_name}_LOCK_RET TIMEOUT 600)
if(NOT ${pkg_name}_LOCK_RET EQUAL "0") if(NOT ${pkg_name}_LOCK_RET EQUAL "0")
message(FATAL_ERROR "error! when try lock ${${pkg_name}_BASE_DIR} : ${${pkg_name}_LOCK_RET}") message(FATAL_ERROR "error! when try lock ${${pkg_name}_BASE_DIR} : ${${pkg_name}_LOCK_RET}")
endif() endif()


if (PKG_CUSTOM_CMAKE)
if(PKG_CUSTOM_CMAKE)
file(GLOB ${pkg_name}_cmake ${PKG_CUSTOM_CMAKE}/CMakeLists.txt) file(GLOB ${pkg_name}_cmake ${PKG_CUSTOM_CMAKE}/CMakeLists.txt)
file(COPY ${${pkg_name}_cmake} DESTINATION ${${pkg_name}_SOURCE_DIR}) file(COPY ${${pkg_name}_cmake} DESTINATION ${${pkg_name}_SOURCE_DIR})
endif ()
endif()


if(${pkg_name}_SOURCE_DIR) if(${pkg_name}_SOURCE_DIR)
if (PKG_HEAD_ONLY)
if(PKG_HEAD_ONLY)
file(GLOB ${pkg_name}_SOURCE_SUBDIRS ${${pkg_name}_SOURCE_DIR}/*) file(GLOB ${pkg_name}_SOURCE_SUBDIRS ${${pkg_name}_SOURCE_DIR}/*)
file(COPY ${${pkg_name}_SOURCE_SUBDIRS} DESTINATION ${${pkg_name}_BASE_DIR}) file(COPY ${${pkg_name}_SOURCE_SUBDIRS} DESTINATION ${${pkg_name}_BASE_DIR})
set(${pkg_name}_INC ${${pkg_name}_BASE_DIR}/${PKG_HEAD_ONLY} PARENT_SCOPE) set(${pkg_name}_INC ${${pkg_name}_BASE_DIR}/${PKG_HEAD_ONLY} PARENT_SCOPE)
if (NOT PKG_RELEASE)
if(NOT PKG_RELEASE)
add_library(${pkg_name} INTERFACE) add_library(${pkg_name} INTERFACE)
target_include_directories(${pkg_name} INTERFACE ${${pkg_name}_INC}) target_include_directories(${pkg_name} INTERFACE ${${pkg_name}_INC})
endif ()
endif()


elseif (PKG_ONLY_MAKE)
elseif(PKG_ONLY_MAKE)
__exec_cmd(COMMAND ${CMAKE_MAKE_PROGRAM} ${${pkg_name}_CXXFLAGS} -j${THNUM} __exec_cmd(COMMAND ${CMAKE_MAKE_PROGRAM} ${${pkg_name}_CXXFLAGS} -j${THNUM}
WORKING_DIRECTORY ${${pkg_name}_SOURCE_DIR}) WORKING_DIRECTORY ${${pkg_name}_SOURCE_DIR})
set(PKG_INSTALL_INCS ${PKG_ONLY_MAKE_INCS}) set(PKG_INSTALL_INCS ${PKG_ONLY_MAKE_INCS})
@@ -337,23 +338,23 @@ function(mindspore_add_pkg pkg_name )
file(COPY ${${pkg_name}_INSTALL_INCS} DESTINATION ${${pkg_name}_BASE_DIR}/include) file(COPY ${${pkg_name}_INSTALL_INCS} DESTINATION ${${pkg_name}_BASE_DIR}/include)
file(COPY ${${pkg_name}_INSTALL_LIBS} DESTINATION ${${pkg_name}_BASE_DIR}/lib) file(COPY ${${pkg_name}_INSTALL_LIBS} DESTINATION ${${pkg_name}_BASE_DIR}/lib)


elseif (PKG_CMAKE_OPTION)
elseif(PKG_CMAKE_OPTION)
# in cmake # in cmake
file(MAKE_DIRECTORY ${${pkg_name}_SOURCE_DIR}/_build) file(MAKE_DIRECTORY ${${pkg_name}_SOURCE_DIR}/_build)
if (${pkg_name}_CFLAGS)
if(${pkg_name}_CFLAGS)
set(${pkg_name}_CMAKE_CFLAGS "-DCMAKE_C_FLAGS=${${pkg_name}_CFLAGS}") set(${pkg_name}_CMAKE_CFLAGS "-DCMAKE_C_FLAGS=${${pkg_name}_CFLAGS}")
endif ()
if (${pkg_name}_CXXFLAGS)
endif()
if(${pkg_name}_CXXFLAGS)
set(${pkg_name}_CMAKE_CXXFLAGS "-DCMAKE_CXX_FLAGS=${${pkg_name}_CXXFLAGS}") set(${pkg_name}_CMAKE_CXXFLAGS "-DCMAKE_CXX_FLAGS=${${pkg_name}_CXXFLAGS}")
endif ()
endif()


if (${pkg_name}_LDFLAGS)
if (${pkg_name}_USE_STATIC_LIBS)
if(${pkg_name}_LDFLAGS)
if(${pkg_name}_USE_STATIC_LIBS)
#set(${pkg_name}_CMAKE_LDFLAGS "-DCMAKE_STATIC_LINKER_FLAGS=${${pkg_name}_LDFLAGS}") #set(${pkg_name}_CMAKE_LDFLAGS "-DCMAKE_STATIC_LINKER_FLAGS=${${pkg_name}_LDFLAGS}")
else() else()
set(${pkg_name}_CMAKE_LDFLAGS "-DCMAKE_SHARED_LINKER_FLAGS=${${pkg_name}_LDFLAGS}") set(${pkg_name}_CMAKE_LDFLAGS "-DCMAKE_SHARED_LINKER_FLAGS=${${pkg_name}_LDFLAGS}")
endif ()
endif ()
endif()
endif()


__exec_cmd(COMMAND ${CMAKE_COMMAND} ${PKG_CMAKE_OPTION} -G ${CMAKE_GENERATOR} __exec_cmd(COMMAND ${CMAKE_COMMAND} ${PKG_CMAKE_OPTION} -G ${CMAKE_GENERATOR}
${${pkg_name}_CMAKE_CFLAGS} ${${pkg_name}_CMAKE_CXXFLAGS} ${${pkg_name}_CMAKE_LDFLAGS} ${${pkg_name}_CMAKE_CFLAGS} ${${pkg_name}_CMAKE_CXXFLAGS} ${${pkg_name}_CMAKE_LDFLAGS}
@@ -364,48 +365,48 @@ function(mindspore_add_pkg pkg_name )
WORKING_DIRECTORY ${${pkg_name}_SOURCE_DIR}/_build) WORKING_DIRECTORY ${${pkg_name}_SOURCE_DIR}/_build)


else() else()
if (${pkg_name}_CFLAGS)
if(${pkg_name}_CFLAGS)
set(${pkg_name}_MAKE_CFLAGS "CFLAGS=${${pkg_name}_CFLAGS}") set(${pkg_name}_MAKE_CFLAGS "CFLAGS=${${pkg_name}_CFLAGS}")
endif ()
if (${pkg_name}_CXXFLAGS)
endif()
if(${pkg_name}_CXXFLAGS)
set(${pkg_name}_MAKE_CXXFLAGS "CXXFLAGS=${${pkg_name}_CXXFLAGS}") set(${pkg_name}_MAKE_CXXFLAGS "CXXFLAGS=${${pkg_name}_CXXFLAGS}")
endif ()
if (${pkg_name}_LDFLAGS)
endif()
if(${pkg_name}_LDFLAGS)
set(${pkg_name}_MAKE_LDFLAGS "LDFLAGS=${${pkg_name}_LDFLAGS}") set(${pkg_name}_MAKE_LDFLAGS "LDFLAGS=${${pkg_name}_LDFLAGS}")
endif ()
endif()
# in configure && make # in configure && make
if (PKG_PRE_CONFIGURE_COMMAND)
if(PKG_PRE_CONFIGURE_COMMAND)
__exec_cmd(COMMAND ${PKG_PRE_CONFIGURE_COMMAND} __exec_cmd(COMMAND ${PKG_PRE_CONFIGURE_COMMAND}
WORKING_DIRECTORY ${${pkg_name}_SOURCE_DIR}) WORKING_DIRECTORY ${${pkg_name}_SOURCE_DIR})
endif ()
endif()


if (PKG_CONFIGURE_COMMAND)
if(PKG_CONFIGURE_COMMAND)
__exec_cmd(COMMAND ${PKG_CONFIGURE_COMMAND} __exec_cmd(COMMAND ${PKG_CONFIGURE_COMMAND}
${${pkg_name}_MAKE_CFLAGS} ${${pkg_name}_MAKE_CXXFLAGS} ${${pkg_name}_MAKE_LDFLAGS} ${${pkg_name}_MAKE_CFLAGS} ${${pkg_name}_MAKE_CXXFLAGS} ${${pkg_name}_MAKE_LDFLAGS}
--prefix=${${pkg_name}_BASE_DIR} --prefix=${${pkg_name}_BASE_DIR}
WORKING_DIRECTORY ${${pkg_name}_SOURCE_DIR}) WORKING_DIRECTORY ${${pkg_name}_SOURCE_DIR})
endif ()
endif()
set(${pkg_name}_BUILD_OPTION ${PKG_BUILD_OPTION}) set(${pkg_name}_BUILD_OPTION ${PKG_BUILD_OPTION})
if (NOT PKG_CONFIGURE_COMMAND)
if(NOT PKG_CONFIGURE_COMMAND)
set(${pkg_name}_BUILD_OPTION ${${pkg_name}_BUILD_OPTION} set(${pkg_name}_BUILD_OPTION ${${pkg_name}_BUILD_OPTION}
${${pkg_name}_MAKE_CFLAGS} ${${pkg_name}_MAKE_CXXFLAGS} ${${pkg_name}_MAKE_LDFLAGS}) ${${pkg_name}_MAKE_CFLAGS} ${${pkg_name}_MAKE_CXXFLAGS} ${${pkg_name}_MAKE_LDFLAGS})
endif ()
endif()
# build # build
__exec_cmd(COMMAND ${CMAKE_MAKE_PROGRAM} ${${pkg_name}_BUILD_OPTION} -j${THNUM} __exec_cmd(COMMAND ${CMAKE_MAKE_PROGRAM} ${${pkg_name}_BUILD_OPTION} -j${THNUM}
WORKING_DIRECTORY ${${pkg_name}_SOURCE_DIR}) WORKING_DIRECTORY ${${pkg_name}_SOURCE_DIR})


if (PKG_INSTALL_INCS OR PKG_INSTALL_LIBS)
if(PKG_INSTALL_INCS OR PKG_INSTALL_LIBS)
file(GLOB ${pkg_name}_INSTALL_INCS ${${pkg_name}_SOURCE_DIR}/${PKG_INSTALL_INCS}) file(GLOB ${pkg_name}_INSTALL_INCS ${${pkg_name}_SOURCE_DIR}/${PKG_INSTALL_INCS})
file(GLOB ${pkg_name}_INSTALL_LIBS ${${pkg_name}_SOURCE_DIR}/${PKG_INSTALL_LIBS}) file(GLOB ${pkg_name}_INSTALL_LIBS ${${pkg_name}_SOURCE_DIR}/${PKG_INSTALL_LIBS})
file(COPY ${${pkg_name}_INSTALL_INCS} DESTINATION ${${pkg_name}_BASE_DIR}/include) file(COPY ${${pkg_name}_INSTALL_INCS} DESTINATION ${${pkg_name}_BASE_DIR}/include)
file(COPY ${${pkg_name}_INSTALL_LIBS} DESTINATION ${${pkg_name}_BASE_DIR}/lib) file(COPY ${${pkg_name}_INSTALL_LIBS} DESTINATION ${${pkg_name}_BASE_DIR}/lib)
else() else()
__exec_cmd(COMMAND ${CMAKE_MAKE_PROGRAM} install WORKING_DIRECTORY ${${pkg_name}_SOURCE_DIR}) __exec_cmd(COMMAND ${CMAKE_MAKE_PROGRAM} install WORKING_DIRECTORY ${${pkg_name}_SOURCE_DIR})
endif ()
endif ()
endif()
endif()
endif() endif()


if (PKG_LIBS)
if(PKG_LIBS)
__find_pkg_then_add_target(${pkg_name} ${PKG_EXE} ${PKG_LIB_PATH} ${PKG_LIBS}) __find_pkg_then_add_target(${pkg_name} ${PKG_EXE} ${PKG_LIB_PATH} ${PKG_LIBS})
set(${pkg_name}_INC ${${pkg_name}_BASE_DIR}/include PARENT_SCOPE) set(${pkg_name}_INC ${${pkg_name}_BASE_DIR}/include PARENT_SCOPE)
if(NOT ${pkg_name}_LIBS) if(NOT ${pkg_name}_LIBS)
@@ -413,10 +414,10 @@ function(mindspore_add_pkg pkg_name )
endif() endif()
else() else()
find_package(${__FIND_PKG_NAME} ${PKG_VER} QUIET ${MS_FIND_NO_DEFAULT_PATH}) find_package(${__FIND_PKG_NAME} ${PKG_VER} QUIET ${MS_FIND_NO_DEFAULT_PATH})
if (${__FIND_PKG_NAME}_FOUND)
if(${__FIND_PKG_NAME}_FOUND)
set(${pkg_name}_INC ${${pkg_name}_BASE_DIR}/include PARENT_SCOPE) set(${pkg_name}_INC ${${pkg_name}_BASE_DIR}/include PARENT_SCOPE)
message("Found pkg: ${${__FIND_PKG_NAME}_LIBRARIES}") message("Found pkg: ${${__FIND_PKG_NAME}_LIBRARIES}")
return() return()
endif ()
endif ()
endif()
endif()
endfunction() endfunction()

+ 1
- 0
example/matmul_distributed/export_model/export_model.sh View File

@@ -27,6 +27,7 @@ pytest -sv ./distributed_inference.py::test_inference >inference.log0 2>&1
if [ $? -eq 0 ]; then if [ $? -eq 0 ]; then
echo "inference success" echo "inference success"
else else
cat inference.log0
echo "inference failed" echo "inference failed"
exit 2 exit 2
fi fi


+ 4
- 1
mindspore_serving/CMakeLists.txt View File

@@ -3,7 +3,7 @@


# Find Protobuf installation # Find Protobuf installation
# Looks for protobuf-config.cmake file installed by Protobuf's cmake installation. # Looks for protobuf-config.cmake file installed by Protobuf's cmake installation.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wl,-rpath,$ORIGIN:$ORIGIN/lib")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wl,-rpath,$ORIGIN:$ORIGIN/lib -Wl,--no-as-needed")


add_library(protobuf::libprotobuf ALIAS protobuf::protobuf) add_library(protobuf::libprotobuf ALIAS protobuf::protobuf)
add_executable(protobuf::libprotoc ALIAS protobuf::protoc) add_executable(protobuf::libprotoc ALIAS protobuf::protoc)
@@ -102,10 +102,13 @@ set_property(TARGET serving_ascend PROPERTY POSITION_INDEPENDENT_CODE TRUE)
target_link_libraries(serving_common PRIVATE PROTO_SRC_LIB) target_link_libraries(serving_common PRIVATE PROTO_SRC_LIB)
target_link_libraries(serving_common PRIVATE ${_REFLECTION} ${_GRPC_GRPCPP} ${_PROTOBUF_LIBPROTOBUF} pthread) target_link_libraries(serving_common PRIVATE ${_REFLECTION} ${_GRPC_GRPCPP} ${_PROTOBUF_LIBPROTOBUF} pthread)
target_link_libraries(serving_common PRIVATE mindspore_serving::event mindspore_serving::event_pthreads) target_link_libraries(serving_common PRIVATE mindspore_serving::event mindspore_serving::event_pthreads)
target_link_libraries(serving_common PRIVATE mindspore_serving::event_core)
target_link_libraries(serving_common PRIVATE mindspore_serving::glog) target_link_libraries(serving_common PRIVATE mindspore_serving::glog)
target_link_libraries(serving_common PRIVATE mindspore_serving::eigen) target_link_libraries(serving_common PRIVATE mindspore_serving::eigen)
target_link_libraries(serving_common PRIVATE ${SECUREC_LIBRARY}) target_link_libraries(serving_common PRIVATE ${SECUREC_LIBRARY})


set_target_properties(serving_common PROPERTIES SKIP_BUILD_RPATH TRUE)

# python # python
add_compile_definitions(ENABLE_PYTHON) add_compile_definitions(ENABLE_PYTHON)
file(GLOB_RECURSE PY_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "ccsrc/python/*.cc") file(GLOB_RECURSE PY_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "ccsrc/python/*.cc")


+ 1
- 0
mindspore_serving/ccsrc/common/file_system_operation.cc View File

@@ -34,6 +34,7 @@ bool DirOrFileExist(const std::string &file_path) {
std::vector<std::string> GetAllSubDirs(const std::string &dir_path) { std::vector<std::string> GetAllSubDirs(const std::string &dir_path) {
std::vector<std::string> SubDirs = GetAllSubDirsNotFullPath(dir_path); std::vector<std::string> SubDirs = GetAllSubDirsNotFullPath(dir_path);
for (auto &item : SubDirs) { for (auto &item : SubDirs) {
// cppcheck-suppress useStlAlgorithm
item = dir_path + "/" + item; item = dir_path + "/" + item;
} }
return SubDirs; return SubDirs;


+ 7
- 8
mindspore_serving/ccsrc/master/dispacther.cc View File

@@ -57,9 +57,10 @@ DispatcherWorkerContext Dispatcher::GetWorkSession(const RequestSpec &request_sp
} }


Status Dispatcher::JudgeInferNum() { Status Dispatcher::JudgeInferNum() {
auto max_infer_num = MasterContext::Instance()->GetMaxRequestBufferCount();
if (infer_num_ >= max_infer_num) {
return INFER_STATUS_LOG_ERROR(FAILED) << "Serving Error: request buffer number exceeds the limit " << max_infer_num;
auto max_enqueued_requests = MasterContext::Instance()->GetMaxEnqueuedRequests();
if (enqueued_requests_ >= max_enqueued_requests) {
return INFER_STATUS_LOG_ERROR(FAILED)
<< "Serving Error: enqueued requests count exceeds the limit " << max_enqueued_requests;
} }
return SUCCESS; return SUCCESS;
} }
@@ -77,12 +78,10 @@ void Dispatcher::DispatchAsync(const proto::PredictRequest &request, proto::Pred
try { try {
auto callback = [this, on_finish]() { auto callback = [this, on_finish]() {
on_finish(); on_finish();
this->infer_num_--;
this->enqueued_requests_--;
}; };
infer_num_++;
MSI_TIME_STAMP_START(Predict)
enqueued_requests_++;
status = DispatchAsyncInner(request, reply, callback); status = DispatchAsyncInner(request, reply, callback);
MSI_TIME_STAMP_END(Predict)
} catch (const std::bad_alloc &ex) { } catch (const std::bad_alloc &ex) {
MSI_LOG(ERROR) << "Serving Error: malloc memory failed"; MSI_LOG(ERROR) << "Serving Error: malloc memory failed";
std::cout << "Serving Error: malloc memory failed" << std::endl; std::cout << "Serving Error: malloc memory failed" << std::endl;
@@ -101,7 +100,7 @@ void Dispatcher::DispatchAsync(const proto::PredictRequest &request, proto::Pred
if (status != SUCCESS) { if (status != SUCCESS) {
GrpcTensorHelper::CreateReplyFromErrorMsg(status, reply); GrpcTensorHelper::CreateReplyFromErrorMsg(status, reply);
on_finish(); on_finish();
infer_num_--;
enqueued_requests_--;
} }
} }




+ 1
- 1
mindspore_serving/ccsrc/master/dispacther.h View File

@@ -61,7 +61,7 @@ class MS_API Dispatcher {
std::shared_mutex servable_shared_lock_; std::shared_mutex servable_shared_lock_;
// avoid invoke Clear and then UnregisterServable is invoked by Clear in other thread // avoid invoke Clear and then UnregisterServable is invoked by Clear in other thread
std::atomic_bool clearing_flag = false; std::atomic_bool clearing_flag = false;
std::atomic_uint32_t infer_num_ = 0;
std::atomic_uint32_t enqueued_requests_ = 0;


Status JudgeInferNum(); Status JudgeInferNum();
DispatcherWorkerContext GetWorkSession(const RequestSpec &request_spec) const; DispatcherWorkerContext GetWorkSession(const RequestSpec &request_spec) const;


+ 6
- 1
mindspore_serving/ccsrc/master/grpc/grpc_server.h View File

@@ -94,7 +94,12 @@ class MasterPredictContext : public MasterServiceContext {
void HandleRequest() override { void HandleRequest() override {
EnqueueRequest(service_impl_, async_service_, cq_); EnqueueRequest(service_impl_, async_service_, cq_);
state_ = STATE::FINISH; state_ = STATE::FINISH;
PredictOnFinish on_finish = [this]() { responder_.Finish(response_, grpc::Status::OK, this); };
MSI_TIME_STAMP_START(RequestHandle)
PredictOnFinish on_finish = [this, time_start_RequestHandle]() {
responder_.Finish(response_, grpc::Status::OK, this);
MSI_TIME_STAMP_END(RequestHandle)
};
service_impl_->PredictAsync(&request_, &response_, on_finish); service_impl_->PredictAsync(&request_, &response_, on_finish);
} }


+ 3
- 3
mindspore_serving/ccsrc/master/master_context.cc View File

@@ -26,10 +26,10 @@ std::shared_ptr<MasterContext> MasterContext::Instance() {
return instance; return instance;
} }


void MasterContext::SetMaxRequestBufferCount(uint32_t max_request_buffer_count) {
max_request_buffer_count_ = max_request_buffer_count;
void MasterContext::SetMaxEnqueuedRequests(uint32_t max_enqueued_requests) {
max_enqueued_requests_ = max_enqueued_requests;
} }


uint32_t MasterContext::GetMaxRequestBufferCount() const { return max_request_buffer_count_; }
uint32_t MasterContext::GetMaxEnqueuedRequests() const { return max_enqueued_requests_; }


} // namespace mindspore::serving } // namespace mindspore::serving

+ 3
- 3
mindspore_serving/ccsrc/master/master_context.h View File

@@ -28,11 +28,11 @@ class MS_API MasterContext {
public: public:
static std::shared_ptr<MasterContext> Instance(); static std::shared_ptr<MasterContext> Instance();


void SetMaxRequestBufferCount(uint32_t max_request_buffer_count);
uint32_t GetMaxRequestBufferCount() const;
void SetMaxEnqueuedRequests(uint32_t max_enqueued_requests);
uint32_t GetMaxEnqueuedRequests() const;


private: private:
uint32_t max_request_buffer_count_ = 10000; // default 10000
uint32_t max_enqueued_requests_ = 10000; // default 10000
}; };


} // namespace mindspore::serving } // namespace mindspore::serving


+ 1
- 0
mindspore_serving/ccsrc/master/restful/http_process.cc View File

@@ -128,6 +128,7 @@ DataType RestfulService::GetObjDataType(const json &js) {


std::string RestfulService::GetStringByDataType(DataType type) { std::string RestfulService::GetStringByDataType(DataType type) {
for (const auto &item : str2_infer_type) { for (const auto &item : str2_infer_type) {
// cppcheck-suppress useStlAlgorithm
if (item.second == type) { if (item.second == type) {
return item.first; return item.first;
} }


+ 1
- 1
mindspore_serving/ccsrc/python/serving_py.cc View File

@@ -182,7 +182,7 @@ void PyRegWorker(pybind11::module *m_ptr) {
py::class_<MasterContext, std::shared_ptr<MasterContext>>(m, "MasterContext_") py::class_<MasterContext, std::shared_ptr<MasterContext>>(m, "MasterContext_")
.def(py::init<>()) .def(py::init<>())
.def_static("get_instance", &MasterContext::Instance) .def_static("get_instance", &MasterContext::Instance)
.def("set_max_request_buffer_count", &MasterContext::SetMaxRequestBufferCount);
.def("set_max_enqueued_requests", &MasterContext::SetMaxEnqueuedRequests);
} }


void PyRegWorkerAgent(pybind11::module *m_ptr) { void PyRegWorkerAgent(pybind11::module *m_ptr) {


+ 16
- 0
mindspore_serving/ccsrc/python/worker/worker_py.cc View File

@@ -42,6 +42,10 @@ void PyWorker::OnEndStartServable(const std::string &servable_directory, const s
void PyWorker::StartServable(const std::string &model_directory, const std::string &model_name, uint32_t version_number, void PyWorker::StartServable(const std::string &model_directory, const std::string &model_name, uint32_t version_number,
const std::string &master_ip, uint32_t master_port, const std::string &worker_ip, const std::string &master_ip, uint32_t master_port, const std::string &worker_ip,
uint32_t worker_port) { uint32_t worker_port) {
if (Worker::GetInstance().IsRunning()) {
MSI_LOG_EXCEPTION << "A servable has been started, only one servable can run in a process currently.";
}

auto notify_master = std::make_shared<GrpcNotfiyMaster>(master_ip, master_port, worker_ip, worker_port); auto notify_master = std::make_shared<GrpcNotfiyMaster>(master_ip, master_port, worker_ip, worker_port);
auto servable = std::make_shared<LocalModelServable>(); auto servable = std::make_shared<LocalModelServable>();
auto status = servable->StartServable(model_directory, model_name, version_number); auto status = servable->StartServable(model_directory, model_name, version_number);
@@ -69,6 +73,10 @@ void PyWorker::StartServable(const std::string &model_directory, const std::stri


void PyWorker::StartServableInMaster(const std::string &model_directory, const std::string &model_name, void PyWorker::StartServableInMaster(const std::string &model_directory, const std::string &model_name,
uint32_t version_number) { uint32_t version_number) {
if (Worker::GetInstance().IsRunning()) {
MSI_LOG_EXCEPTION << "A servable has been started, only one servable can run in a process currently.";
}

auto notify_master = std::make_shared<LocalNotifyMaster>(); auto notify_master = std::make_shared<LocalNotifyMaster>();
auto servable = std::make_shared<LocalModelServable>(); auto servable = std::make_shared<LocalModelServable>();
auto status = servable->StartServable(model_directory, model_name, version_number); auto status = servable->StartServable(model_directory, model_name, version_number);
@@ -92,6 +100,10 @@ void PyWorker::StartDistributedServable(const std::string &servable_directory, c
const std::string &worker_ip, uint32_t worker_port, const std::string &worker_ip, uint32_t worker_port,
const std::string &master_ip, uint32_t master_port, const std::string &master_ip, uint32_t master_port,
uint32_t wait_agents_time_in_seconds) { uint32_t wait_agents_time_in_seconds) {
if (Worker::GetInstance().IsRunning()) {
MSI_LOG_EXCEPTION << "A servable has been started, only one servable can run in a process currently.";
}

Status status; Status status;
auto servable = std::make_shared<DistributedServable>(); auto servable = std::make_shared<DistributedServable>();
auto grpc_sever = std::make_shared<MSDistributedWorkerServer>(servable); auto grpc_sever = std::make_shared<MSDistributedWorkerServer>(servable);
@@ -122,6 +134,10 @@ void PyWorker::StartDistributedServableInMaster(const std::string &servable_dire
const std::string &rank_table_json_file, uint32_t version_number, const std::string &rank_table_json_file, uint32_t version_number,
const std::string &worker_ip, uint32_t worker_port, const std::string &worker_ip, uint32_t worker_port,
uint32_t wait_agents_time_in_seconds) { uint32_t wait_agents_time_in_seconds) {
if (Worker::GetInstance().IsRunning()) {
MSI_LOG_EXCEPTION << "A servable has been started, only one servable can run in a process currently.";
}

Status status; Status status;
auto servable = std::make_shared<DistributedServable>(); auto servable = std::make_shared<DistributedServable>();
auto grpc_sever = std::make_shared<MSDistributedWorkerServer>(servable); auto grpc_sever = std::make_shared<MSDistributedWorkerServer>(servable);


+ 1
- 0
mindspore_serving/ccsrc/worker/context.cc View File

@@ -38,6 +38,7 @@ Status ServableContext::SetDeviceTypeStr(const std::string &device_type) {
DeviceType type; DeviceType type;
std::string device_type_lowcase = device_type; std::string device_type_lowcase = device_type;
for (auto &c : device_type_lowcase) { for (auto &c : device_type_lowcase) {
// cppcheck-suppress useStlAlgorithm
if (c >= 'A' && c <= 'Z') { if (c >= 'A' && c <= 'Z') {
c = c - 'A' + 'a'; c = c - 'A' + 'a';
} }


+ 0
- 2
mindspore_serving/ccsrc/worker/grpc/worker_process.cc View File

@@ -32,9 +32,7 @@ void MSWorkerImpl::PredictAsync(grpc::ServerContext *context, const proto::Predi
Status status(FAILED); Status status(FAILED);
MSI_LOG(INFO) << "Begin call service Eval"; MSI_LOG(INFO) << "Begin call service Eval";
try { try {
MSI_TIME_STAMP_START(Predict)
status = Worker::GetInstance().RunAsync(*request, reply, on_finish); status = Worker::GetInstance().RunAsync(*request, reply, on_finish);
MSI_TIME_STAMP_END(Predict)
} catch (const std::bad_alloc &ex) { } catch (const std::bad_alloc &ex) {
MSI_LOG(ERROR) << "Serving Error: malloc memory failed"; MSI_LOG(ERROR) << "Serving Error: malloc memory failed";
std::cout << "Serving Error: malloc memory failed" << std::endl; std::cout << "Serving Error: malloc memory failed" << std::endl;


+ 1
- 0
mindspore_serving/ccsrc/worker/grpc/worker_process.h View File

@@ -42,6 +42,7 @@ class MSWorkerImpl {
watcher_ = std::make_shared<Watcher<proto::MSAgent, proto::MSMaster>>(server_address); watcher_ = std::make_shared<Watcher<proto::MSAgent, proto::MSMaster>>(server_address);
} }
} }
virtual ~MSWorkerImpl() = default;
void PredictAsync(grpc::ServerContext *context, const proto::PredictRequest *request, proto::PredictReply *reply, void PredictAsync(grpc::ServerContext *context, const proto::PredictRequest *request, proto::PredictReply *reply,
PredictOnFinish on_finish); PredictOnFinish on_finish);


+ 5
- 1
mindspore_serving/ccsrc/worker/grpc/worker_server.h View File

@@ -100,7 +100,11 @@ class WorkerPredictContext : public WorkerServiceContext {
void HandleRequest() override { void HandleRequest() override {
EnqueueRequest(service_impl_, async_service_, cq_); EnqueueRequest(service_impl_, async_service_, cq_);
state_ = STATE::FINISH; state_ = STATE::FINISH;
PredictOnFinish on_finish = [this]() { responder_.Finish(response_, grpc::Status::OK, this); };
MSI_TIME_STAMP_START(WorkerRequestHandle)
PredictOnFinish on_finish = [this, time_start_WorkerRequestHandle]() {
responder_.Finish(response_, grpc::Status::OK, this);
MSI_TIME_STAMP_END(WorkerRequestHandle)
};
service_impl_->PredictAsync(&ctx_, &request_, &response_, on_finish); service_impl_->PredictAsync(&ctx_, &request_, &response_, on_finish);
} }


+ 91
- 43
mindspore_serving/ccsrc/worker/inference/mindspore_model_wrap.cc View File

@@ -78,10 +78,6 @@ Status MindSporeModelWrap::LoadModelFromFile(serving::DeviceType device_type, ui
const std::string &file_name, ModelType model_type, bool with_batch_dim, const std::string &file_name, ModelType model_type, bool with_batch_dim,
const std::vector<int> &without_batch_dim_inputs, const std::vector<int> &without_batch_dim_inputs,
const std::map<std::string, std::string> &other_options) { const std::map<std::string, std::string> &other_options) {
std::string ms_device_type = GetMsDeviceType(device_type);
if (ms_device_type.empty()) {
return INFER_STATUS_LOG_ERROR(FAILED) << "Invalid device type " << device_type;
}
auto ms_model_type = GetMsModelType(model_type); auto ms_model_type = GetMsModelType(model_type);
if (ms_model_type == mindspore::kUnknownType) { if (ms_model_type == mindspore::kUnknownType) {
return INFER_STATUS_LOG_ERROR(FAILED) << "Invalid model type " << model_type; return INFER_STATUS_LOG_ERROR(FAILED) << "Invalid model type " << model_type;
@@ -89,26 +85,27 @@ Status MindSporeModelWrap::LoadModelFromFile(serving::DeviceType device_type, ui


std::shared_ptr<mindspore::Model> model = nullptr; std::shared_ptr<mindspore::Model> model = nullptr;
try { try {
mindspore::GlobalContext::SetGlobalDeviceTarget(ms_device_type);
mindspore::GlobalContext::SetGlobalDeviceID(device_id);
auto graph = mindspore::Serialization::LoadModel(file_name, ms_model_type);
auto context = TransformModelContext(other_options);
model = std::make_shared<mindspore::Model>(mindspore::GraphCell(graph), context);
mindspore::Graph graph;
auto ms_status = mindspore::Serialization::Load(file_name, ms_model_type, &graph);
auto context = TransformModelContext(device_type, device_id, other_options);
model = std::make_shared<mindspore::Model>();
mindspore::Status status = model->Build(mindspore::GraphCell(graph), context);
if (!status.IsOk()) {
return INFER_STATUS_LOG_ERROR(FAILED)
<< "Load model from file failed, model file: " << file_name << ", device_type: '" << device_type
<< "', device_id: " << device_id << ", model type: " << model_type << ", options: " << other_options
<< ", build error detail: " << status.ToString();
}
} catch (std::runtime_error &ex) { } catch (std::runtime_error &ex) {
return INFER_STATUS_LOG_ERROR(FAILED) return INFER_STATUS_LOG_ERROR(FAILED)
<< "Load model from file failed, model file: " << file_name << ", device_type: '" << ms_device_type
<< "', device_id: " << device_id << ", model type: " << model_type << ", options: " << other_options;
}
mindspore::Status status = model->Build();
if (!status.IsOk()) {
return INFER_STATUS_LOG_ERROR(FAILED)
<< "Load model from file failed, model file: " << file_name << ", device_type: '" << ms_device_type
<< "Load model from file failed, model file: " << file_name << ", device_type: '" << device_type
<< "', device_id: " << device_id << ", model type: " << model_type << ", options: " << other_options << "', device_id: " << device_id << ", model type: " << model_type << ", options: " << other_options
<< ", build error detail: " << status.ToString();
<< ", build error detail: " << ex.what();
} }

ApiModelInfo api_model_info; ApiModelInfo api_model_info;
api_model_info.model = model; api_model_info.model = model;
api_model_info.device_type = ms_device_type;
api_model_info.device_type = device_type;
api_model_info.device_id = device_id; api_model_info.device_id = device_id;
api_model_info.with_batch_dim = with_batch_dim; api_model_info.with_batch_dim = with_batch_dim;
api_model_info.without_batch_dim_inputs = without_batch_dim_inputs; api_model_info.without_batch_dim_inputs = without_batch_dim_inputs;
@@ -118,45 +115,88 @@ Status MindSporeModelWrap::LoadModelFromFile(serving::DeviceType device_type, ui
} }
GetModelBatchSize(&api_model_info); GetModelBatchSize(&api_model_info);
model_ = api_model_info; model_ = api_model_info;
MSI_LOG_INFO << "Load model from file success, model file: " << file_name << ", device_type: '" << ms_device_type
MSI_LOG_INFO << "Load model from file success, model file: " << file_name << ", device_type: '" << device_type
<< "', device_id: " << device_id << ", model type: " << model_type << ", options: " << other_options; << "', device_id: " << device_id << ", model type: " << model_type << ", options: " << other_options;
return SUCCESS; return SUCCESS;
} }


std::shared_ptr<Context> MindSporeModelWrap::TransformModelContext(const std::map<std::string, std::string> &options) {
using ContextStrFun = std::function<void(const std::shared_ptr<Context> &, const std::string &)>;
ContextStrFun set_output_type = [](const std::shared_ptr<Context> &context, const std::string &val) {
std::shared_ptr<DeviceInfoContext> MindSporeModelWrap::TransformAscend310ModelContext(
uint32_t device_id, const std::map<std::string, std::string> &options) {
auto context_info = std::make_shared<Ascend310DeviceInfo>();
context_info->SetDeviceID(device_id);

using ContextStrFun = std::function<void(const std::string &)>;
ContextStrFun set_output_type = [context_info](const std::string &val) {
// "FP32", "FP16", "UINT8" // "FP32", "FP16", "UINT8"
if (val == "FP32") { if (val == "FP32") {
mindspore::ModelContext::SetOutputType(context, mindspore::DataType::kNumberTypeFloat32);
context_info->SetOutputType(mindspore::DataType::kNumberTypeFloat32);
} else if (val == "FP16") { } else if (val == "FP16") {
mindspore::ModelContext::SetOutputType(context, mindspore::DataType::kNumberTypeFloat16);
context_info->SetOutputType(mindspore::DataType::kNumberTypeFloat16);
} else if (val == "UINT8") { } else if (val == "UINT8") {
mindspore::ModelContext::SetOutputType(context, mindspore::DataType::kNumberTypeUInt8);
context_info->SetOutputType(mindspore::DataType::kNumberTypeUInt8);
} else { } else {
MSI_LOG_ERROR << "Set model context output type failed, unknown data type " << val; MSI_LOG_ERROR << "Set model context output type failed, unknown data type " << val;
} }
}; };
auto context = std::make_shared<mindspore::ModelContext>();
for (auto &item : options) { for (auto &item : options) {
const auto &key = item.first; const auto &key = item.first;
const auto &value = item.second; const auto &value = item.second;
if (key == "acl_option.insert_op_config_file_path") { if (key == "acl_option.insert_op_config_file_path") {
mindspore::ModelContext::SetInsertOpConfigPath(context, value);
context_info->SetInsertOpConfigPath(value);
} else if (key == "acl_option.input_format") { } else if (key == "acl_option.input_format") {
mindspore::ModelContext::SetInputFormat(context, value);
context_info->SetInputFormat(value);
} else if (key == "acl_option.input_shape") { } else if (key == "acl_option.input_shape") {
mindspore::ModelContext::SetInputShape(context, value);
context_info->SetInputShape(value);
} else if (key == "acl_option.output_type") { } else if (key == "acl_option.output_type") {
set_output_type(context, value);
set_output_type(value);
} else if (key == "acl_option.precision_mode") { } else if (key == "acl_option.precision_mode") {
mindspore::ModelContext::SetPrecisionMode(context, value);
context_info->SetPrecisionMode(value);
} else if (key == "acl_option.op_select_impl_mode") { } else if (key == "acl_option.op_select_impl_mode") {
mindspore::ModelContext::SetOpSelectImplMode(context, value);
} else if (key == "gpu_option.enable_trt_infer") {
mindspore::ModelContext::SetGpuTrtInferMode(context, value);
context_info->SetOpSelectImplMode(value);
}
}
return context_info;
}

std::shared_ptr<DeviceInfoContext> MindSporeModelWrap::TransformAscend910ModelContext(
uint32_t device_id, const std::map<std::string, std::string> &options) {
auto context_info = std::make_shared<Ascend910DeviceInfo>();
context_info->SetDeviceID(device_id);
return context_info;
}
std::shared_ptr<DeviceInfoContext> MindSporeModelWrap::TransformNvidiaGPUModelContext(
uint32_t device_id, const std::map<std::string, std::string> &options) {
auto context_info = std::make_shared<NvidiaGPUDeviceInfo>();
context_info->SetDeviceID(device_id);

for (auto &item : options) {
const auto &key = item.first;
const auto &value = item.second;
if (key == "gpu_option.enable_trt_infer") {
if (value == "True") {
context_info->SetGpuTrtInferMode(true);
} else {
context_info->SetGpuTrtInferMode(false);
}
} }
} }
return context_info;
}

std::shared_ptr<Context> MindSporeModelWrap::TransformModelContext(serving::DeviceType device_type, uint32_t device_id,
const std::map<std::string, std::string> &options) {
auto context = std::make_shared<mindspore::Context>();
std::shared_ptr<mindspore::DeviceInfoContext> context_info = nullptr;
if (device_type == kDeviceTypeAscendMS) {
context_info = TransformAscend910ModelContext(device_id, options);
} else if (device_type == kDeviceTypeAscendCL) {
context_info = TransformAscend310ModelContext(device_id, options);
} else if (device_type == kDeviceTypeGpu) {
context_info = TransformNvidiaGPUModelContext(device_id, options);
}
if (context_info != nullptr) {
context->MutableDeviceInfo().push_back(context_info);
}
return context; return context;
} }


@@ -311,7 +351,12 @@ Status MindSporeModelWrap::ExecuteModelCommon(size_t request_size, const FuncMak
} }
std::vector<mindspore::MSTensor> inputs; std::vector<mindspore::MSTensor> inputs;
for (size_t i = 0; i < input_names.size(); i++) { for (size_t i = 0; i < input_names.size(); i++) {
inputs.push_back(in_func(i, input_names[i]));
auto tensor = in_func(i, input_names[i]);
if (tensor == nullptr) {
return INFER_STATUS_LOG_ERROR(FAILED) << "Failed to create input " << i << " MSTensor";
}
inputs.push_back(*tensor);
mindspore::MSTensor::DestroyTensorPtr(tensor);
} }
std::vector<mindspore::MSTensor> outputs; std::vector<mindspore::MSTensor> outputs;
mindspore::Status status = model->Predict(inputs, &outputs); mindspore::Status status = model->Predict(inputs, &outputs);
@@ -344,8 +389,8 @@ std::vector<serving::TensorInfo> MindSporeModelWrap::GetOutputInfos() const { re
ssize_t MindSporeModelWrap::GetBatchSize() const { return model_.batch_size; } ssize_t MindSporeModelWrap::GetBatchSize() const { return model_.batch_size; }


bool MindSporeModelWrap::CheckModelSupport(DeviceType device_type, ModelType model_type) const { bool MindSporeModelWrap::CheckModelSupport(DeviceType device_type, ModelType model_type) const {
std::string ms_device_type = GetMsDeviceType(device_type);
if (ms_device_type.empty()) {
auto ms_device_type = GetMsDeviceType(device_type);
if (ms_device_type == mindspore::kInvalidDeviceType) {
return false; return false;
} }
auto ms_model_type = GetMsModelType(model_type); auto ms_model_type = GetMsModelType(model_type);
@@ -376,22 +421,25 @@ mindspore::ModelType MindSporeModelWrap::GetMsModelType(serving::ModelType model
return ms_model_type; return ms_model_type;
} }


std::string MindSporeModelWrap::GetMsDeviceType(serving::DeviceType device_type) {
std::string device_type_str;
mindspore::DeviceType MindSporeModelWrap::GetMsDeviceType(serving::DeviceType device_type) {
mindspore::DeviceType ms_device_type = mindspore::DeviceType::kInvalidDeviceType;
switch (device_type) { switch (device_type) {
case kDeviceTypeAscendMS: case kDeviceTypeAscendMS:
device_type_str = mindspore::kDeviceTypeAscend910;
ms_device_type = mindspore::DeviceType::kAscend910;
break; break;
case kDeviceTypeAscendCL: case kDeviceTypeAscendCL:
device_type_str = mindspore::kDeviceTypeAscend310;
ms_device_type = mindspore::DeviceType::kAscend310;
break; break;
case kDeviceTypeGpu: case kDeviceTypeGpu:
device_type_str = mindspore::kDeviceTypeGPU;
ms_device_type = mindspore::DeviceType::kNvidiaGPU;
break;
case kDeviceTypeCpu:
ms_device_type = mindspore::DeviceType::kCPU;
break; break;
default: default:
break; break;
} }
return device_type_str;
return ms_device_type;
} }


ApiBufferTensorWrap::ApiBufferTensorWrap() = default; ApiBufferTensorWrap::ApiBufferTensorWrap() = default;


+ 13
- 4
mindspore_serving/ccsrc/worker/inference/mindspore_model_wrap.h View File

@@ -40,7 +40,7 @@ struct ApiModelInfo {
std::vector<serving::TensorInfo> output_tensor_infos; std::vector<serving::TensorInfo> output_tensor_infos;
std::shared_ptr<mindspore::Model> model = nullptr; std::shared_ptr<mindspore::Model> model = nullptr;
uint32_t batch_size = 0; uint32_t batch_size = 0;
std::string device_type;
serving::DeviceType device_type;
uint32_t device_id = 0; uint32_t device_id = 0;
bool with_batch_dim = false; bool with_batch_dim = false;
std::vector<int> without_batch_dim_inputs; std::vector<int> without_batch_dim_inputs;
@@ -71,15 +71,24 @@ class MindSporeModelWrap : public InferenceBase {
private: private:
ApiModelInfo model_; ApiModelInfo model_;


using FuncMakeInBuffer = std::function<mindspore::MSTensor(size_t index, const std::string &name)>;
using FuncMakeInBuffer = std::function<mindspore::MSTensor *(size_t index, const std::string &name)>;
using FuncMakeOutTensor = using FuncMakeOutTensor =
std::function<void(const mindspore::MSTensor, DataType data_type, const std::vector<int64_t> &shape)>; std::function<void(const mindspore::MSTensor, DataType data_type, const std::vector<int64_t> &shape)>;
Status ExecuteModelCommon(size_t request_size, const FuncMakeInBuffer &in_func, const FuncMakeOutTensor &out_func); Status ExecuteModelCommon(size_t request_size, const FuncMakeInBuffer &in_func, const FuncMakeOutTensor &out_func);
Status GetModelInfos(ApiModelInfo *model_info); Status GetModelInfos(ApiModelInfo *model_info);
std::shared_ptr<Context> TransformModelContext(const std::map<std::string, std::string> &other_options);
std::shared_ptr<Context> TransformModelContext(serving::DeviceType device_type, uint32_t device_id,
const std::map<std::string, std::string> &other_options);

std::shared_ptr<DeviceInfoContext> TransformAscend310ModelContext(uint32_t device_id,
const std::map<std::string, std::string> &options);
std::shared_ptr<DeviceInfoContext> TransformAscend910ModelContext(uint32_t device_id,
const std::map<std::string, std::string> &options);
std::shared_ptr<DeviceInfoContext> TransformNvidiaGPUModelContext(uint32_t device_id,
const std::map<std::string, std::string> &options);

void GetModelBatchSize(ApiModelInfo *model_info); void GetModelBatchSize(ApiModelInfo *model_info);
static mindspore::ModelType GetMsModelType(serving::ModelType model_type); static mindspore::ModelType GetMsModelType(serving::ModelType model_type);
static std::string GetMsDeviceType(serving::DeviceType device_type);
static mindspore::DeviceType GetMsDeviceType(serving::DeviceType device_type);
}; };


class ApiBufferTensorWrap : public TensorBase { class ApiBufferTensorWrap : public TensorBase {


+ 4
- 1
mindspore_serving/ccsrc/worker/work_executor.cc View File

@@ -251,7 +251,10 @@ Status WorkExecutor::Work(const RequestSpec &request_spec, const std::vector<Ins
context.user_context = user_context; context.user_context = user_context;
} }
infer_session.instances = instances; infer_session.instances = instances;
infer_session_map_[user_id] = infer_session;
{
std::unique_lock<std::mutex> lock(infer_session_map_mutex_);
infer_session_map_[user_id] = infer_session;
}


if (!method_def.preprocess_name.empty()) { if (!method_def.preprocess_name.empty()) {
OnReceivePreprocessInputs(instances); OnReceivePreprocessInputs(instances);


+ 1
- 2
mindspore_serving/master/__init__.py View File

@@ -22,6 +22,5 @@ __all__.extend([
"start_grpc_server", "start_grpc_server",
'start_restful_server', 'start_restful_server',
'start_master_server', 'start_master_server',
'stop',
'context'
'stop'
]) ])

+ 6
- 4
mindspore_serving/master/context.py View File

@@ -16,19 +16,21 @@
from mindspore_serving._mindspore_serving import MasterContext_ from mindspore_serving._mindspore_serving import MasterContext_
from mindspore_serving.common import check_type from mindspore_serving.common import check_type


__all__ = ["set_max_enqueued_requests"]

_context = MasterContext_.get_instance() _context = MasterContext_.get_instance()




def set_max_request_buffer_count(max_request_buffer_count):
def set_max_enqueued_requests(max_enqueued_requests):
r""" r"""
Set the maximum number of requests waiting to be processed. Set the maximum number of requests waiting to be processed.


Args: Args:
max_request_buffer_count (int): The maximum acceptable infer message size in number, default 10000,
max_enqueued_requests (int): The maximum acceptable infer message size in number, default 10000,
Max infer number should be a positive integer. Max infer number should be a positive integer.


Raises: Raises:
RuntimeError: The type or value of the parameters is invalid, or other error happened. RuntimeError: The type or value of the parameters is invalid, or other error happened.
""" """
check_type.check_int("max_request_buffer_count", max_request_buffer_count, 1)
_context.set_max_request_buffer_count(max_request_buffer_count)
check_type.check_int("max_enqueued_requests", max_enqueued_requests, 1)
_context.set_max_enqueued_requests(max_enqueued_requests)

+ 2
- 2
mindspore_serving/worker/_worker.py View File

@@ -138,7 +138,7 @@ def start_servable(servable_directory, servable_name, version_number=0,
Args: Args:
servable_directory (str): The directory where the servable is located in. There expects to has a directory servable_directory (str): The directory where the servable is located in. There expects to has a directory
named `servable_name`. For more detail: named `servable_name`. For more detail:
`How to config Servable <https://www.mindspore.cn/tutorial/inference/zh-CN/master/serving_model.html>`_ .
`How to config Servable <https://www.mindspore.cn/tutorial/inference/zh-CN/r1.2/serving_model.html>`_ .


servable_name (str): The servable name. servable_name (str): The servable name.
version_number (int): Servable version number to be loaded. The version number should be a positive integer, version_number (int): Servable version number to be loaded. The version number should be a positive integer,
@@ -198,7 +198,7 @@ def start_servable_in_master(servable_directory, servable_name, version_number=0
Args: Args:
servable_directory (str): The directory where the servable is located in. There expects to has a directory named servable_directory (str): The directory where the servable is located in. There expects to has a directory named
`servable_name`. For more detail: `servable_name`. For more detail:
`How to config Servable <https://www.mindspore.cn/tutorial/inference/zh-CN/master/serving_model.html>`_ .
`How to config Servable <https://www.mindspore.cn/tutorial/inference/zh-CN/r1.2/serving_model.html>`_ .


servable_name (str): The servable name. servable_name (str): The servable name.
version_number (int): Servable version number to be loaded. The version number should be a positive integer, version_number (int): Servable version number to be loaded. The version number should be a positive integer,


+ 2
- 0
mindspore_serving/worker/distributed/agent_startup.py View File

@@ -350,6 +350,7 @@ def _startup_agents(common_meta, worker_ip, worker_port,


class DistributedServableConfig: class DistributedServableConfig:
"""Python DistributedServableConfig""" """Python DistributedServableConfig"""

def __init__(self): def __init__(self):
self.rank_table_content = "" self.rank_table_content = ""
self.rank_list = None self.rank_list = None
@@ -407,6 +408,7 @@ def _get_worker_distributed_config(worker_ip, worker_port):
# pylint: disable=broad-except # pylint: disable=broad-except
except Exception as e: except Exception as e:
c_send_pipe.send(e) c_send_pipe.send(e)

process = Process(target=process_fun, args=(c_send_pipe,), process = Process(target=process_fun, args=(c_send_pipe,),
name=f"worker_agent_get_agents_config_from_worker") name=f"worker_agent_get_agents_config_from_worker")
process.start() process.start()


+ 2
- 2
mindspore_serving/worker/distributed/distributed_worker.py View File

@@ -50,7 +50,7 @@ def start_distributed_servable(servable_directory, servable_name, rank_table_jso
Args: Args:
servable_directory (str): The directory where the servable is located in. There expects to has a directory servable_directory (str): The directory where the servable is located in. There expects to has a directory
named `servable_name`. For more detail: named `servable_name`. For more detail:
`How to config Servable <https://www.mindspore.cn/tutorial/inference/zh-CN/master/serving_model.html>`_ .
`How to config Servable <https://www.mindspore.cn/tutorial/inference/zh-CN/r1.2/serving_model.html>`_ .


servable_name (str): The servable name. servable_name (str): The servable name.
version_number (int): Servable version number to be loaded. The version number should be a positive integer, version_number (int): Servable version number to be loaded. The version number should be a positive integer,
@@ -107,7 +107,7 @@ def start_distributed_servable_in_master(servable_directory, servable_name, rank
Args: Args:
servable_directory (str): The directory where the servable is located in. There expects to has a directory named servable_directory (str): The directory where the servable is located in. There expects to has a directory named
`servable_name`. For more detail: `servable_name`. For more detail:
`How to config Servable <https://www.mindspore.cn/tutorial/inference/zh-CN/master/serving_model.html>`_ .
`How to config Servable <https://www.mindspore.cn/tutorial/inference/zh-CN/r1.2/serving_model.html>`_ .


servable_name (str): The servable name. servable_name (str): The servable name.
version_number (int): Servable version number to be loaded. The version number should be a positive integer, version_number (int): Servable version number to be loaded. The version number should be a positive integer,


+ 2
- 4
mindspore_serving/worker/register/method.py View File

@@ -80,8 +80,7 @@ def call_preprocess_pipeline(preprocess_fun, *args):
in `preprocess` or `postprocess`, such as using MindData concurrency ability to process multiple input in `preprocess` or `postprocess`, such as using MindData concurrency ability to process multiple input
images in `preprocess`, MindSpore Serving provides 'call_preprocess_pipeline' and 'call_pstprocess_pipeline' images in `preprocess`, MindSpore Serving provides 'call_preprocess_pipeline' and 'call_pstprocess_pipeline'
to register such preprocessing and postprocessing. For more detail, to register such preprocessing and postprocessing. For more detail,
please refer to [Resnet50 model configuration example]
<https://gitee.com/mindspore/serving/blob/master/example/resnet/resnet50/servable_config.py>`_ .
please refer to `Resnet50 model configuration example <https://gitee.com/mindspore/serving/blob/r1.2/example/resnet/resnet50/servable_config.py>`_.


Args: Args:
preprocess_fun (function): Python pipeline function for preprocess. preprocess_fun (function): Python pipeline function for preprocess.
@@ -248,8 +247,7 @@ def call_postprocess_pipeline(postprocess_fun, *args):
in `preprocess` or `postprocess`, such as using MindData concurrency ability to process multiple input in `preprocess` or `postprocess`, such as using MindData concurrency ability to process multiple input
images in `preprocess`, MindSpore Serving provides 'call_preprocess_pipeline' and 'call_pstprocess_pipeline' images in `preprocess`, MindSpore Serving provides 'call_preprocess_pipeline' and 'call_pstprocess_pipeline'
to register such preprocessing and postprocessing. For more detail, to register such preprocessing and postprocessing. For more detail,
please refer to [Resnet50 model configuration example]
<https://gitee.com/mindspore/serving/blob/master/example/resnet/resnet50/servable_config.py>`_ .
please refer to `Resnet50 model configuration example <https://gitee.com/mindspore/serving/blob/r1.2/example/resnet/resnet50/servable_config.py>`_.


Args: Args:
postprocess_fun (function): Python pipeline function for postprocess. postprocess_fun (function): Python pipeline function for postprocess.


+ 4
- 4
scripts/check_clang_format.sh View File

@@ -33,7 +33,7 @@ echo "SCRIPTS_PATH=$SCRIPTS_PATH"
# print usage message # print usage message
function usage() function usage()
{ {
echo "Check whether the specified source files were well formated"
echo "Check whether the specified source files were well formatted"
echo "Usage:" echo "Usage:"
echo "bash $0 [-a] [-c] [-l] [-h]" echo "bash $0 [-a] [-c] [-l] [-h]"
echo "e.g. $0 -a" echo "e.g. $0 -a"
@@ -97,7 +97,7 @@ fi
CHECK_RESULT_FILE=__code_format_check_result__ CHECK_RESULT_FILE=__code_format_check_result__
echo "0" > "$CHECK_RESULT_FILE" echo "0" > "$CHECK_RESULT_FILE"


# check format of files modified in the lastest commit
# check format of files modified in the latest commit
while read line; do while read line; do
BASE_NAME=$(basename "${line}") BASE_NAME=$(basename "${line}")
TEMP_FILE="__TEMP__${BASE_NAME}" TEMP_FILE="__TEMP__${BASE_NAME}"
@@ -107,7 +107,7 @@ while read line; do
ret=$? ret=$?
rm "${TEMP_FILE}" rm "${TEMP_FILE}"
if [[ "${ret}" -ne 0 ]]; then if [[ "${ret}" -ne 0 ]]; then
echo "File ${line} is not formated, please format it."
echo "File ${line} is not formatted, please format it."
echo "1" > "${CHECK_RESULT_FILE}" echo "1" > "${CHECK_RESULT_FILE}"
break break
fi fi
@@ -118,6 +118,6 @@ rm "${CHECK_RESULT_FILE}"
rm "${CHECK_LIST_FILE}" rm "${CHECK_LIST_FILE}"
cd "${CURRENT_PATH}" || exit 1 cd "${CURRENT_PATH}" || exit 1
if [[ "X${result}" == "X0" ]]; then if [[ "X${result}" == "X0" ]]; then
echo "Check PASS: specified files are well formated!"
echo "Check PASS: specified files are well formatted!"
fi fi
exit "${result}" exit "${result}"

+ 1
- 1
tests/ut/python/tests/common.py View File

@@ -130,7 +130,7 @@ def serving_test(func):
try: try:
func(*args, **kwargs) func(*args, **kwargs)
finally: finally:
master.context.set_max_request_buffer_count(10000)
master.context.set_max_enqueued_requests(10000)
master.stop() master.stop()
worker.stop() worker.stop()
servable_dir = os.path.join(os.getcwd(), "serving_python_ut_servables") servable_dir = os.path.join(os.getcwd(), "serving_python_ut_servables")


+ 6
- 6
tests/ut/python/tests/test_mater_worker_client.py View File

@@ -1119,7 +1119,7 @@ def add_common(x1, x2):
return y return y
""" """
base.init_servable_with_servable_config(1, servable_content) base.init_servable_with_servable_config(1, servable_content)
master.context.set_max_request_buffer_count(1)
master.context.set_max_enqueued_requests(1)
worker.start_servable_in_master(base.servable_dir, base.servable_name) worker.start_servable_in_master(base.servable_dir, base.servable_name)
master.start_grpc_server("0.0.0.0", 5500) master.start_grpc_server("0.0.0.0", 5500)
# Client # Client
@@ -1140,10 +1140,10 @@ def add_common(x1, x2):
assert "error" in result0 or "error" in result1 assert "error" in result0 or "error" in result1
if "error" in result0: if "error" in result0:
assert "error" not in result1 assert "error" not in result1
assert "Serving Error: request buffer number exceeds the limit 1" in result0["error"]
assert "Serving Error: enqueued requests count exceeds the limit 1" in result0["error"]
else: else:
assert "error" not in result0 assert "error" not in result0
assert "Serving Error: request buffer number exceeds the limit 1" in result1["error"]
assert "Serving Error: enqueued requests count exceeds the limit 1" in result1["error"]




@serving_test @serving_test
@@ -1165,7 +1165,7 @@ def add_common(x1, x2):
return y return y
""" """
base.init_servable_with_servable_config(1, servable_content) base.init_servable_with_servable_config(1, servable_content)
master.context.set_max_request_buffer_count(1)
master.context.set_max_enqueued_requests(1)


master.start_master_server("0.0.0.0", 6100) master.start_master_server("0.0.0.0", 6100)
worker.start_servable(base.servable_dir, base.servable_name, worker_port=6200, master_port=6100) worker.start_servable(base.servable_dir, base.servable_name, worker_port=6200, master_port=6100)
@@ -1188,7 +1188,7 @@ def add_common(x1, x2):
assert "error" in result0 or "error" in result1 assert "error" in result0 or "error" in result1
if "error" in result0: if "error" in result0:
assert "error" not in result1 assert "error" not in result1
assert "Serving Error: request buffer number exceeds the limit 1" in result0["error"]
assert "Serving Error: enqueued requests count exceeds the limit 1" in result0["error"]
else: else:
assert "error" not in result0 assert "error" not in result0
assert "Serving Error: request buffer number exceeds the limit 1" in result1["error"]
assert "Serving Error: enqueued requests count exceeds the limit 1" in result1["error"]

+ 59
- 33
tests/ut/stub/cxx_api/cell.cc View File

@@ -21,12 +21,19 @@
namespace mindspore { namespace mindspore {
std::vector<Output> CellBase::operator()(const std::vector<Input> &inputs) const { return Clone()->Construct(inputs); } std::vector<Output> CellBase::operator()(const std::vector<Input> &inputs) const { return Clone()->Construct(inputs); }


ParameterCell::ParameterCell(const ParameterCell &cell) : tensor_(cell.tensor_.Clone()) {}
ParameterCell::ParameterCell(const ParameterCell &cell) {
auto tmp_ptr = cell.tensor_.Clone();
tensor_ = *tmp_ptr;
MSTensor::DestroyTensorPtr(tmp_ptr);
}

ParameterCell &ParameterCell::operator=(const ParameterCell &cell) { ParameterCell &ParameterCell::operator=(const ParameterCell &cell) {
if (&cell == this) { if (&cell == this) {
return *this; return *this;
} }
tensor_ = cell.tensor_.Clone();
auto tmp_ptr = cell.tensor_.Clone();
tensor_ = *tmp_ptr;
MSTensor::DestroyTensorPtr(tmp_ptr);
return *this; return *this;
} }


@@ -40,10 +47,16 @@ ParameterCell &ParameterCell::operator=(ParameterCell &&cell) {
return *this; return *this;
} }


ParameterCell::ParameterCell(const MSTensor &tensor) : tensor_(tensor.Clone()) {}
ParameterCell::ParameterCell(const MSTensor &tensor) {
auto tmp_ptr = tensor.Clone();
tensor_ = *tmp_ptr;
MSTensor::DestroyTensorPtr(tmp_ptr);
}


ParameterCell &ParameterCell::operator=(const MSTensor &tensor) { ParameterCell &ParameterCell::operator=(const MSTensor &tensor) {
tensor_ = tensor.Clone();
auto tmp_ptr = tensor.Clone();
tensor_ = *tmp_ptr;
MSTensor::DestroyTensorPtr(tmp_ptr);
return *this; return *this;
} }


@@ -54,54 +67,67 @@ ParameterCell &ParameterCell::operator=(MSTensor &&tensor) {
return *this; return *this;
} }


GraphCell::GraphCell(const Graph &graph)
: graph_(std::make_shared<Graph>(graph)),
executor_(Factory<GraphCell::GraphImpl>::Instance().Create(GlobalContext::GetGlobalDeviceTarget())) {
MS_EXCEPTION_IF_NULL(graph_);
MS_EXCEPTION_IF_NULL(executor_);
executor_->SetGraph(graph_);
}
GraphCell::GraphCell(const Graph &graph) : graph_(std::make_shared<Graph>(graph)) { MS_EXCEPTION_IF_NULL(graph_); }


GraphCell::GraphCell(const std::shared_ptr<Graph> &graph)
: graph_(graph),
executor_(Factory<GraphCell::GraphImpl>::Instance().Create(GlobalContext::GetGlobalDeviceTarget())) {
MS_EXCEPTION_IF_NULL(graph_);
MS_EXCEPTION_IF_NULL(executor_);
executor_->SetGraph(graph_);
}
GraphCell::GraphCell(const std::shared_ptr<Graph> &graph) : graph_(graph) { MS_EXCEPTION_IF_NULL(graph_); }


GraphCell::GraphCell(Graph &&graph)
: graph_(std::make_shared<Graph>(graph)),
executor_(Factory<GraphCell::GraphImpl>::Instance().Create(GlobalContext::GetGlobalDeviceTarget())) {
MS_EXCEPTION_IF_NULL(graph_);
MS_EXCEPTION_IF_NULL(executor_);
executor_->SetGraph(graph_);
}
GraphCell::GraphCell(Graph &&graph) : graph_(std::make_shared<Graph>(graph)) { MS_EXCEPTION_IF_NULL(graph_); }


Status GraphCell::Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) { Status GraphCell::Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) {
MS_EXCEPTION_IF_NULL(executor_);
if (executor_ == nullptr) {
executor_ = Factory<GraphCell::GraphImpl>::Instance().Create(g_device_target);
if (executor_ == nullptr) {
MS_LOG(ERROR) << "Create graph impl for device target " << g_device_target << " failed.";
return kMEFailed;
}
executor_->SetGraph(graph_);
}
return executor_->Run(inputs, outputs); return executor_->Run(inputs, outputs);
} }


Status GraphCell::Load() {
MS_EXCEPTION_IF_NULL(executor_);
return executor_->Load();
Status GraphCell::Load(uint32_t device_id) {
if (executor_ == nullptr) {
executor_ = Factory<GraphCell::GraphImpl>::Instance().Create(g_device_target);
if (executor_ == nullptr) {
MS_LOG(ERROR) << "Create graph impl for device target " << g_device_target << " failed.";
return kMEFailed;
}
executor_->SetGraph(graph_);
}
return executor_->Load(device_id);
} }


std::vector<MSTensor> GraphCell::GetInputs() { std::vector<MSTensor> GraphCell::GetInputs() {
MS_EXCEPTION_IF_NULL(executor_);
if (executor_ == nullptr) {
executor_ = Factory<GraphCell::GraphImpl>::Instance().Create(g_device_target);
if (executor_ == nullptr) {
MS_LOG(ERROR) << "Create graph impl for device target " << g_device_target << " failed.";
return {};
}
executor_->SetGraph(graph_);
}
return executor_->GetInputs(); return executor_->GetInputs();
} }


std::vector<MSTensor> GraphCell::GetOutputs() { std::vector<MSTensor> GraphCell::GetOutputs() {
MS_EXCEPTION_IF_NULL(executor_);
if (executor_ == nullptr) {
executor_ = Factory<GraphCell::GraphImpl>::Instance().Create(g_device_target);
if (executor_ == nullptr) {
MS_LOG(ERROR) << "Create graph impl for device target " << g_device_target << " failed.";
return {};
}
executor_->SetGraph(graph_);
}
return executor_->GetOutputs(); return executor_->GetOutputs();
} }


InputAndOutput::InputAndOutput() : cell_(nullptr), prev_(), index_(-1) {} InputAndOutput::InputAndOutput() : cell_(nullptr), prev_(), index_(-1) {}


InputAndOutput::InputAndOutput(const MSTensor &tensor)
: cell_(std::make_shared<ParameterCell>(tensor.Clone())), prev_(), index_(-1) {}
InputAndOutput::InputAndOutput(const MSTensor &tensor) : prev_(), index_(-1) {
auto tmp_ptr = tensor.Clone();
cell_ = std::make_shared<ParameterCell>(*tmp_ptr);
MSTensor::DestroyTensorPtr(tmp_ptr);
}
InputAndOutput::InputAndOutput(MSTensor &&tensor) InputAndOutput::InputAndOutput(MSTensor &&tensor)
: cell_(std::make_shared<ParameterCell>(tensor)), prev_(), index_(-1) {} : cell_(std::make_shared<ParameterCell>(tensor)), prev_(), index_(-1) {}




+ 195
- 116
tests/ut/stub/cxx_api/context.cc View File

@@ -17,36 +17,57 @@
#include <any> #include <any>
#include <map> #include <map>
#include <type_traits> #include <type_traits>
#include "cxx_api/factory.h"
#include "utils/log_adapter.h" #include "utils/log_adapter.h"


constexpr auto kGlobalContextDeviceTarget = "mindspore.ascend.globalcontext.device_target";
constexpr auto kGlobalContextDeviceID = "mindspore.ascend.globalcontext.device_id";
constexpr auto kModelOptionInsertOpCfgPath = "mindspore.option.insert_op_config_file_path"; // aipp config file
constexpr auto kModelOptionInputFormat = "mindspore.option.input_format"; // nchw or nhwc
constexpr auto kModelOptionInputShape = "mindspore.option.input_shape";
constexpr auto kModelOptionCpuEnableFP16 = "mindspore.option.cpu.enable_fp16";
constexpr auto kModelOptionCpuThreadAffinity = "mindspore.option.cpu.thread_affinity";
constexpr auto kModelOptionMaliGpuEnableFP16 = "mindspore.option.mali_gpu.enable_fp16";
constexpr auto kModelOptionKirinNpuFrequency = "mindspore.option.kirin_npu.frequency";
constexpr auto kModelOptionDeviceID = "mindspore.option.device_id";
constexpr auto kModelOptionNvidiaGpuDeviceID = kModelOptionDeviceID;
constexpr auto kModelOptionNvidiaGpuTrtInferMode = "mindspore.option.nvidia_gpu.trt_infer_mode";
constexpr auto kModelOptionAscend910DeviceID = kModelOptionDeviceID;
constexpr auto kModelOptionAscend310DeviceID = kModelOptionDeviceID;
constexpr auto kModelOptionAscend310DumpCfgPath = "mindspore.option.ascend310.dump_config_file_path";
constexpr auto kModelOptionAscend310InsertOpCfgPath =
"mindspore.option.ascend310.insert_op_config_file_path"; // aipp config file
constexpr auto kModelOptionAscend310InputFormat = "mindspore.option.ascend310.input_format"; // nchw or nhwc
constexpr auto kModelOptionAscend310InputShapeMap = "mindspore.option.ascend310.input_shape_map";
constexpr auto kModelOptionAscend310InputShape = "mindspore.option.ascend310.input_shape";
// Mandatory while dynamic batch: e.g. "input_op_name1: n1,c2,h3,w4;input_op_name2: n4,c3,h2,w1" // Mandatory while dynamic batch: e.g. "input_op_name1: n1,c2,h3,w4;input_op_name2: n4,c3,h2,w1"
constexpr auto kModelOptionOutputType = "mindspore.option.output_type"; // "FP32", "UINT8" or "FP16", default as "FP32"
constexpr auto kModelOptionPrecisionMode = "mindspore.option.precision_mode";
constexpr auto kModelOptionAscend310OutputType =
"mindspore.option.ascend310.output_type"; // "FP32", "UINT8" or "FP16", default as "FP32"
constexpr auto kModelOptionAscend310PrecisionMode = "mindspore.option.ascend310.precision_mode";
// "force_fp16", "allow_fp32_to_fp16", "must_keep_origin_dtype" or "allow_mix_precision", default as "force_fp16" // "force_fp16", "allow_fp32_to_fp16", "must_keep_origin_dtype" or "allow_mix_precision", default as "force_fp16"
constexpr auto kModelOptionOpSelectImplMode = "mindspore.option.op_select_impl_mode";
constexpr auto kModelOptionAscend310OpSelectImplMode = "mindspore.option.ascend310.op_select_impl_mode";
constexpr auto KModelOptionAscend310FusionSwitchCfgPath = "mindspore.option.ascend310.fusion_switch_config_file_path";
// "False": Inference with native backend, "True": Inference with Tensor-RT engine, default as "False" // "False": Inference with native backend, "True": Inference with Tensor-RT engine, default as "False"
constexpr auto kModelOptionGpuTrtInferMode = "mindspore.option.gpu_trt_infer_mode";
constexpr auto kModelOptionAscend310DynamicBatchSize = "mindspore.option.ascend310.dynamic_batch_size";


namespace mindspore { namespace mindspore {
class Allocator {};

struct Context::Data { struct Context::Data {
std::vector<std::shared_ptr<DeviceInfoContext>> device_info_list;
int32_t thread_num;
std::shared_ptr<Allocator> allocator;
};

struct DeviceInfoContext::Data {
std::map<std::string, std::any> params; std::map<std::string, std::any> params;
}; };


Context::Context() : data(std::make_shared<Data>()) {}
Context::Context() : data_(std::make_shared<Data>()) {}


template <class T, typename U = std::remove_cv_t<std::remove_reference_t<T>>> template <class T, typename U = std::remove_cv_t<std::remove_reference_t<T>>>
static const U &GetValue(const std::shared_ptr<Context> &context, const std::string &key) {
static const U &GetValue(const std::shared_ptr<DeviceInfoContext::Data> &data, const std::string &key) {
static U empty_result; static U empty_result;
if (context == nullptr || context->data == nullptr) {
if (data == nullptr) {
return empty_result; return empty_result;
} }
auto iter = context->data->params.find(key);
if (iter == context->data->params.end()) {
auto iter = data->params.find(key);
if (iter == data->params.end()) {
return empty_result; return empty_result;
} }
const std::any &value = iter->second; const std::any &value = iter->second;
@@ -57,147 +78,205 @@ static const U &GetValue(const std::shared_ptr<Context> &context, const std::str
return std::any_cast<const U &>(value); return std::any_cast<const U &>(value);
} }


std::shared_ptr<Context> GlobalContext::GetGlobalContext() {
static std::shared_ptr<Context> g_context = std::make_shared<Context>();
return g_context;
void Context::SetThreadNum(int32_t thread_num) {
MS_EXCEPTION_IF_NULL(data_);
data_->thread_num = thread_num;
}
int32_t Context::GetThreadNum() const {
MS_EXCEPTION_IF_NULL(data_);
return data_->thread_num;
} }


void GlobalContext::SetGlobalDeviceTarget(const std::vector<char> &device_target) {
auto global_context = GetGlobalContext();
MS_EXCEPTION_IF_NULL(global_context);
if (global_context->data == nullptr) {
global_context->data = std::make_shared<Data>();
MS_EXCEPTION_IF_NULL(global_context->data);
}
global_context->data->params[kGlobalContextDeviceTarget] = CharToString(device_target);
void Context::SetAllocator(const std::shared_ptr<Allocator> &allocator) {
MS_EXCEPTION_IF_NULL(data_);
data_->allocator = allocator;
}
std::shared_ptr<Allocator> Context::GetAllocator() const {
MS_EXCEPTION_IF_NULL(data_);
return data_->allocator;
} }


std::vector<char> GlobalContext::GetGlobalDeviceTargetChar() {
auto global_context = GetGlobalContext();
MS_EXCEPTION_IF_NULL(global_context);
const std::string &ref = GetValue<std::string>(global_context, kGlobalContextDeviceTarget);
return StringToChar(ref);
std::vector<std::shared_ptr<DeviceInfoContext>> &Context::MutableDeviceInfo() {
MS_EXCEPTION_IF_NULL(data_);
return data_->device_info_list;
} }


void GlobalContext::SetGlobalDeviceID(const uint32_t &device_id) {
auto global_context = GetGlobalContext();
MS_EXCEPTION_IF_NULL(global_context);
if (global_context->data == nullptr) {
global_context->data = std::make_shared<Data>();
MS_EXCEPTION_IF_NULL(global_context->data);
}
global_context->data->params[kGlobalContextDeviceID] = device_id;
DeviceInfoContext::DeviceInfoContext() : data_(std::make_shared<Data>()) {}

void CPUDeviceInfo::SetEnableFP16(bool is_fp16) {
MS_EXCEPTION_IF_NULL(data_);
data_->params[kModelOptionCpuEnableFP16] = is_fp16;
}
bool CPUDeviceInfo::GetEnableFP16() const {
MS_EXCEPTION_IF_NULL(data_);
return GetValue<bool>(data_, kModelOptionCpuEnableFP16);
} }


uint32_t GlobalContext::GetGlobalDeviceID() {
auto global_context = GetGlobalContext();
MS_EXCEPTION_IF_NULL(global_context);
return GetValue<uint32_t>(global_context, kGlobalContextDeviceID);
void CPUDeviceInfo::SetThreadAffinity(int affinity) {
MS_EXCEPTION_IF_NULL(data_);
data_->params[kModelOptionCpuThreadAffinity] = affinity;
}
int CPUDeviceInfo::GetThreadAffinity() const {
MS_EXCEPTION_IF_NULL(data_);
return GetValue<bool>(data_, kModelOptionCpuThreadAffinity);
} }


void ModelContext::SetInsertOpConfigPath(const std::shared_ptr<Context> &context, const std::vector<char> &cfg_path) {
MS_EXCEPTION_IF_NULL(context);
if (context->data == nullptr) {
context->data = std::make_shared<Data>();
MS_EXCEPTION_IF_NULL(context->data);
}
context->data->params[kModelOptionInsertOpCfgPath] = CharToString(cfg_path);
void MaliGPUDeviceInfo::SetEnableFP16(bool is_fp16) {
MS_EXCEPTION_IF_NULL(data_);
data_->params[kModelOptionMaliGpuEnableFP16] = is_fp16;
}
bool MaliGPUDeviceInfo::GetEnableFP16() const {
MS_EXCEPTION_IF_NULL(data_);
return GetValue<bool>(data_, kModelOptionMaliGpuEnableFP16);
} }


std::vector<char> ModelContext::GetInsertOpConfigPathChar(const std::shared_ptr<Context> &context) {
MS_EXCEPTION_IF_NULL(context);
const std::string &ref = GetValue<std::string>(context, kModelOptionInsertOpCfgPath);
return StringToChar(ref);
void KirinNPUDeviceInfo::SetFrequency(int frequency) {
MS_EXCEPTION_IF_NULL(data_);
data_->params[kModelOptionKirinNpuFrequency] = frequency;
}
int KirinNPUDeviceInfo::GetFrequency() const {
MS_EXCEPTION_IF_NULL(data_);
return GetValue<int>(data_, kModelOptionKirinNpuFrequency);
} }


void ModelContext::SetInputFormat(const std::shared_ptr<Context> &context, const std::vector<char> &format) {
MS_EXCEPTION_IF_NULL(context);
if (context->data == nullptr) {
context->data = std::make_shared<Data>();
MS_EXCEPTION_IF_NULL(context->data);
}
context->data->params[kModelOptionInputFormat] = CharToString(format);
void NvidiaGPUDeviceInfo::SetDeviceID(uint32_t device_id) {
MS_EXCEPTION_IF_NULL(data_);
data_->params[kModelOptionNvidiaGpuDeviceID] = device_id;
}
uint32_t NvidiaGPUDeviceInfo::GetDeviceID() const {
MS_EXCEPTION_IF_NULL(data_);
return GetValue<uint32_t>(data_, kModelOptionNvidiaGpuDeviceID);
} }


std::vector<char> ModelContext::GetInputFormatChar(const std::shared_ptr<Context> &context) {
MS_EXCEPTION_IF_NULL(context);
const std::string &ref = GetValue<std::string>(context, kModelOptionInputFormat);
return StringToChar(ref);
void NvidiaGPUDeviceInfo::SetGpuTrtInferMode(bool gpu_trt_infer_mode) {
MS_EXCEPTION_IF_NULL(data_);
data_->params[kModelOptionNvidiaGpuTrtInferMode] = gpu_trt_infer_mode;
}
bool NvidiaGPUDeviceInfo::GetGpuTrtInferMode() const {
MS_EXCEPTION_IF_NULL(data_);
return GetValue<bool>(data_, kModelOptionNvidiaGpuTrtInferMode);
} }


void ModelContext::SetInputShape(const std::shared_ptr<Context> &context, const std::vector<char> &shape) {
MS_EXCEPTION_IF_NULL(context);
if (context->data == nullptr) {
context->data = std::make_shared<Data>();
MS_EXCEPTION_IF_NULL(context->data);
}
context->data->params[kModelOptionInputShape] = CharToString(shape);
void Ascend910DeviceInfo::SetDeviceID(uint32_t device_id) {
MS_EXCEPTION_IF_NULL(data_);
data_->params[kModelOptionAscend910DeviceID] = device_id;
}
uint32_t Ascend910DeviceInfo::GetDeviceID() const {
MS_EXCEPTION_IF_NULL(data_);
return GetValue<uint32_t>(data_, kModelOptionAscend910DeviceID);
} }


std::vector<char> ModelContext::GetInputShapeChar(const std::shared_ptr<Context> &context) {
MS_EXCEPTION_IF_NULL(context);
const std::string &ref = GetValue<std::string>(context, kModelOptionInputShape);
return StringToChar(ref);
void Ascend310DeviceInfo::SetDeviceID(uint32_t device_id) {
MS_EXCEPTION_IF_NULL(data_);
data_->params[kModelOptionAscend310DeviceID] = device_id;
}
uint32_t Ascend310DeviceInfo::GetDeviceID() const {
MS_EXCEPTION_IF_NULL(data_);
return GetValue<uint32_t>(data_, kModelOptionAscend310DeviceID);
} }


void ModelContext::SetOutputType(const std::shared_ptr<Context> &context, enum DataType output_type) {
MS_EXCEPTION_IF_NULL(context);
if (context->data == nullptr) {
context->data = std::make_shared<Data>();
MS_EXCEPTION_IF_NULL(context->data);
}
context->data->params[kModelOptionOutputType] = output_type;
void Ascend310DeviceInfo::SetDumpConfigPath(const std::vector<char> &cfg_path) {
MS_EXCEPTION_IF_NULL(data_);
data_->params[kModelOptionAscend310DumpCfgPath] = CharToString(cfg_path);
}
std::vector<char> Ascend310DeviceInfo::GetDumpConfigPathChar() const {
MS_EXCEPTION_IF_NULL(data_);
const std::string &ref = GetValue<std::string>(data_, kModelOptionAscend310DeviceID);
return StringToChar(ref);
} }


enum DataType ModelContext::GetOutputType(const std::shared_ptr<Context> &context) {
MS_EXCEPTION_IF_NULL(context);
return GetValue<enum DataType>(context, kModelOptionOutputType);
void Ascend310DeviceInfo::SetInsertOpConfigPath(const std::vector<char> &cfg_path) {
MS_EXCEPTION_IF_NULL(data_);
data_->params[kModelOptionAscend310InsertOpCfgPath] = CharToString(cfg_path);
}
std::vector<char> Ascend310DeviceInfo::GetInsertOpConfigPathChar() const {
MS_EXCEPTION_IF_NULL(data_);
const std::string &ref = GetValue<std::string>(data_, kModelOptionAscend310InsertOpCfgPath);
return StringToChar(ref);
} }


void ModelContext::SetPrecisionMode(const std::shared_ptr<Context> &context, const std::vector<char> &precision_mode) {
MS_EXCEPTION_IF_NULL(context);
if (context->data == nullptr) {
context->data = std::make_shared<Data>();
MS_EXCEPTION_IF_NULL(context->data);
}
context->data->params[kModelOptionPrecisionMode] = CharToString(precision_mode);
void Ascend310DeviceInfo::SetInputFormat(const std::vector<char> &format) {
MS_EXCEPTION_IF_NULL(data_);
data_->params[kModelOptionAscend310InputFormat] = CharToString(format);
}
std::vector<char> Ascend310DeviceInfo::GetInputFormatChar() const {
MS_EXCEPTION_IF_NULL(data_);
const std::string &ref = GetValue<std::string>(data_, kModelOptionAscend310InputFormat);
return StringToChar(ref);
} }


std::vector<char> ModelContext::GetPrecisionModeChar(const std::shared_ptr<Context> &context) {
MS_EXCEPTION_IF_NULL(context);
const std::string &ref = GetValue<std::string>(context, kModelOptionPrecisionMode);
void Ascend310DeviceInfo::SetInputShape(const std::vector<char> &shape) {
MS_EXCEPTION_IF_NULL(data_);
data_->params[kModelOptionAscend310InputShape] = CharToString(shape);
}
std::vector<char> Ascend310DeviceInfo::GetInputShapeChar() const {
MS_EXCEPTION_IF_NULL(data_);
const std::string &ref = GetValue<std::string>(data_, kModelOptionAscend310InputShape);
return StringToChar(ref); return StringToChar(ref);
} }


void ModelContext::SetOpSelectImplMode(const std::shared_ptr<Context> &context,
const std::vector<char> &op_select_impl_mode) {
MS_EXCEPTION_IF_NULL(context);
if (context->data == nullptr) {
context->data = std::make_shared<Data>();
MS_EXCEPTION_IF_NULL(context->data);
void Ascend310DeviceInfo::SetDynamicBatchSize(const std::vector<size_t> &dynamic_batch_size) {
MS_EXCEPTION_IF_NULL(data_);
std::string batchs = "";
for (size_t i = 0; i < dynamic_batch_size.size(); ++i) {
if (i != 0) {
batchs.push_back(',');
}
batchs += std::to_string(dynamic_batch_size[i]);
} }
context->data->params[kModelOptionOpSelectImplMode] = CharToString(op_select_impl_mode);
data_->params[kModelOptionAscend310DynamicBatchSize] = batchs;
}
std::vector<char> Ascend310DeviceInfo::GetDynamicBatchSizeChar() const {
MS_EXCEPTION_IF_NULL(data_);
const std::string &ref = GetValue<std::string>(data_, kModelOptionAscend310DynamicBatchSize);
return StringToChar(ref);
} }


std::vector<char> ModelContext::GetOpSelectImplModeChar(const std::shared_ptr<Context> &context) {
MS_EXCEPTION_IF_NULL(context);
const std::string &ref = GetValue<std::string>(context, kModelOptionOpSelectImplMode);
void Ascend310DeviceInfo::SetPrecisionMode(const std::vector<char> &precision_mode) {
MS_EXCEPTION_IF_NULL(data_);
data_->params[kModelOptionAscend310PrecisionMode] = CharToString(precision_mode);
}
std::vector<char> Ascend310DeviceInfo::GetPrecisionModeChar() const {
MS_EXCEPTION_IF_NULL(data_);
const std::string &ref = GetValue<std::string>(data_, kModelOptionAscend310PrecisionMode);
return StringToChar(ref); return StringToChar(ref);
} }


void ModelContext::SetGpuTrtInferMode(const std::shared_ptr<Context> &context,
const std::vector<char> &gpu_trt_infer_mode) {
MS_EXCEPTION_IF_NULL(context);
if (context->data == nullptr) {
context->data = std::make_shared<Data>();
MS_EXCEPTION_IF_NULL(context->data);
}
context->data->params[kModelOptionGpuTrtInferMode] = CharToString(gpu_trt_infer_mode);
void Ascend310DeviceInfo::SetOpSelectImplMode(const std::vector<char> &op_select_impl_mode) {
MS_EXCEPTION_IF_NULL(data_);
data_->params[kModelOptionAscend310OpSelectImplMode] = CharToString(op_select_impl_mode);
}
std::vector<char> Ascend310DeviceInfo::GetOpSelectImplModeChar() const {
MS_EXCEPTION_IF_NULL(data_);
const std::string &ref = GetValue<std::string>(data_, kModelOptionAscend310OpSelectImplMode);
return StringToChar(ref);
} }


std::vector<char> ModelContext::GetGpuTrtInferModeChar(const std::shared_ptr<Context> &context) {
MS_EXCEPTION_IF_NULL(context);
const std::string &ref = GetValue<std::string>(context, kModelOptionGpuTrtInferMode);
void Ascend310DeviceInfo::SetFusionSwitchConfigPath(const std::vector<char> &cfg_path) {
MS_EXCEPTION_IF_NULL(data_);
data_->params[KModelOptionAscend310FusionSwitchCfgPath] = CharToString(cfg_path);
}
std::vector<char> Ascend310DeviceInfo::GetFusionSwitchConfigPathChar() const {
MS_EXCEPTION_IF_NULL(data_);
const std::string &ref = GetValue<std::string>(data_, KModelOptionAscend310FusionSwitchCfgPath);
return StringToChar(ref); return StringToChar(ref);
} }

void Ascend310DeviceInfo::SetInputShapeMap(const std::map<int, std::vector<int>> &shape) {
MS_EXCEPTION_IF_NULL(data_);
data_->params[kModelOptionAscend310InputShapeMap] = shape;
}
std::map<int, std::vector<int>> Ascend310DeviceInfo::GetInputShapeMap() const {
MS_EXCEPTION_IF_NULL(data_);
return GetValue<std::map<int, std::vector<int>>>(data_, kModelOptionAscend310InputShapeMap);
}

void Ascend310DeviceInfo::SetOutputType(enum DataType output_type) {
MS_EXCEPTION_IF_NULL(data_);
data_->params[kModelOptionAscend310OutputType] = output_type;
}
enum DataType Ascend310DeviceInfo::GetOutputType() const {
MS_EXCEPTION_IF_NULL(data_);
return GetValue<enum DataType>(data_, kModelOptionAscend310OutputType);
}
} // namespace mindspore } // namespace mindspore

+ 2
- 0
tests/ut/stub/cxx_api/factory.h View File

@@ -24,6 +24,8 @@
#include "utils/utils.h" #include "utils/utils.h"


namespace mindspore { namespace mindspore {
inline std::string g_device_target = "Default";

template <class T> template <class T>
class Factory { class Factory {
using U = std::function<std::shared_ptr<T>()>; using U = std::function<std::shared_ptr<T>()>;


+ 1
- 1
tests/ut/stub/cxx_api/graph/ascend/ascend_graph_impl.cc View File

@@ -42,7 +42,7 @@ std::vector<MSTensor> AscendGraphImpl::GetOutputs() {
return graph_imp_stub_->GetOutputs(); return graph_imp_stub_->GetOutputs();
} }


Status AscendGraphImpl::Load() { return kSuccess; }
Status AscendGraphImpl::Load(uint32_t device_id) { return kSuccess; }


Status AscendGraphImpl::Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) { Status AscendGraphImpl::Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) {
if (!graph_imp_stub_) { if (!graph_imp_stub_) {


+ 1
- 2
tests/ut/stub/cxx_api/graph/ascend/ascend_graph_impl.h View File

@@ -27,14 +27,13 @@
#include "cxx_api/model/model_impl.h" #include "cxx_api/model/model_impl.h"


namespace mindspore { namespace mindspore {

class AscendGraphImpl : public GraphCell::GraphImpl { class AscendGraphImpl : public GraphCell::GraphImpl {
public: public:
AscendGraphImpl(); AscendGraphImpl();
~AscendGraphImpl() override; ~AscendGraphImpl() override;


Status Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) override; Status Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) override;
Status Load() override;
Status Load(uint32_t device_id) override;
std::vector<MSTensor> GetInputs() override; std::vector<MSTensor> GetInputs() override;
std::vector<MSTensor> GetOutputs() override; std::vector<MSTensor> GetOutputs() override;




+ 4
- 0
tests/ut/stub/cxx_api/graph/graph.cc View File

@@ -18,6 +18,8 @@
#include "utils/log_adapter.h" #include "utils/log_adapter.h"


namespace mindspore { namespace mindspore {
Graph::Graph() : graph_data_(nullptr) {}

Graph::Graph(const std::shared_ptr<GraphData> &graph_data) : graph_data_(graph_data) {} Graph::Graph(const std::shared_ptr<GraphData> &graph_data) : graph_data_(graph_data) {}


Graph::Graph(std::shared_ptr<GraphData> &&graph_data) : graph_data_(graph_data) {} Graph::Graph(std::shared_ptr<GraphData> &&graph_data) : graph_data_(graph_data) {}
@@ -28,6 +30,8 @@ Graph::Graph(std::nullptr_t) : graph_data_(nullptr) {}


bool Graph::operator==(std::nullptr_t) const { return graph_data_ == nullptr; } bool Graph::operator==(std::nullptr_t) const { return graph_data_ == nullptr; }


bool Graph::operator!=(std::nullptr_t) const { return graph_data_ != nullptr; }

ModelType Graph::ModelType() const { ModelType Graph::ModelType() const {
MS_EXCEPTION_IF_NULL(graph_data_); MS_EXCEPTION_IF_NULL(graph_data_);
return graph_data_->ModelType(); return graph_data_->ModelType();


+ 2
- 2
tests/ut/stub/cxx_api/graph/graph_impl.h View File

@@ -29,14 +29,14 @@
namespace mindspore { namespace mindspore {
class GraphCell::GraphImpl { class GraphCell::GraphImpl {
public: public:
GraphImpl() = default;
GraphImpl() : graph_(nullptr) {}
virtual ~GraphImpl() = default; virtual ~GraphImpl() = default;


std::shared_ptr<Graph::GraphData> &MutableGraphData() const { return graph_->graph_data_; } std::shared_ptr<Graph::GraphData> &MutableGraphData() const { return graph_->graph_data_; }
void SetGraph(const std::shared_ptr<Graph> &graph) { graph_ = graph; } void SetGraph(const std::shared_ptr<Graph> &graph) { graph_ = graph; }


virtual Status Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) = 0; virtual Status Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) = 0;
virtual Status Load() = 0;
virtual Status Load(uint32_t device_id) = 0;


virtual std::vector<MSTensor> GetInputs() = 0; virtual std::vector<MSTensor> GetInputs() = 0;
virtual std::vector<MSTensor> GetOutputs() = 0; virtual std::vector<MSTensor> GetOutputs() = 0;


+ 92
- 22
tests/ut/stub/cxx_api/model/model.cc View File

@@ -21,60 +21,130 @@


namespace mindspore { namespace mindspore {
namespace { namespace {
const std::map<std::string, std::set<ModelType>> kSupportedModelMap = {
{kDeviceTypeAscend310, {kOM, kMindIR}},
{kDeviceTypeAscend910, {kMindIR}},
{kDeviceTypeGPU, {kMindIR}},
const std::map<enum DeviceType, std::set<ModelType>> kSupportedModelMap = {
{kAscend310, {kOM, kMindIR}},
{kAscend910, {kMindIR}},
{kNvidiaGPU, {kMindIR}},
}; };

std::string GetDeviceTypeString(enum DeviceType type) {
static const std::map<enum DeviceType, std::string> kDeviceTypeStrs = {
{kCPU, "CPU"}, {kMaliGPU, "MaliGPU"}, {kNvidiaGPU, "GPU"},
{kKirinNPU, "KirinGPU"}, {kAscend910, "Ascend910"}, {kAscend310, "Ascend310"},
};
auto iter = kDeviceTypeStrs.find(type);
if (iter != kDeviceTypeStrs.end()) {
return iter->second;
}

return "InvalidDeviceType" + std::to_string(type);
} }
Status Model::Build() {
MS_EXCEPTION_IF_NULL(impl_);
} // namespace
Status Model::Build(GraphCell graph_cell, const std::shared_ptr<Context> &model_context) {
if (graph_cell.GetGraph() == nullptr) {
MS_LOG(ERROR) << "Invalid graph input.";
return kMCInvalidInput;
}

if (model_context == nullptr) {
MS_LOG(ERROR) << "Invalid model context.";
return kMCInvalidInput;
}
auto &device_info = model_context->MutableDeviceInfo();
if (device_info.size() != 1) {
MS_LOG(ERROR) << "Invalid model context, only single device info is supported.";
return kMCInvalidInput;
}

std::string device_target = GetDeviceTypeString(device_info[0]->GetDeviceType());
impl_ = Factory<ModelImpl>::Instance().Create(device_target);
if (impl_ == nullptr) {
MS_LOG(ERROR) << "Create session type " << device_target << " failed";
return kMEFailed;
}

g_device_target = device_target;

impl_->SetGraph(std::make_shared<Graph>(*graph_cell.GetGraph()));
impl_->SetContext(model_context);

return impl_->Build(); return impl_->Build();
} }


Status Model::Resize(const std::vector<MSTensor> &inputs, const std::vector<std::vector<int64_t>> &dims) { Status Model::Resize(const std::vector<MSTensor> &inputs, const std::vector<std::vector<int64_t>> &dims) {
MS_EXCEPTION_IF_NULL(impl_);
if (impl_ == nullptr) {
MS_LOG(ERROR) << "Failed because this model has not been built.";
return kMCFailed;
}
return impl_->Resize(inputs, dims); return impl_->Resize(inputs, dims);
} }


Status Model::Predict(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) { Status Model::Predict(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) {
MS_EXCEPTION_IF_NULL(impl_);
if (impl_ == nullptr) {
MS_LOG(ERROR) << "Failed because this model has not been built.";
return kMCFailed;
}
return impl_->Predict(inputs, outputs); return impl_->Predict(inputs, outputs);
} }


std::vector<MSTensor> Model::GetInputs() { std::vector<MSTensor> Model::GetInputs() {
MS_EXCEPTION_IF_NULL(impl_);
if (impl_ == nullptr) {
MS_LOG(ERROR) << "Failed because this model has not been built.";
return {};
}
return impl_->GetInputs(); return impl_->GetInputs();
} }


std::vector<MSTensor> Model::GetOutputs() { std::vector<MSTensor> Model::GetOutputs() {
MS_EXCEPTION_IF_NULL(impl_);
if (impl_ == nullptr) {
MS_LOG(ERROR) << "Failed because this model has not been built.";
return {};
}
return impl_->GetOutputs(); return impl_->GetOutputs();
} }


Model::Model(const GraphCell &graph_cell, const std::shared_ptr<Context> &model_context)
: impl_(Factory<ModelImpl>::Instance().Create(mindspore::GlobalContext::GetGlobalDeviceTarget())) {
if (impl_ == nullptr) {
MS_LOG(EXCEPTION) << "Create session type " << mindspore::GlobalContext::GetGlobalDeviceTarget() << " failed";
MSTensor Model::GetInputByTensorName(const std::vector<char> &tensor_name) {
std::string tensor_name_str = CharToString(tensor_name);
auto inputs = GetInputs();
for (auto in : inputs) {
if (in.Name() == tensor_name_str) {
return in;
}
} }
MS_EXCEPTION_IF_NULL(graph_cell.GetGraph());
impl_->SetGraph(std::make_shared<Graph>(*graph_cell.GetGraph()));
impl_->SetContext(model_context);

return MSTensor(std::shared_ptr<MSTensor::Impl>(nullptr));
} }


Model::Model(const std::vector<Output> &network, const std::shared_ptr<Context> &model_context) {
MS_LOG(EXCEPTION) << "Unsupported feature.";
std::vector<std::vector<char>> Model::GetOutputTensorNamesChar() {
std::vector<std::vector<char>> ret;
auto outputs = GetOutputs();
std::transform(outputs.begin(), outputs.end(), std::back_inserter(ret),
[](MSTensor item) -> std::vector<char> { return StringToChar(item.Name()); });
return ret;
}

MSTensor Model::GetOutputByTensorName(const std::vector<char> &tensor_name) {
std::string tensor_name_str = CharToString(tensor_name);
auto outputs = GetOutputs();
for (auto out : outputs) {
if (out.Name() == tensor_name_str) {
return out;
}
}

return MSTensor(std::shared_ptr<MSTensor::Impl>(nullptr));
} }


Model::Model() : impl_(nullptr) {}
Model::~Model() {} Model::~Model() {}


bool Model::CheckModelSupport(const std::vector<char> &device_type, ModelType model_type) {
std::string device_type_str = CharToString(device_type);
bool Model::CheckModelSupport(enum DeviceType device_type, ModelType model_type) {
std::string device_type_str = GetDeviceTypeString(device_type);
if (!Factory<ModelImpl>::Instance().CheckModelSupport(device_type_str)) { if (!Factory<ModelImpl>::Instance().CheckModelSupport(device_type_str)) {
return false; return false;
} }


auto first_iter = kSupportedModelMap.find(device_type_str);
auto first_iter = kSupportedModelMap.find(device_type);
if (first_iter == kSupportedModelMap.end()) { if (first_iter == kSupportedModelMap.end()) {
return false; return false;
} }


+ 2
- 2
tests/ut/stub/cxx_api/model/model_impl.h View File

@@ -42,9 +42,9 @@ class ModelImpl {
virtual std::vector<MSTensor> GetOutputs() = 0; virtual std::vector<MSTensor> GetOutputs() = 0;


protected: protected:
Status Load(const std::shared_ptr<GraphCell> &graph_cell) {
Status Load(const std::shared_ptr<GraphCell> &graph_cell, uint32_t device_id) {
MS_EXCEPTION_IF_NULL(graph_cell); MS_EXCEPTION_IF_NULL(graph_cell);
return graph_cell->Load();
return graph_cell->Load(device_id);
} }


FuncGraphPtr GetFuncGraph() const { FuncGraphPtr GetFuncGraph() const {


+ 25
- 2
tests/ut/stub/cxx_api/model/ms/ms_model.cc View File

@@ -53,7 +53,7 @@ std::shared_ptr<GraphCell> MsModel::GenerateGraphCell(const std::vector<std::vec
MS_EXCEPTION_IF_NULL(graph); MS_EXCEPTION_IF_NULL(graph);
auto graph_cell = std::make_shared<GraphCell>(graph); auto graph_cell = std::make_shared<GraphCell>(graph);
MS_EXCEPTION_IF_NULL(graph_cell); MS_EXCEPTION_IF_NULL(graph_cell);
auto ret = ModelImpl::Load(graph_cell);
auto ret = ModelImpl::Load(graph_cell, GetDeviceID());
if (ret != kSuccess) { if (ret != kSuccess) {
MS_LOG(ERROR) << "Load failed."; MS_LOG(ERROR) << "Load failed.";
return nullptr; return nullptr;
@@ -78,7 +78,7 @@ Status MsModel::Build() {
MS_EXCEPTION_IF_NULL(graph); MS_EXCEPTION_IF_NULL(graph);
auto graph_cell = std::make_shared<GraphCell>(graph); auto graph_cell = std::make_shared<GraphCell>(graph);
MS_EXCEPTION_IF_NULL(graph_cell); MS_EXCEPTION_IF_NULL(graph_cell);
auto ret = ModelImpl::Load(graph_cell);
auto ret = ModelImpl::Load(graph_cell, GetDeviceID());
if (ret != kSuccess) { if (ret != kSuccess) {
MS_LOG(ERROR) << "Load failed."; MS_LOG(ERROR) << "Load failed.";
return ret; return ret;
@@ -149,4 +149,27 @@ std::vector<MSTensor> MsModel::GetOutputs() {
MS_EXCEPTION_IF_NULL(graph_cell_); MS_EXCEPTION_IF_NULL(graph_cell_);
return graph_cell_->GetOutputs(); return graph_cell_->GetOutputs();
} }

uint32_t MsModel::GetDeviceID() const {
if (model_context_ == nullptr) {
return 0;
}

auto &device_infos = model_context_->MutableDeviceInfo();
if (device_infos.size() != 1) {
return 0;
}

auto ascend910_info = device_infos[0]->Cast<Ascend910DeviceInfo>();
if (ascend910_info != nullptr) {
return ascend910_info->GetDeviceID();
}

auto gpu_info = device_infos[0]->Cast<NvidiaGPUDeviceInfo>();
if (gpu_info != nullptr) {
return gpu_info->GetDeviceID();
}

return 0;
}
} // namespace mindspore } // namespace mindspore

+ 1
- 0
tests/ut/stub/cxx_api/model/ms/ms_model.h View File

@@ -42,6 +42,7 @@ class MsModel : public ModelImpl {


private: private:
std::shared_ptr<GraphCell> GenerateGraphCell(const std::vector<std::vector<int64_t>> &dims); std::shared_ptr<GraphCell> GenerateGraphCell(const std::vector<std::vector<int64_t>> &dims);
uint32_t GetDeviceID() const;


std::shared_ptr<GraphCell> graph_cell_; std::shared_ptr<GraphCell> graph_cell_;
std::map<std::string, std::shared_ptr<GraphCell>> dynamic_size_graph_map_; std::map<std::string, std::shared_ptr<GraphCell>> dynamic_size_graph_map_;


+ 28
- 9
tests/ut/stub/cxx_api/serialization.cc View File

@@ -67,17 +67,31 @@ static Buffer ReadFile(const std::string &file) {
return buffer; return buffer;
} }


Graph Serialization::LoadModel(const void *model_data, size_t data_size, ModelType model_type) {
Status Serialization::Load(const void *model_data, size_t data_size, ModelType model_type, Graph *graph) {
if (graph == nullptr) {
MS_LOG(ERROR) << "Output args graph is nullptr.";
return kMEInvalidInput;
}

if (model_type == kMindIR) { if (model_type == kMindIR) {
auto anf_graph = std::make_shared<FuncGraph>(); auto anf_graph = std::make_shared<FuncGraph>();
return Graph(std::make_shared<Graph::GraphData>(anf_graph, kMindIR));
*graph = Graph(std::make_shared<Graph::GraphData>(anf_graph, kMindIR));
return kSuccess;
} else if (model_type == kOM) { } else if (model_type == kOM) {
return Graph(std::make_shared<Graph::GraphData>(Buffer(model_data, data_size), kOM));
*graph = Graph(std::make_shared<Graph::GraphData>(Buffer(model_data, data_size), kOM));
return kSuccess;
} }
MS_LOG(EXCEPTION) << "Unsupported ModelType " << model_type;

MS_LOG(ERROR) << "Unsupported ModelType " << model_type;
return kMEInvalidInput;
} }


Graph Serialization::LoadModel(const std::vector<char> &file, ModelType model_type) {
Status Serialization::Load(const std::vector<char> &file, ModelType model_type, Graph *graph) {
if (graph == nullptr) {
MS_LOG(ERROR) << "Output args graph is nullptr.";
return kMEInvalidInput;
}

std::string file_path = CharToString(file); std::string file_path = CharToString(file);
Buffer data = ReadFile(file_path); Buffer data = ReadFile(file_path);
if (data.Data() == nullptr) { if (data.Data() == nullptr) {
@@ -86,13 +100,18 @@ Graph Serialization::LoadModel(const std::vector<char> &file, ModelType model_ty
if (model_type == kMindIR) { if (model_type == kMindIR) {
auto anf_graph = std::make_shared<FuncGraph>(); auto anf_graph = std::make_shared<FuncGraph>();
if (anf_graph == nullptr) { if (anf_graph == nullptr) {
MS_LOG(EXCEPTION) << "Load model failed.";
MS_LOG(ERROR) << "Load model failed.";
return kMEInvalidInput;
} }
return Graph(std::make_shared<Graph::GraphData>(anf_graph, kMindIR));
*graph = Graph(std::make_shared<Graph::GraphData>(anf_graph, kMindIR));
return kSuccess;
} else if (model_type == kOM) { } else if (model_type == kOM) {
return Graph(std::make_shared<Graph::GraphData>(data, kOM));
*graph = Graph(std::make_shared<Graph::GraphData>(data, kOM));
return kSuccess;
} }
MS_LOG(EXCEPTION) << "Unsupported ModelType " << model_type;

MS_LOG(ERROR) << "Unsupported ModelType " << model_type;
return kMEInvalidInput;
} }


Status Serialization::LoadCheckPoint(const std::string &ckpt_file, std::map<std::string, Buffer> *parameters) { Status Serialization::LoadCheckPoint(const std::string &ckpt_file, std::map<std::string, Buffer> *parameters) {


+ 130
- 14
tests/ut/stub/cxx_api/types.cc View File

@@ -133,33 +133,139 @@ class TensorReferenceImpl : public MSTensor::Impl {
std::vector<int64_t> shape_; std::vector<int64_t> shape_;
}; };


MSTensor MSTensor::CreateTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept {
MSTensor *MSTensor::CreateTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept {
std::string name_str = CharToString(name); std::string name_str = CharToString(name);
try { try {
std::shared_ptr<Impl> impl = std::make_shared<TensorDefaultImpl>(name_str, type, shape, data, data_len); std::shared_ptr<Impl> impl = std::make_shared<TensorDefaultImpl>(name_str, type, shape, data, data_len);
return MSTensor(impl);
MSTensor *ret = new MSTensor(impl);
return ret;
} catch (const std::bad_alloc &) { } catch (const std::bad_alloc &) {
MS_LOG(ERROR) << "Malloc memory failed."; MS_LOG(ERROR) << "Malloc memory failed.";
return MSTensor(nullptr);
return nullptr;
} catch (...) { } catch (...) {
MS_LOG(ERROR) << "Unknown error occurred."; MS_LOG(ERROR) << "Unknown error occurred.";
return MSTensor(nullptr);
return nullptr;
} }
} }


MSTensor MSTensor::CreateRefTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept {
MSTensor *MSTensor::CreateRefTensor(const std::vector<char> &name, enum DataType type,
const std::vector<int64_t> &shape, const void *data, size_t data_len) noexcept {
std::string name_str = CharToString(name); std::string name_str = CharToString(name);
try { try {
std::shared_ptr<Impl> impl = std::make_shared<TensorReferenceImpl>(name_str, type, shape, data, data_len); std::shared_ptr<Impl> impl = std::make_shared<TensorReferenceImpl>(name_str, type, shape, data, data_len);
return MSTensor(impl);
MSTensor *ret = new MSTensor(impl);
return ret;
} catch (const std::bad_alloc &) { } catch (const std::bad_alloc &) {
MS_LOG(ERROR) << "Malloc memory failed."; MS_LOG(ERROR) << "Malloc memory failed.";
return MSTensor(nullptr);
return nullptr;
} catch (...) { } catch (...) {
MS_LOG(ERROR) << "Unknown error occurred."; MS_LOG(ERROR) << "Unknown error occurred.";
return MSTensor(nullptr);
return nullptr;
}
}

MSTensor *MSTensor::CharStringsToTensor(const std::vector<char> &name, const std::vector<std::vector<char>> &str) {
// num(4 bytes) + offset1(4 bytes) + offset2(4 bytes) + ... + data1(str1.len) + data2(str2.len) + ...
// str1.len() = offset2 - offset1
// data1.begin() = start + offset1
size_t mem_size = 0;
mem_size += sizeof(int32_t); // for num
for (const auto &s : str) {
mem_size += sizeof(int32_t); // for offset
mem_size += s.size(); // for data
}

auto tensor = CreateTensor(name, DataType::kObjectTypeString, {static_cast<int64_t>(mem_size)}, nullptr, mem_size);
if (tensor == nullptr) {
MS_LOG(ERROR) << "Create tensor failed.";
return nullptr;
}

int32_t *data = reinterpret_cast<int32_t *>(tensor->MutableData());
if (data == nullptr) {
MS_LOG(ERROR) << "Create tensor failed.";
DestroyTensorPtr(tensor);
return nullptr;
}
uint8_t *cur_data = reinterpret_cast<uint8_t *>(data + 1 + str.size());
*reinterpret_cast<int32_t *>(data) = str.size();
for (size_t i = 0; i < str.size(); ++i) {
int32_t offset = (cur_data - reinterpret_cast<uint8_t *>(data));
data[i + 1] = offset;
if (str[i].empty()) {
continue;
}
auto ret = memcpy_s(reinterpret_cast<void *>(cur_data), str[i].size(), str[i].data(), str[i].size());
if (ret != 0) {
MS_LOG(ERROR) << "memcpy_s failed, ret = " << ret;
DestroyTensorPtr(tensor);
return nullptr;
}
cur_data += str[i].size();
}

return tensor;
}

std::vector<std::vector<char>> MSTensor::TensorToStringChars(const MSTensor &tensor) {
if (tensor == nullptr || tensor.DataType() != DataType::kObjectTypeString || tensor.DataSize() < 4) {
MS_LOG(ERROR) << "Invalid tensor.";
return {};
}

std::vector<std::vector<char>> strings;
auto host_data = tensor.Data();
const int32_t *data = reinterpret_cast<const int32_t *>(host_data.get());
int32_t str_num = data[0];
if (str_num == 0) {
return {};
}
if (str_num < 0) {
MS_LOG(ERROR) << "str num " << str_num << " cannot be negative.";
return {};
}

if (tensor.DataSize() < (str_num + 1) * sizeof(int32_t)) {
MS_LOG(ERROR) << "Invalid tensor data size " << tensor.DataSize() << ", need " << (str_num + 1) * sizeof(int32_t)
<< " at least for " << str_num << " strings.";
return {};
}
for (size_t i = 0; i < static_cast<size_t>(str_num); ++i) {
strings.push_back({});
auto &str = strings[i];
int32_t str_len;
int32_t offset = data[i + 1];
if (i + 1 != static_cast<size_t>(str_num)) {
str_len = data[i + 1 + 1] - offset;
} else {
str_len = tensor.DataSize() - offset;
}

if (str_len == 0) {
continue;
}

if (str_len < 0) {
MS_LOG(ERROR) << "str " << i << " len " << str_len << " cannot be negative.";
return {};
}

str.resize(str_len);
const uint8_t *cur_data = reinterpret_cast<const uint8_t *>(data) + offset;
auto ret = memcpy_s(reinterpret_cast<void *>(str.data()), str.size(), cur_data, str_len);
if (ret != 0) {
MS_LOG(ERROR) << "memcpy_s failed, ret = " << ret;
return {};
}
}

return strings;
}

void MSTensor::DestroyTensorPtr(MSTensor *tensor) noexcept {
if (tensor != nullptr) {
delete tensor;
} }
} }


@@ -173,11 +279,21 @@ MSTensor::~MSTensor() = default;


bool MSTensor::operator==(std::nullptr_t) const { return impl_ == nullptr; } bool MSTensor::operator==(std::nullptr_t) const { return impl_ == nullptr; }


MSTensor MSTensor::Clone() const {
bool MSTensor::operator!=(std::nullptr_t) const { return impl_ != nullptr; }

MSTensor *MSTensor::Clone() const {
MS_EXCEPTION_IF_NULL(impl_); MS_EXCEPTION_IF_NULL(impl_);
MSTensor ret;
ret.impl_ = impl_->Clone();
return ret;
try {
MSTensor *ret = new MSTensor();
ret->impl_ = impl_->Clone();
return ret;
} catch (const std::bad_alloc &) {
MS_LOG(ERROR) << "Malloc memory failed.";
return nullptr;
} catch (...) {
MS_LOG(ERROR) << "Unknown error occurred.";
return nullptr;
}
} }


std::vector<char> MSTensor::CharName() const { std::vector<char> MSTensor::CharName() const {


+ 8
- 2
tests/ut/stub/graph_impl_stub.cc View File

@@ -57,7 +57,13 @@ Status GraphImplStubAdd::Run(const std::vector<MSTensor> &inputs, std::vector<MS
} }
auto x1 = reinterpret_cast<const float *>(inputs[0].Data().get()); auto x1 = reinterpret_cast<const float *>(inputs[0].Data().get());
auto x2 = reinterpret_cast<const float *>(inputs[1].Data().get()); auto x2 = reinterpret_cast<const float *>(inputs[1].Data().get());
MSTensor output = outputs_[0].Clone();
MSTensor* output_ptr = outputs_[0].Clone();
if (output_ptr == nullptr) {
return mindspore::kCoreFailed;
}
MSTensor output = *output_ptr;
mindspore::MSTensor::DestroyTensorPtr(output_ptr);

auto y = reinterpret_cast<float *>(output.MutableData()); auto y = reinterpret_cast<float *>(output.MutableData());
for (size_t i = 0; i < outputs_[0].DataSize() / sizeof(float); i++) { for (size_t i = 0; i < outputs_[0].DataSize() / sizeof(float); i++) {
y[i] = x1[i] + x2[i]; y[i] = x1[i] + x2[i];
@@ -66,7 +72,7 @@ Status GraphImplStubAdd::Run(const std::vector<MSTensor> &inputs, std::vector<MS
return mindspore::kSuccess; return mindspore::kSuccess;
} }


Status GraphImplStubAdd::Load() { return kSuccess; }
Status GraphImplStubAdd::Load(uint32_t device_id) { return kSuccess; }


std::vector<MSTensor> GraphImplStubAdd::GetInputs() { return inputs_; } std::vector<MSTensor> GraphImplStubAdd::GetInputs() { return inputs_; }




+ 1
- 1
tests/ut/stub/graph_impl_stub.h View File

@@ -36,7 +36,7 @@ class GraphImplStubAdd : public GraphCell::GraphImpl {
~GraphImplStubAdd() override; ~GraphImplStubAdd() override;


Status Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) override; Status Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) override;
Status Load() override;
Status Load(uint32_t device_id) override;


std::vector<MSTensor> GetInputs() override; std::vector<MSTensor> GetInputs() override;
std::vector<MSTensor> GetOutputs() override; std::vector<MSTensor> GetOutputs() override;


+ 2
- 1
tests/ut/stub/include/api/cell.h View File

@@ -103,8 +103,9 @@ class MS_API GraphCell final : public Cell<GraphCell> {
std::vector<MSTensor> GetOutputs(); std::vector<MSTensor> GetOutputs();


private: private:
friend class Model;
friend class ModelImpl; friend class ModelImpl;
Status Load();
Status Load(uint32_t device_id);


std::shared_ptr<Graph> graph_; std::shared_ptr<Graph> graph_;
std::shared_ptr<GraphImpl> executor_; std::shared_ptr<GraphImpl> executor_;


+ 158
- 82
tests/ut/stub/include/api/context.h View File

@@ -19,130 +19,206 @@
#include <string> #include <string>
#include <memory> #include <memory>
#include <vector> #include <vector>
#include <map>
#include "include/api/types.h" #include "include/api/types.h"
#include "include/api/dual_abi_helper.h" #include "include/api/dual_abi_helper.h"


namespace mindspore { namespace mindspore {
constexpr auto kDeviceTypeAscend310 = "Ascend310";
constexpr auto kDeviceTypeAscend910 = "Ascend910";
constexpr auto kDeviceTypeGPU = "GPU";
enum DeviceType {
kCPU = 0,
kMaliGPU,
kNvidiaGPU,
kKirinNPU,
kAscend910,
kAscend310,
// add new type here
kInvalidDeviceType = 100,
};

class Allocator;
class DeviceInfoContext;


struct MS_API Context {
class MS_API Context {
public: public:
Context(); Context();
virtual ~Context() = default;
~Context() = default;

void SetThreadNum(int32_t thread_num);
int32_t GetThreadNum() const;

void SetAllocator(const std::shared_ptr<Allocator> &allocator);
std::shared_ptr<Allocator> GetAllocator() const;

std::vector<std::shared_ptr<DeviceInfoContext>> &MutableDeviceInfo();

private:
struct Data; struct Data;
std::shared_ptr<Data> data;
std::shared_ptr<Data> data_;
}; };


struct MS_API GlobalContext : public Context {
class MS_API DeviceInfoContext : public std::enable_shared_from_this<DeviceInfoContext> {
public: public:
static std::shared_ptr<Context> GetGlobalContext();
struct Data;


static inline void SetGlobalDeviceTarget(const std::string &device_target);
static inline std::string GetGlobalDeviceTarget();
DeviceInfoContext();
virtual ~DeviceInfoContext() = default;
virtual enum DeviceType GetDeviceType() const = 0;


static void SetGlobalDeviceID(const uint32_t &device_id);
static uint32_t GetGlobalDeviceID();
template <class T>
std::shared_ptr<T> Cast() {
static_assert(std::is_base_of<DeviceInfoContext, T>::value, "Wrong cast type.");
if (GetDeviceType() != T().GetDeviceType()) {
return nullptr;
}


private:
// api without std::string
static void SetGlobalDeviceTarget(const std::vector<char> &device_target);
static std::vector<char> GetGlobalDeviceTargetChar();
return std::static_pointer_cast<T>(shared_from_this());
}

protected:
std::shared_ptr<Data> data_;
};

class MS_API CPUDeviceInfo : public DeviceInfoContext {
public:
enum DeviceType GetDeviceType() const override { return DeviceType::kCPU; };

/// \brief Set the thread affinity of CPU cores.
///
/// \param mode: 0: no affinities, 1: big cores first, 2: little cores first
void SetThreadAffinity(int mode);
int GetThreadAffinity() const;
void SetEnableFP16(bool is_fp16);
bool GetEnableFP16() const;
};

class MS_API MaliGPUDeviceInfo : public DeviceInfoContext {
public:
enum DeviceType GetDeviceType() const override { return DeviceType::kMaliGPU; };

void SetEnableFP16(bool is_fp16);
bool GetEnableFP16() const;
};

class MS_API KirinNPUDeviceInfo : public DeviceInfoContext {
public:
enum DeviceType GetDeviceType() const override { return DeviceType::kKirinNPU; };

void SetFrequency(int frequency);
int GetFrequency() const;
};

class MS_API NvidiaGPUDeviceInfo : public DeviceInfoContext {
public:
enum DeviceType GetDeviceType() const override { return DeviceType::kNvidiaGPU; };

void SetDeviceID(uint32_t device_id);
uint32_t GetDeviceID() const;

void SetGpuTrtInferMode(bool gpu_trt_infer_mode);
bool GetGpuTrtInferMode() const;
};

class MS_API Ascend910DeviceInfo : public DeviceInfoContext {
public:
enum DeviceType GetDeviceType() const override { return DeviceType::kAscend910; };

void SetDeviceID(uint32_t device_id);
uint32_t GetDeviceID() const;
}; };


struct MS_API ModelContext : public Context {
class MS_API Ascend310DeviceInfo : public DeviceInfoContext {
public: public:
static inline void SetInsertOpConfigPath(const std::shared_ptr<Context> &context, const std::string &cfg_path);
static inline std::string GetInsertOpConfigPath(const std::shared_ptr<Context> &context);
enum DeviceType GetDeviceType() const override { return DeviceType::kAscend310; };

void SetDeviceID(uint32_t device_id);
uint32_t GetDeviceID() const;

inline void SetDumpConfigPath(const std::string &cfg_path);
inline std::string GetDumpConfigPath() const;

inline void SetInsertOpConfigPath(const std::string &cfg_path);
inline std::string GetInsertOpConfigPath() const;

inline void SetInputFormat(const std::string &format);
inline std::string GetInputFormat() const;

inline void SetInputShape(const std::string &shape);
inline std::string GetInputShape() const;


static inline void SetInputFormat(const std::shared_ptr<Context> &context, const std::string &format);
static inline std::string GetInputFormat(const std::shared_ptr<Context> &context);
void SetInputShapeMap(const std::map<int, std::vector<int>> &shape);
std::map<int, std::vector<int>> GetInputShapeMap() const;


static inline void SetInputShape(const std::shared_ptr<Context> &context, const std::string &shape);
static inline std::string GetInputShape(const std::shared_ptr<Context> &context);
void SetDynamicBatchSize(const std::vector<size_t> &dynamic_batch_size);
inline std::string GetDynamicBatchSize() const;


static void SetOutputType(const std::shared_ptr<Context> &context, enum DataType output_type);
static enum DataType GetOutputType(const std::shared_ptr<Context> &context);
void SetOutputType(enum DataType output_type);
enum DataType GetOutputType() const;


static inline void SetPrecisionMode(const std::shared_ptr<Context> &context, const std::string &precision_mode);
static inline std::string GetPrecisionMode(const std::shared_ptr<Context> &context);
inline void SetPrecisionMode(const std::string &precision_mode);
inline std::string GetPrecisionMode() const;


static inline void SetOpSelectImplMode(const std::shared_ptr<Context> &context,
const std::string &op_select_impl_mode);
static inline std::string GetOpSelectImplMode(const std::shared_ptr<Context> &context);
inline void SetOpSelectImplMode(const std::string &op_select_impl_mode);
inline std::string GetOpSelectImplMode() const;


static inline void SetGpuTrtInferMode(const std::shared_ptr<Context> &context, const std::string &gpu_trt_infer_mode);
static inline std::string GetGpuTrtInferMode(const std::shared_ptr<Context> &context);
inline void SetFusionSwitchConfigPath(const std::string &cfg_path);
inline std::string GetFusionSwitchConfigPath() const;


private: private:
// api without std::string
static void SetInsertOpConfigPath(const std::shared_ptr<Context> &context, const std::vector<char> &cfg_path);
static std::vector<char> GetInsertOpConfigPathChar(const std::shared_ptr<Context> &context);
void SetDumpConfigPath(const std::vector<char> &cfg_path);
std::vector<char> GetDumpConfigPathChar() const;


static void SetInputFormat(const std::shared_ptr<Context> &context, const std::vector<char> &format);
static std::vector<char> GetInputFormatChar(const std::shared_ptr<Context> &context);
void SetInsertOpConfigPath(const std::vector<char> &cfg_path);
std::vector<char> GetInsertOpConfigPathChar() const;


static void SetInputShape(const std::shared_ptr<Context> &context, const std::vector<char> &shape);
static std::vector<char> GetInputShapeChar(const std::shared_ptr<Context> &context);
void SetInputFormat(const std::vector<char> &format);
std::vector<char> GetInputFormatChar() const;


static void SetPrecisionMode(const std::shared_ptr<Context> &context, const std::vector<char> &precision_mode);
static std::vector<char> GetPrecisionModeChar(const std::shared_ptr<Context> &context);
void SetInputShape(const std::vector<char> &shape);
std::vector<char> GetInputShapeChar() const;


static void SetOpSelectImplMode(const std::shared_ptr<Context> &context,
const std::vector<char> &op_select_impl_mode);
static std::vector<char> GetOpSelectImplModeChar(const std::shared_ptr<Context> &context);
std::vector<char> GetDynamicBatchSizeChar() const;


static void SetGpuTrtInferMode(const std::shared_ptr<Context> &context, const std::vector<char> &gpu_trt_infer_mode);
static std::vector<char> GetGpuTrtInferModeChar(const std::shared_ptr<Context> &context);
void SetPrecisionMode(const std::vector<char> &precision_mode);
std::vector<char> GetPrecisionModeChar() const;

void SetOpSelectImplMode(const std::vector<char> &op_select_impl_mode);
std::vector<char> GetOpSelectImplModeChar() const;

void SetFusionSwitchConfigPath(const std::vector<char> &cfg_path);
std::vector<char> GetFusionSwitchConfigPathChar() const;
}; };


void GlobalContext::SetGlobalDeviceTarget(const std::string &device_target) {
SetGlobalDeviceTarget(StringToChar(device_target));
}
std::string GlobalContext::GetGlobalDeviceTarget() { return CharToString(GetGlobalDeviceTargetChar()); }
void Ascend310DeviceInfo::SetDumpConfigPath(const std::string &cfg_path) { SetDumpConfigPath(StringToChar(cfg_path)); }
std::string Ascend310DeviceInfo::GetDumpConfigPath() const { return CharToString(GetDumpConfigPathChar()); }


void ModelContext::SetInsertOpConfigPath(const std::shared_ptr<Context> &context, const std::string &cfg_path) {
SetInsertOpConfigPath(context, StringToChar(cfg_path));
}
std::string ModelContext::GetInsertOpConfigPath(const std::shared_ptr<Context> &context) {
return CharToString(GetInsertOpConfigPathChar(context));
void Ascend310DeviceInfo::SetInsertOpConfigPath(const std::string &cfg_path) {
SetInsertOpConfigPath(StringToChar(cfg_path));
} }
std::string Ascend310DeviceInfo::GetInsertOpConfigPath() const { return CharToString(GetInsertOpConfigPathChar()); }


void ModelContext::SetInputFormat(const std::shared_ptr<Context> &context, const std::string &format) {
SetInputFormat(context, StringToChar(format));
}
std::string ModelContext::GetInputFormat(const std::shared_ptr<Context> &context) {
return CharToString(GetInputFormatChar(context));
}
void Ascend310DeviceInfo::SetInputFormat(const std::string &format) { SetInputFormat(StringToChar(format)); }
std::string Ascend310DeviceInfo::GetInputFormat() const { return CharToString(GetInputFormatChar()); }


void ModelContext::SetInputShape(const std::shared_ptr<Context> &context, const std::string &shape) {
SetInputShape(context, StringToChar(shape));
}
std::string ModelContext::GetInputShape(const std::shared_ptr<Context> &context) {
return CharToString(GetInputShapeChar(context));
}
void Ascend310DeviceInfo::SetInputShape(const std::string &shape) { SetInputShape(StringToChar(shape)); }
std::string Ascend310DeviceInfo::GetInputShape() const { return CharToString(GetInputShapeChar()); }


void ModelContext::SetPrecisionMode(const std::shared_ptr<Context> &context, const std::string &precision_mode) {
SetPrecisionMode(context, StringToChar(precision_mode));
}
std::string ModelContext::GetPrecisionMode(const std::shared_ptr<Context> &context) {
return CharToString(GetPrecisionModeChar(context));
}
std::string Ascend310DeviceInfo::GetDynamicBatchSize() const { return CharToString(GetDynamicBatchSizeChar()); }


void ModelContext::SetOpSelectImplMode(const std::shared_ptr<Context> &context,
const std::string &op_select_impl_mode) {
SetOpSelectImplMode(context, StringToChar(op_select_impl_mode));
void Ascend310DeviceInfo::SetPrecisionMode(const std::string &precision_mode) {
SetPrecisionMode(StringToChar(precision_mode));
} }
std::string ModelContext::GetOpSelectImplMode(const std::shared_ptr<Context> &context) {
return CharToString(GetOpSelectImplModeChar(context));
std::string Ascend310DeviceInfo::GetPrecisionMode() const { return CharToString(GetPrecisionModeChar()); }

void Ascend310DeviceInfo::SetOpSelectImplMode(const std::string &op_select_impl_mode) {
SetOpSelectImplMode(StringToChar(op_select_impl_mode));
} }
std::string Ascend310DeviceInfo::GetOpSelectImplMode() const { return CharToString(GetOpSelectImplModeChar()); }


void ModelContext::SetGpuTrtInferMode(const std::shared_ptr<Context> &context, const std::string &gpu_trt_infer_mode) {
SetGpuTrtInferMode(context, StringToChar(gpu_trt_infer_mode));
void Ascend310DeviceInfo::SetFusionSwitchConfigPath(const std::string &cfg_path) {
SetFusionSwitchConfigPath(StringToChar(cfg_path));
} }
std::string ModelContext::GetGpuTrtInferMode(const std::shared_ptr<Context> &context) {
return CharToString(GetGpuTrtInferModeChar(context));
std::string Ascend310DeviceInfo::GetFusionSwitchConfigPath() const {
return CharToString(GetFusionSwitchConfigPathChar());
} }
} // namespace mindspore } // namespace mindspore
#endif // MINDSPORE_INCLUDE_API_CONTEXT_H #endif // MINDSPORE_INCLUDE_API_CONTEXT_H

+ 138
- 0
tests/ut/stub/include/api/dual_abi_helper.h View File

@@ -16,11 +16,149 @@
#ifndef MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_ #ifndef MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_
#define MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_ #define MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_


#include <algorithm>
#include <map>
#include <memory>
#include <optional>
#include <string> #include <string>
#include <set>
#include <unordered_map>
#include <utility>
#include <vector> #include <vector>


namespace mindspore { namespace mindspore {
inline std::vector<char> StringToChar(const std::string &s) { return std::vector<char>(s.begin(), s.end()); } inline std::vector<char> StringToChar(const std::string &s) { return std::vector<char>(s.begin(), s.end()); }

inline std::string CharToString(const std::vector<char> &c) { return std::string(c.begin(), c.end()); } inline std::string CharToString(const std::vector<char> &c) { return std::string(c.begin(), c.end()); }

// Converts an optional string into an optional char vector; nullopt passes through unchanged.
inline std::optional<std::vector<char>> OptionalStringToChar(const std::optional<std::string> &s) {
  if (!s.has_value()) {
    return std::nullopt;
  }
  return std::optional<std::vector<char>>(std::in_place, s->begin(), s->end());
}

// Converts an optional char vector back into an optional string; nullopt passes through unchanged.
inline std::optional<std::string> OptionalCharToString(const std::optional<std::vector<char>> &c) {
  if (!c.has_value()) {
    return std::nullopt;
  }
  return std::optional<std::string>(std::in_place, c->begin(), c->end());
}

// Converts a (string, int32_t) pair into its dual-ABI (char-vector, int32_t) form.
inline std::pair<std::vector<char>, int32_t> PairStringToChar(const std::pair<std::string, int32_t> &s) {
  std::vector<char> chars(s.first.begin(), s.first.end());
  return std::make_pair(std::move(chars), s.second);
}

// Converts a dual-ABI (char-vector, int32_t) pair back into a (string, int32_t) pair.
inline std::pair<std::string, int32_t> PairCharToString(const std::pair<std::vector<char>, int32_t> &c) {
  std::string str(c.first.begin(), c.first.end());
  return std::make_pair(std::move(str), c.second);
}

// Converts a vector of strings into the dual-ABI vector-of-char-vectors form.
inline std::vector<std::vector<char>> VectorStringToChar(const std::vector<std::string> &s) {
  std::vector<std::vector<char>> ret;
  ret.reserve(s.size());  // one allocation instead of repeated growth through back_inserter
  std::transform(s.begin(), s.end(), std::back_inserter(ret),
                 // const reference: the original by-value `auto` parameter copied every string
                 [](const std::string &str) { return std::vector<char>(str.begin(), str.end()); });
  return ret;
}

// Converts a dual-ABI vector of char vectors back into a vector of strings.
inline std::vector<std::string> VectorCharToString(const std::vector<std::vector<char>> &c) {
  std::vector<std::string> ret;
  ret.reserve(c.size());  // one allocation instead of repeated growth through back_inserter
  std::transform(c.begin(), c.end(), std::back_inserter(ret),
                 // const reference: the original by-value `auto` parameter copied every buffer
                 [](const std::vector<char> &ch) { return std::string(ch.begin(), ch.end()); });
  return ret;
}

// Converts a set of strings into the dual-ABI set-of-char-vectors form.
inline std::set<std::vector<char>> SetStringToChar(const std::set<std::string> &s) {
  std::set<std::vector<char>> ret;
  std::transform(s.begin(), s.end(), std::inserter(ret, ret.begin()),
                 // const reference: the original by-value `auto` parameter copied every element
                 [](const std::string &str) { return std::vector<char>(str.begin(), str.end()); });
  return ret;
}

// Converts a dual-ABI set of char vectors back into a set of strings.
inline std::set<std::string> SetCharToString(const std::set<std::vector<char>> &c) {
  std::set<std::string> ret;
  std::transform(c.begin(), c.end(), std::inserter(ret, ret.begin()),
                 // const reference: the original by-value `auto` parameter copied every buffer
                 [](const std::vector<char> &ch) { return std::string(ch.begin(), ch.end()); });
  return ret;
}

// Converts a string->int32 map into the dual-ABI char-vector->int32 form.
inline std::map<std::vector<char>, int32_t> MapStringToChar(const std::map<std::string, int32_t> &s) {
  std::map<std::vector<char>, int32_t> ret;
  std::transform(s.begin(), s.end(), std::inserter(ret, ret.begin()),
                 // const reference: the original by-value `auto` parameter copied each (key, value) pair
                 [](const auto &str) {
                   return std::pair<std::vector<char>, int32_t>(
                       std::vector<char>(str.first.begin(), str.first.end()), str.second);
                 });
  return ret;
}

// Converts a dual-ABI char-vector->int32 map back into a string->int32 map.
inline std::map<std::string, int32_t> MapCharToString(const std::map<std::vector<char>, int32_t> &c) {
  std::map<std::string, int32_t> ret;
  std::transform(c.begin(), c.end(), std::inserter(ret, ret.begin()),
                 // const reference: the original by-value `auto` parameter copied each (key, value) pair
                 [](const auto &ch) {
                   return std::pair<std::string, int32_t>(std::string(ch.first.begin(), ch.first.end()), ch.second);
                 });
  return ret;
}

// Converts an unordered string->string map into a dual-ABI char-vector map.
// Note: the result is an ordered std::map — that is the ABI-stable type this
// helper family uses for the private char-based API; keep the signature as-is.
inline std::map<std::vector<char>, std::vector<char>> UnorderedMapStringToChar(
    const std::unordered_map<std::string, std::string> &s) {
  std::map<std::vector<char>, std::vector<char>> ret;
  std::transform(s.begin(), s.end(), std::inserter(ret, ret.begin()),
                 // const reference: the original by-value `auto` parameter copied both strings per pair
                 [](const auto &str) {
                   return std::pair<std::vector<char>, std::vector<char>>(
                       std::vector<char>(str.first.begin(), str.first.end()),
                       std::vector<char>(str.second.begin(), str.second.end()));
                 });
  return ret;
}

// Converts a dual-ABI char-vector map back into an unordered string->string map.
inline std::unordered_map<std::string, std::string> UnorderedMapCharToString(
    const std::map<std::vector<char>, std::vector<char>> &c) {
  std::unordered_map<std::string, std::string> ret;
  ret.reserve(c.size());  // pre-size the hash table; element count is known
  std::transform(c.begin(), c.end(), std::inserter(ret, ret.begin()),
                 // const reference: the original by-value `auto` parameter copied both buffers per pair
                 [](const auto &ch) {
                   return std::pair<std::string, std::string>(std::string(ch.first.begin(), ch.first.end()),
                                                              std::string(ch.second.begin(), ch.second.end()));
                 });
  return ret;
}

// Converts class-index entries (name, indices) into the dual-ABI (char-vector, indices) form.
inline std::vector<std::pair<std::vector<char>, std::vector<int32_t>>> ClassIndexStringToChar(
    const std::vector<std::pair<std::string, std::vector<int32_t>>> &s) {
  std::vector<std::pair<std::vector<char>, std::vector<int32_t>>> ret;
  ret.reserve(s.size());  // one allocation instead of repeated growth through back_inserter
  std::transform(s.begin(), s.end(), std::back_inserter(ret),
                 // const reference: the original by-value `auto` parameter copied the index vector too
                 [](const auto &str) {
                   return std::pair<std::vector<char>, std::vector<int32_t>>(
                       std::vector<char>(str.first.begin(), str.first.end()), str.second);
                 });
  return ret;
}

// Converts dual-ABI class-index entries (char-vector, indices) back to (string, indices).
inline std::vector<std::pair<std::string, std::vector<int32_t>>> ClassIndexCharToString(
    const std::vector<std::pair<std::vector<char>, std::vector<int32_t>>> &c) {
  std::vector<std::pair<std::string, std::vector<int32_t>>> ret;
  ret.reserve(c.size());  // one allocation instead of repeated growth through back_inserter
  std::transform(c.begin(), c.end(), std::back_inserter(ret),
                 // const reference: the original by-value `auto` parameter copied the index vector too
                 [](const auto &ch) {
                   return std::pair<std::string, std::vector<int32_t>>(
                       std::string(ch.first.begin(), ch.first.end()), ch.second);
                 });
  return ret;
}

// Converts a pad-info map keyed by string into the dual-ABI char-vector-keyed form.
// T is the pad value type and is passed through unchanged.
template <class T>
inline std::map<std::vector<char>, T> PadInfoStringToChar(const std::map<std::string, T> &s_pad_info) {
  std::map<std::vector<char>, T> ret;
  std::transform(s_pad_info.begin(), s_pad_info.end(), std::inserter(ret, ret.begin()),
                 // const reference: the original by-value `auto` parameter copied key and value
                 [](const auto &str) {
                   return std::pair<std::vector<char>, T>(std::vector<char>(str.first.begin(), str.first.end()),
                                                          str.second);
                 });
  return ret;
}

// Converts a dual-ABI char-vector-keyed pad-info map back into the string-keyed form.
// T is the pad value type and is passed through unchanged.
template <class T>
inline std::map<std::string, T> PadInfoCharToString(const std::map<std::vector<char>, T> &c_pad_info) {
  std::map<std::string, T> ret;
  std::transform(c_pad_info.begin(), c_pad_info.end(), std::inserter(ret, ret.begin()),
                 // const reference: the original by-value `auto` parameter copied key and value
                 [](const auto &ch) {
                   return std::pair<std::string, T>(std::string(ch.first.begin(), ch.first.end()), ch.second);
                 });
  return ret;
}

// Converts a char-vector-keyed tensor map into a string-keyed unordered map.
// Keys already present in *s are kept (insert semantics, no overwrite).
// Null input or output pointers are treated as a no-op instead of being dereferenced.
template <class T>
inline void TensorMapCharToString(const std::map<std::vector<char>, T> *c, std::unordered_map<std::string, T> *s) {
  if (c == nullptr || s == nullptr) {
    return;  // defensive: the original dereferenced both pointers unconditionally
  }
  for (const auto &entry : *c) {  // const reference: by-value iteration copied key and value each pass
    s->emplace(std::string(entry.first.begin(), entry.first.end()), entry.second);
  }
}
} // namespace mindspore } // namespace mindspore
#endif // MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_ #endif // MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_

+ 2
- 0
tests/ut/stub/include/api/graph.h View File

@@ -27,6 +27,7 @@ namespace mindspore {
class MS_API Graph { class MS_API Graph {
public: public:
class GraphData; class GraphData;
Graph();
explicit Graph(const std::shared_ptr<GraphData> &graph_data); explicit Graph(const std::shared_ptr<GraphData> &graph_data);
explicit Graph(std::shared_ptr<GraphData> &&graph_data); explicit Graph(std::shared_ptr<GraphData> &&graph_data);
explicit Graph(std::nullptr_t); explicit Graph(std::nullptr_t);
@@ -34,6 +35,7 @@ class MS_API Graph {


enum ModelType ModelType() const; enum ModelType ModelType() const;
bool operator==(std::nullptr_t) const; bool operator==(std::nullptr_t) const;
bool operator!=(std::nullptr_t) const;


private: private:
friend class GraphCell; friend class GraphCell;


+ 21
- 8
tests/ut/stub/include/api/model.h View File

@@ -24,39 +24,52 @@
#include "include/api/status.h" #include "include/api/status.h"
#include "include/api/types.h" #include "include/api/types.h"
#include "include/api/graph.h" #include "include/api/graph.h"
#include "include/api/context.h"
#include "include/api/cell.h" #include "include/api/cell.h"
#include "include/api/dual_abi_helper.h" #include "include/api/dual_abi_helper.h"


namespace mindspore { namespace mindspore {
class ModelImpl; class ModelImpl;
struct Context;


class MS_API Model { class MS_API Model {
public: public:
explicit Model(const std::vector<Output> &network, const std::shared_ptr<Context> &model_context = nullptr);
explicit Model(const GraphCell &graph, const std::shared_ptr<Context> &model_context = nullptr);
Model();
~Model(); ~Model();
Model(const Model &) = delete; Model(const Model &) = delete;
void operator=(const Model &) = delete; void operator=(const Model &) = delete;


Status Build();
Status Build(GraphCell graph, const std::shared_ptr<Context> &model_context = nullptr);
Status Resize(const std::vector<MSTensor> &inputs, const std::vector<std::vector<int64_t>> &dims); Status Resize(const std::vector<MSTensor> &inputs, const std::vector<std::vector<int64_t>> &dims);


Status Predict(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs); Status Predict(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs);


std::vector<MSTensor> GetInputs(); std::vector<MSTensor> GetInputs();
inline MSTensor GetInputByTensorName(const std::string &tensor_name);

std::vector<MSTensor> GetOutputs(); std::vector<MSTensor> GetOutputs();
inline std::vector<std::string> GetOutputTensorNames();
inline MSTensor GetOutputByTensorName(const std::string &tensor_name);


static inline bool CheckModelSupport(const std::string &device_type, ModelType model_type);
static bool CheckModelSupport(enum DeviceType device_type, ModelType model_type);


private: private:
// api without std::string // api without std::string
static bool CheckModelSupport(const std::vector<char> &device_type, ModelType model_type);
MSTensor GetInputByTensorName(const std::vector<char> &tensor_name);
std::vector<std::vector<char>> GetOutputTensorNamesChar();
MSTensor GetOutputByTensorName(const std::vector<char> &tensor_name);
std::vector<MSTensor> GetOutputsByNodeName(const std::vector<char> &node_name);

std::shared_ptr<ModelImpl> impl_; std::shared_ptr<ModelImpl> impl_;
}; };


bool Model::CheckModelSupport(const std::string &device_type, ModelType model_type) {
return CheckModelSupport(StringToChar(device_type), model_type);
MSTensor Model::GetInputByTensorName(const std::string &tensor_name) {
return GetInputByTensorName(StringToChar(tensor_name));
}

std::vector<std::string> Model::GetOutputTensorNames() { return VectorCharToString(GetOutputTensorNamesChar()); }

MSTensor Model::GetOutputByTensorName(const std::string &tensor_name) {
return GetOutputByTensorName(StringToChar(tensor_name));
} }
} // namespace mindspore } // namespace mindspore
#endif // MINDSPORE_INCLUDE_API_MODEL_H #endif // MINDSPORE_INCLUDE_API_MODEL_H

+ 5
- 5
tests/ut/stub/include/api/serialization.h View File

@@ -29,19 +29,19 @@
namespace mindspore { namespace mindspore {
class MS_API Serialization { class MS_API Serialization {
public: public:
static Graph LoadModel(const void *model_data, size_t data_size, ModelType model_type);
inline static Graph LoadModel(const std::string &file, ModelType model_type);
static Status Load(const void *model_data, size_t data_size, ModelType model_type, Graph *graph);
inline static Status Load(const std::string &file, ModelType model_type, Graph *graph);
static Status LoadCheckPoint(const std::string &ckpt_file, std::map<std::string, Buffer> *parameters); static Status LoadCheckPoint(const std::string &ckpt_file, std::map<std::string, Buffer> *parameters);
static Status SetParameters(const std::map<std::string, Buffer> &parameters, Model *model); static Status SetParameters(const std::map<std::string, Buffer> &parameters, Model *model);
static Status ExportModel(const Model &model, ModelType model_type, Buffer *model_data); static Status ExportModel(const Model &model, ModelType model_type, Buffer *model_data);
static Status ExportModel(const Model &model, ModelType model_type, const std::string &model_file); static Status ExportModel(const Model &model, ModelType model_type, const std::string &model_file);


private: private:
static Graph LoadModel(const std::vector<char> &file, ModelType model_type);
static Status Load(const std::vector<char> &file, ModelType model_type, Graph *graph);
}; };


Graph Serialization::LoadModel(const std::string &file, ModelType model_type) {
return LoadModel(StringToChar(file), model_type);
Status Serialization::Load(const std::string &file, ModelType model_type, Graph *graph) {
return Load(StringToChar(file), model_type, graph);
} }
} // namespace mindspore } // namespace mindspore
#endif // MINDSPORE_INCLUDE_API_SERIALIZATION_H #endif // MINDSPORE_INCLUDE_API_SERIALIZATION_H

+ 29
- 14
tests/ut/stub/include/api/types.h View File

@@ -43,15 +43,19 @@ class MS_API MSTensor {
public: public:
class Impl; class Impl;


static inline MSTensor CreateTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept;
static inline MSTensor CreateRefTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept;
static inline MSTensor *CreateTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept;
static inline MSTensor *CreateRefTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept;
static inline MSTensor *StringsToTensor(const std::string &name, const std::vector<std::string> &str);
static inline std::vector<std::string> TensorToStrings(const MSTensor &tensor);
static void DestroyTensorPtr(MSTensor *tensor) noexcept;


MSTensor(); MSTensor();
explicit MSTensor(const std::shared_ptr<Impl> &impl); explicit MSTensor(const std::shared_ptr<Impl> &impl);
inline MSTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape, const void *data, inline MSTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape, const void *data,
size_t data_len); size_t data_len);
explicit MSTensor(std::nullptr_t);
~MSTensor(); ~MSTensor();


inline std::string Name() const; inline std::string Name() const;
@@ -65,21 +69,24 @@ class MS_API MSTensor {


bool IsDevice() const; bool IsDevice() const;


MSTensor Clone() const;
MSTensor *Clone() const;
bool operator==(std::nullptr_t) const; bool operator==(std::nullptr_t) const;
bool operator!=(std::nullptr_t) const;


private: private:
// api without std::string // api without std::string
static MSTensor CreateTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept;
static MSTensor CreateRefTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept;
static MSTensor *CreateTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept;
static MSTensor *CreateRefTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept;
static MSTensor *CharStringsToTensor(const std::vector<char> &name, const std::vector<std::vector<char>> &str);
static std::vector<std::vector<char>> TensorToStringChars(const MSTensor &tensor);

MSTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape, const void *data, MSTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape, const void *data,
size_t data_len); size_t data_len);
std::vector<char> CharName() const; std::vector<char> CharName() const;


friend class ModelImpl; friend class ModelImpl;
explicit MSTensor(std::nullptr_t);
std::shared_ptr<Impl> impl_; std::shared_ptr<Impl> impl_;
}; };


@@ -121,16 +128,24 @@ class MS_API Buffer {
std::shared_ptr<Impl> impl_; std::shared_ptr<Impl> impl_;
}; };


MSTensor MSTensor::CreateTensor(const std::string &name, enum DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept {
MSTensor *MSTensor::CreateTensor(const std::string &name, enum DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept {
return CreateTensor(StringToChar(name), type, shape, data, data_len); return CreateTensor(StringToChar(name), type, shape, data, data_len);
} }


MSTensor MSTensor::CreateRefTensor(const std::string &name, enum DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept {
MSTensor *MSTensor::CreateRefTensor(const std::string &name, enum DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept {
return CreateRefTensor(StringToChar(name), type, shape, data, data_len); return CreateRefTensor(StringToChar(name), type, shape, data, data_len);
} }


MSTensor *MSTensor::StringsToTensor(const std::string &name, const std::vector<std::string> &str) {
return CharStringsToTensor(StringToChar(name), VectorStringToChar(str));
}

std::vector<std::string> MSTensor::TensorToStrings(const MSTensor &tensor) {
return VectorCharToString(TensorToStringChars(tensor));
}

MSTensor::MSTensor(const std::string &name, enum DataType type, const std::vector<int64_t> &shape, const void *data, MSTensor::MSTensor(const std::string &name, enum DataType type, const std::vector<int64_t> &shape, const void *data,
size_t data_len) size_t data_len)
: MSTensor(StringToChar(name), type, shape, data, data_len) {} : MSTensor(StringToChar(name), type, shape, data, data_len) {}


+ 1
- 1
third_party/mindspore

@@ -1 +1 @@
Subproject commit 6b8bef2c8afe3f9890cee8e866771dd4b1d23d16
Subproject commit 1e84d77969f50251f63a2f08e14bed15eb02514e

Loading…
Cancel
Save