Browse Source

!206 Serving, fix libevent_core.so link, update cxx interface

From: @xu-yfei
Reviewed-by: @zhoufeng54,@linqingke
Signed-off-by:
tags/v1.2.0
mindspore-ci-bot Gitee 5 years ago
parent
commit
5a82e36078
29 changed files with 1034 additions and 366 deletions
  1. +2
    -1
      cmake/external_libs/libevent.cmake
  2. +4
    -1
      mindspore_serving/CMakeLists.txt
  3. +16
    -0
      mindspore_serving/ccsrc/python/worker/worker_py.cc
  4. +90
    -43
      mindspore_serving/ccsrc/worker/inference/mindspore_model_wrap.cc
  5. +13
    -4
      mindspore_serving/ccsrc/worker/inference/mindspore_model_wrap.h
  6. +2
    -0
      mindspore_serving/worker/distributed/agent_startup.py
  7. +59
    -33
      tests/ut/stub/cxx_api/cell.cc
  8. +195
    -116
      tests/ut/stub/cxx_api/context.cc
  9. +2
    -0
      tests/ut/stub/cxx_api/factory.h
  10. +1
    -1
      tests/ut/stub/cxx_api/graph/ascend/ascend_graph_impl.cc
  11. +1
    -2
      tests/ut/stub/cxx_api/graph/ascend/ascend_graph_impl.h
  12. +4
    -0
      tests/ut/stub/cxx_api/graph/graph.cc
  13. +2
    -2
      tests/ut/stub/cxx_api/graph/graph_impl.h
  14. +92
    -22
      tests/ut/stub/cxx_api/model/model.cc
  15. +2
    -2
      tests/ut/stub/cxx_api/model/model_impl.h
  16. +25
    -2
      tests/ut/stub/cxx_api/model/ms/ms_model.cc
  17. +1
    -0
      tests/ut/stub/cxx_api/model/ms/ms_model.h
  18. +28
    -9
      tests/ut/stub/cxx_api/serialization.cc
  19. +130
    -14
      tests/ut/stub/cxx_api/types.cc
  20. +8
    -2
      tests/ut/stub/graph_impl_stub.cc
  21. +1
    -1
      tests/ut/stub/graph_impl_stub.h
  22. +2
    -1
      tests/ut/stub/include/api/cell.h
  23. +158
    -82
      tests/ut/stub/include/api/context.h
  24. +138
    -0
      tests/ut/stub/include/api/dual_abi_helper.h
  25. +2
    -0
      tests/ut/stub/include/api/graph.h
  26. +21
    -8
      tests/ut/stub/include/api/model.h
  27. +5
    -5
      tests/ut/stub/include/api/serialization.h
  28. +29
    -14
      tests/ut/stub/include/api/types.h
  29. +1
    -1
      third_party/mindspore

+ 2
- 1
cmake/external_libs/libevent.cmake View File

@@ -2,7 +2,7 @@ set(libevent_CFLAGS "-fstack-protector-all -D_FORTIFY_SOURCE=2 -O2")
set(libevent_LDFLAGS "-Wl,-z,now")
mindspore_add_pkg(libevent
VER 2.1.12
LIBS event event_pthreads
LIBS event event_pthreads event_core
URL https://github.com/libevent/libevent/releases/download/release-2.1.12-stable/libevent-2.1.12-stable.tar.gz
MD5 b5333f021f880fe76490d8a799cd79f4
CMAKE_OPTION -DCMAKE_BUILD_TYPE:STRING=Release -DBUILD_TESTING=OFF)
@@ -11,3 +11,4 @@ include_directories(${libevent_INC}) # 将指定目录添加到编译器的头

add_library(mindspore_serving::event ALIAS libevent::event)
add_library(mindspore_serving::event_pthreads ALIAS libevent::event_pthreads)
add_library(mindspore_serving::event_core ALIAS libevent::event_core)

+ 4
- 1
mindspore_serving/CMakeLists.txt View File

@@ -3,7 +3,7 @@

# Find Protobuf installation
# Looks for protobuf-config.cmake file installed by Protobuf's cmake installation.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wl,-rpath,$ORIGIN:$ORIGIN/lib")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wl,-rpath,$ORIGIN:$ORIGIN/lib -Wl,--no-as-needed")

add_library(protobuf::libprotobuf ALIAS protobuf::protobuf)
add_executable(protobuf::libprotoc ALIAS protobuf::protoc)
@@ -102,10 +102,13 @@ set_property(TARGET serving_ascend PROPERTY POSITION_INDEPENDENT_CODE TRUE)
target_link_libraries(serving_common PRIVATE PROTO_SRC_LIB)
target_link_libraries(serving_common PRIVATE ${_REFLECTION} ${_GRPC_GRPCPP} ${_PROTOBUF_LIBPROTOBUF} pthread)
target_link_libraries(serving_common PRIVATE mindspore_serving::event mindspore_serving::event_pthreads)
target_link_libraries(serving_common PRIVATE mindspore_serving::event_core)
target_link_libraries(serving_common PRIVATE mindspore_serving::glog)
target_link_libraries(serving_common PRIVATE mindspore_serving::eigen)
target_link_libraries(serving_common PRIVATE ${SECUREC_LIBRARY})

set_target_properties(serving_common PROPERTIES SKIP_BUILD_RPATH TRUE)

# python
add_compile_definitions(ENABLE_PYTHON)
file(GLOB_RECURSE PY_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "ccsrc/python/*.cc")


+ 16
- 0
mindspore_serving/ccsrc/python/worker/worker_py.cc View File

@@ -42,6 +42,10 @@ void PyWorker::OnEndStartServable(const std::string &servable_directory, const s
void PyWorker::StartServable(const std::string &model_directory, const std::string &model_name, uint32_t version_number,
const std::string &master_ip, uint32_t master_port, const std::string &worker_ip,
uint32_t worker_port) {
if (Worker::GetInstance().IsRunning()) {
MSI_LOG_EXCEPTION << "A servable has been started, only one servable can run in a process currently.";
}

auto notify_master = std::make_shared<GrpcNotfiyMaster>(master_ip, master_port, worker_ip, worker_port);
auto servable = std::make_shared<LocalModelServable>();
auto status = servable->StartServable(model_directory, model_name, version_number);
@@ -69,6 +73,10 @@ void PyWorker::StartServable(const std::string &model_directory, const std::stri

void PyWorker::StartServableInMaster(const std::string &model_directory, const std::string &model_name,
uint32_t version_number) {
if (Worker::GetInstance().IsRunning()) {
MSI_LOG_EXCEPTION << "A servable has been started, only one servable can run in a process currently.";
}

auto notify_master = std::make_shared<LocalNotifyMaster>();
auto servable = std::make_shared<LocalModelServable>();
auto status = servable->StartServable(model_directory, model_name, version_number);
@@ -92,6 +100,10 @@ void PyWorker::StartDistributedServable(const std::string &servable_directory, c
const std::string &worker_ip, uint32_t worker_port,
const std::string &master_ip, uint32_t master_port,
uint32_t wait_agents_time_in_seconds) {
if (Worker::GetInstance().IsRunning()) {
MSI_LOG_EXCEPTION << "A servable has been started, only one servable can run in a process currently.";
}

Status status;
auto servable = std::make_shared<DistributedServable>();
auto grpc_sever = std::make_shared<MSDistributedWorkerServer>(servable);
@@ -122,6 +134,10 @@ void PyWorker::StartDistributedServableInMaster(const std::string &servable_dire
const std::string &rank_table_json_file, uint32_t version_number,
const std::string &worker_ip, uint32_t worker_port,
uint32_t wait_agents_time_in_seconds) {
if (Worker::GetInstance().IsRunning()) {
MSI_LOG_EXCEPTION << "A servable has been started, only one servable can run in a process currently.";
}

Status status;
auto servable = std::make_shared<DistributedServable>();
auto grpc_sever = std::make_shared<MSDistributedWorkerServer>(servable);


+ 90
- 43
mindspore_serving/ccsrc/worker/inference/mindspore_model_wrap.cc View File

@@ -78,10 +78,6 @@ Status MindSporeModelWrap::LoadModelFromFile(serving::DeviceType device_type, ui
const std::string &file_name, ModelType model_type, bool with_batch_dim,
const std::vector<int> &without_batch_dim_inputs,
const std::map<std::string, std::string> &other_options) {
std::string ms_device_type = GetMsDeviceType(device_type);
if (ms_device_type.empty()) {
return INFER_STATUS_LOG_ERROR(FAILED) << "Invalid device type " << device_type;
}
auto ms_model_type = GetMsModelType(model_type);
if (ms_model_type == mindspore::kUnknownType) {
return INFER_STATUS_LOG_ERROR(FAILED) << "Invalid model type " << model_type;
@@ -89,26 +85,26 @@ Status MindSporeModelWrap::LoadModelFromFile(serving::DeviceType device_type, ui

std::shared_ptr<mindspore::Model> model = nullptr;
try {
mindspore::GlobalContext::SetGlobalDeviceTarget(ms_device_type);
mindspore::GlobalContext::SetGlobalDeviceID(device_id);
auto graph = mindspore::Serialization::LoadModel(file_name, ms_model_type);
auto context = TransformModelContext(other_options);
model = std::make_shared<mindspore::Model>(mindspore::GraphCell(graph), context);
mindspore::Graph graph;
auto ms_status = mindspore::Serialization::Load(file_name, ms_model_type, &graph);
auto context = TransformModelContext(device_type, device_id, other_options);
model = std::make_shared<mindspore::Model>();
mindspore::Status status = model->Build(mindspore::GraphCell(graph), context);
if (!status.IsOk()) {
return INFER_STATUS_LOG_ERROR(FAILED)
<< "Load model from file failed, model file: " << file_name << ", device_type: '" << device_type
<< "', device_id: " << device_id << ", model type: " << model_type << ", options: " << other_options
<< ", build error detail: " << status.ToString();
}
} catch (std::runtime_error &ex) {
return INFER_STATUS_LOG_ERROR(FAILED)
<< "Load model from file failed, model file: " << file_name << ", device_type: '" << ms_device_type
<< "Load model from file failed, model file: " << file_name << ", device_type: '" << device_type
<< "', device_id: " << device_id << ", model type: " << model_type << ", options: " << other_options;
}
mindspore::Status status = model->Build();
if (!status.IsOk()) {
return INFER_STATUS_LOG_ERROR(FAILED)
<< "Load model from file failed, model file: " << file_name << ", device_type: '" << ms_device_type
<< "', device_id: " << device_id << ", model type: " << model_type << ", options: " << other_options
<< ", build error detail: " << status.ToString();
}

ApiModelInfo api_model_info;
api_model_info.model = model;
api_model_info.device_type = ms_device_type;
api_model_info.device_type = device_type;
api_model_info.device_id = device_id;
api_model_info.with_batch_dim = with_batch_dim;
api_model_info.without_batch_dim_inputs = without_batch_dim_inputs;
@@ -118,45 +114,88 @@ Status MindSporeModelWrap::LoadModelFromFile(serving::DeviceType device_type, ui
}
GetModelBatchSize(&api_model_info);
model_ = api_model_info;
MSI_LOG_INFO << "Load model from file success, model file: " << file_name << ", device_type: '" << ms_device_type
MSI_LOG_INFO << "Load model from file success, model file: " << file_name << ", device_type: '" << device_type
<< "', device_id: " << device_id << ", model type: " << model_type << ", options: " << other_options;
return SUCCESS;
}

std::shared_ptr<Context> MindSporeModelWrap::TransformModelContext(const std::map<std::string, std::string> &options) {
using ContextStrFun = std::function<void(const std::shared_ptr<Context> &, const std::string &)>;
ContextStrFun set_output_type = [](const std::shared_ptr<Context> &context, const std::string &val) {
std::shared_ptr<DeviceInfoContext> MindSporeModelWrap::TransformAscend310ModelContext(
uint32_t device_id, const std::map<std::string, std::string> &options) {
auto context_info = std::make_shared<Ascend310DeviceInfo>();
context_info->SetDeviceID(device_id);

using ContextStrFun = std::function<void(const std::string &)>;
ContextStrFun set_output_type = [context_info](const std::string &val) {
// "FP32", "FP16", "UINT8"
if (val == "FP32") {
mindspore::ModelContext::SetOutputType(context, mindspore::DataType::kNumberTypeFloat32);
context_info->SetOutputType(mindspore::DataType::kNumberTypeFloat32);
} else if (val == "FP16") {
mindspore::ModelContext::SetOutputType(context, mindspore::DataType::kNumberTypeFloat16);
context_info->SetOutputType(mindspore::DataType::kNumberTypeFloat16);
} else if (val == "UINT8") {
mindspore::ModelContext::SetOutputType(context, mindspore::DataType::kNumberTypeUInt8);
context_info->SetOutputType(mindspore::DataType::kNumberTypeUInt8);
} else {
MSI_LOG_ERROR << "Set model context output type failed, unknown data type " << val;
}
};
auto context = std::make_shared<mindspore::ModelContext>();
for (auto &item : options) {
const auto &key = item.first;
const auto &value = item.second;
if (key == "acl_option.insert_op_config_file_path") {
mindspore::ModelContext::SetInsertOpConfigPath(context, value);
context_info->SetInsertOpConfigPath(value);
} else if (key == "acl_option.input_format") {
mindspore::ModelContext::SetInputFormat(context, value);
context_info->SetInputFormat(value);
} else if (key == "acl_option.input_shape") {
mindspore::ModelContext::SetInputShape(context, value);
context_info->SetInputShape(value);
} else if (key == "acl_option.output_type") {
set_output_type(context, value);
set_output_type(value);
} else if (key == "acl_option.precision_mode") {
mindspore::ModelContext::SetPrecisionMode(context, value);
context_info->SetPrecisionMode(value);
} else if (key == "acl_option.op_select_impl_mode") {
mindspore::ModelContext::SetOpSelectImplMode(context, value);
} else if (key == "gpu_option.enable_trt_infer") {
mindspore::ModelContext::SetGpuTrtInferMode(context, value);
context_info->SetOpSelectImplMode(value);
}
}
return context_info;
}

std::shared_ptr<DeviceInfoContext> MindSporeModelWrap::TransformAscend910ModelContext(
uint32_t device_id, const std::map<std::string, std::string> &options) {
auto context_info = std::make_shared<Ascend910DeviceInfo>();
context_info->SetDeviceID(device_id);
return context_info;
}
std::shared_ptr<DeviceInfoContext> MindSporeModelWrap::TransformNvidiaGPUModelContext(
uint32_t device_id, const std::map<std::string, std::string> &options) {
auto context_info = std::make_shared<NvidiaGPUDeviceInfo>();
context_info->SetDeviceID(device_id);

for (auto &item : options) {
const auto &key = item.first;
const auto &value = item.second;
if (key == "gpu_option.enable_trt_infer") {
if (value == "True") {
context_info->SetGpuTrtInferMode(true);
} else {
context_info->SetGpuTrtInferMode(false);
}
}
}
return context_info;
}

std::shared_ptr<Context> MindSporeModelWrap::TransformModelContext(serving::DeviceType device_type, uint32_t device_id,
const std::map<std::string, std::string> &options) {
auto context = std::make_shared<mindspore::Context>();
std::shared_ptr<mindspore::DeviceInfoContext> context_info = nullptr;
if (device_type == kDeviceTypeAscendMS) {
context_info = TransformAscend910ModelContext(device_id, options);
} else if (device_type == kDeviceTypeAscendCL) {
context_info = TransformAscend310ModelContext(device_id, options);
} else if (device_type == kDeviceTypeGpu) {
context_info = TransformNvidiaGPUModelContext(device_id, options);
}
if (context_info != nullptr) {
context->MutableDeviceInfo().push_back(context_info);
}
return context;
}

@@ -311,7 +350,12 @@ Status MindSporeModelWrap::ExecuteModelCommon(size_t request_size, const FuncMak
}
std::vector<mindspore::MSTensor> inputs;
for (size_t i = 0; i < input_names.size(); i++) {
inputs.push_back(in_func(i, input_names[i]));
auto tensor = in_func(i, input_names[i]);
if (tensor == nullptr) {
return INFER_STATUS_LOG_ERROR(FAILED) << "Failed to create input " << i << " MSTensor";
}
inputs.push_back(*tensor);
mindspore::MSTensor::DestroyTensorPtr(tensor);
}
std::vector<mindspore::MSTensor> outputs;
mindspore::Status status = model->Predict(inputs, &outputs);
@@ -344,8 +388,8 @@ std::vector<serving::TensorInfo> MindSporeModelWrap::GetOutputInfos() const { re
ssize_t MindSporeModelWrap::GetBatchSize() const { return model_.batch_size; }

bool MindSporeModelWrap::CheckModelSupport(DeviceType device_type, ModelType model_type) const {
std::string ms_device_type = GetMsDeviceType(device_type);
if (ms_device_type.empty()) {
auto ms_device_type = GetMsDeviceType(device_type);
if (ms_device_type == mindspore::kInvalidDeviceType) {
return false;
}
auto ms_model_type = GetMsModelType(model_type);
@@ -376,22 +420,25 @@ mindspore::ModelType MindSporeModelWrap::GetMsModelType(serving::ModelType model
return ms_model_type;
}

std::string MindSporeModelWrap::GetMsDeviceType(serving::DeviceType device_type) {
std::string device_type_str;
mindspore::DeviceType MindSporeModelWrap::GetMsDeviceType(serving::DeviceType device_type) {
mindspore::DeviceType ms_device_type = mindspore::DeviceType::kInvalidDeviceType;
switch (device_type) {
case kDeviceTypeAscendMS:
device_type_str = mindspore::kDeviceTypeAscend910;
ms_device_type = mindspore::DeviceType::kAscend910;
break;
case kDeviceTypeAscendCL:
device_type_str = mindspore::kDeviceTypeAscend310;
ms_device_type = mindspore::DeviceType::kAscend310;
break;
case kDeviceTypeGpu:
device_type_str = mindspore::kDeviceTypeGPU;
ms_device_type = mindspore::DeviceType::kNvidiaGPU;
break;
case kDeviceTypeCpu:
ms_device_type = mindspore::DeviceType::kCPU;
break;
default:
break;
}
return device_type_str;
return ms_device_type;
}

ApiBufferTensorWrap::ApiBufferTensorWrap() = default;


+ 13
- 4
mindspore_serving/ccsrc/worker/inference/mindspore_model_wrap.h View File

@@ -40,7 +40,7 @@ struct ApiModelInfo {
std::vector<serving::TensorInfo> output_tensor_infos;
std::shared_ptr<mindspore::Model> model = nullptr;
uint32_t batch_size = 0;
std::string device_type;
serving::DeviceType device_type;
uint32_t device_id = 0;
bool with_batch_dim = false;
std::vector<int> without_batch_dim_inputs;
@@ -71,15 +71,24 @@ class MindSporeModelWrap : public InferenceBase {
private:
ApiModelInfo model_;

using FuncMakeInBuffer = std::function<mindspore::MSTensor(size_t index, const std::string &name)>;
using FuncMakeInBuffer = std::function<mindspore::MSTensor *(size_t index, const std::string &name)>;
using FuncMakeOutTensor =
std::function<void(const mindspore::MSTensor, DataType data_type, const std::vector<int64_t> &shape)>;
Status ExecuteModelCommon(size_t request_size, const FuncMakeInBuffer &in_func, const FuncMakeOutTensor &out_func);
Status GetModelInfos(ApiModelInfo *model_info);
std::shared_ptr<Context> TransformModelContext(const std::map<std::string, std::string> &other_options);
std::shared_ptr<Context> TransformModelContext(serving::DeviceType device_type, uint32_t device_id,
const std::map<std::string, std::string> &other_options);

std::shared_ptr<DeviceInfoContext> TransformAscend310ModelContext(uint32_t device_id,
const std::map<std::string, std::string> &options);
std::shared_ptr<DeviceInfoContext> TransformAscend910ModelContext(uint32_t device_id,
const std::map<std::string, std::string> &options);
std::shared_ptr<DeviceInfoContext> TransformNvidiaGPUModelContext(uint32_t device_id,
const std::map<std::string, std::string> &options);

void GetModelBatchSize(ApiModelInfo *model_info);
static mindspore::ModelType GetMsModelType(serving::ModelType model_type);
static std::string GetMsDeviceType(serving::DeviceType device_type);
static mindspore::DeviceType GetMsDeviceType(serving::DeviceType device_type);
};

class ApiBufferTensorWrap : public TensorBase {


+ 2
- 0
mindspore_serving/worker/distributed/agent_startup.py View File

@@ -350,6 +350,7 @@ def _startup_agents(common_meta, worker_ip, worker_port,

class DistributedServableConfig:
"""Python DistributedServableConfig"""

def __init__(self):
self.rank_table_content = ""
self.rank_list = None
@@ -407,6 +408,7 @@ def _get_worker_distributed_config(worker_ip, worker_port):
# pylint: disable=broad-except
except Exception as e:
c_send_pipe.send(e)

process = Process(target=process_fun, args=(c_send_pipe,),
name=f"worker_agent_get_agents_config_from_worker")
process.start()


+ 59
- 33
tests/ut/stub/cxx_api/cell.cc View File

@@ -21,12 +21,19 @@
namespace mindspore {
std::vector<Output> CellBase::operator()(const std::vector<Input> &inputs) const { return Clone()->Construct(inputs); }

ParameterCell::ParameterCell(const ParameterCell &cell) : tensor_(cell.tensor_.Clone()) {}
ParameterCell::ParameterCell(const ParameterCell &cell) {
auto tmp_ptr = cell.tensor_.Clone();
tensor_ = *tmp_ptr;
MSTensor::DestroyTensorPtr(tmp_ptr);
}

ParameterCell &ParameterCell::operator=(const ParameterCell &cell) {
if (&cell == this) {
return *this;
}
tensor_ = cell.tensor_.Clone();
auto tmp_ptr = cell.tensor_.Clone();
tensor_ = *tmp_ptr;
MSTensor::DestroyTensorPtr(tmp_ptr);
return *this;
}

@@ -40,10 +47,16 @@ ParameterCell &ParameterCell::operator=(ParameterCell &&cell) {
return *this;
}

ParameterCell::ParameterCell(const MSTensor &tensor) : tensor_(tensor.Clone()) {}
ParameterCell::ParameterCell(const MSTensor &tensor) {
auto tmp_ptr = tensor.Clone();
tensor_ = *tmp_ptr;
MSTensor::DestroyTensorPtr(tmp_ptr);
}

ParameterCell &ParameterCell::operator=(const MSTensor &tensor) {
tensor_ = tensor.Clone();
auto tmp_ptr = tensor.Clone();
tensor_ = *tmp_ptr;
MSTensor::DestroyTensorPtr(tmp_ptr);
return *this;
}

@@ -54,54 +67,67 @@ ParameterCell &ParameterCell::operator=(MSTensor &&tensor) {
return *this;
}

GraphCell::GraphCell(const Graph &graph)
: graph_(std::make_shared<Graph>(graph)),
executor_(Factory<GraphCell::GraphImpl>::Instance().Create(GlobalContext::GetGlobalDeviceTarget())) {
MS_EXCEPTION_IF_NULL(graph_);
MS_EXCEPTION_IF_NULL(executor_);
executor_->SetGraph(graph_);
}
GraphCell::GraphCell(const Graph &graph) : graph_(std::make_shared<Graph>(graph)) { MS_EXCEPTION_IF_NULL(graph_); }

GraphCell::GraphCell(const std::shared_ptr<Graph> &graph)
: graph_(graph),
executor_(Factory<GraphCell::GraphImpl>::Instance().Create(GlobalContext::GetGlobalDeviceTarget())) {
MS_EXCEPTION_IF_NULL(graph_);
MS_EXCEPTION_IF_NULL(executor_);
executor_->SetGraph(graph_);
}
GraphCell::GraphCell(const std::shared_ptr<Graph> &graph) : graph_(graph) { MS_EXCEPTION_IF_NULL(graph_); }

GraphCell::GraphCell(Graph &&graph)
: graph_(std::make_shared<Graph>(graph)),
executor_(Factory<GraphCell::GraphImpl>::Instance().Create(GlobalContext::GetGlobalDeviceTarget())) {
MS_EXCEPTION_IF_NULL(graph_);
MS_EXCEPTION_IF_NULL(executor_);
executor_->SetGraph(graph_);
}
GraphCell::GraphCell(Graph &&graph) : graph_(std::make_shared<Graph>(graph)) { MS_EXCEPTION_IF_NULL(graph_); }

Status GraphCell::Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) {
MS_EXCEPTION_IF_NULL(executor_);
if (executor_ == nullptr) {
executor_ = Factory<GraphCell::GraphImpl>::Instance().Create(g_device_target);
if (executor_ == nullptr) {
MS_LOG(ERROR) << "Create graph impl for device target " << g_device_target << " failed.";
return kMEFailed;
}
executor_->SetGraph(graph_);
}
return executor_->Run(inputs, outputs);
}

Status GraphCell::Load() {
MS_EXCEPTION_IF_NULL(executor_);
return executor_->Load();
Status GraphCell::Load(uint32_t device_id) {
if (executor_ == nullptr) {
executor_ = Factory<GraphCell::GraphImpl>::Instance().Create(g_device_target);
if (executor_ == nullptr) {
MS_LOG(ERROR) << "Create graph impl for device target " << g_device_target << " failed.";
return kMEFailed;
}
executor_->SetGraph(graph_);
}
return executor_->Load(device_id);
}

std::vector<MSTensor> GraphCell::GetInputs() {
MS_EXCEPTION_IF_NULL(executor_);
if (executor_ == nullptr) {
executor_ = Factory<GraphCell::GraphImpl>::Instance().Create(g_device_target);
if (executor_ == nullptr) {
MS_LOG(ERROR) << "Create graph impl for device target " << g_device_target << " failed.";
return {};
}
executor_->SetGraph(graph_);
}
return executor_->GetInputs();
}

std::vector<MSTensor> GraphCell::GetOutputs() {
MS_EXCEPTION_IF_NULL(executor_);
if (executor_ == nullptr) {
executor_ = Factory<GraphCell::GraphImpl>::Instance().Create(g_device_target);
if (executor_ == nullptr) {
MS_LOG(ERROR) << "Create graph impl for device target " << g_device_target << " failed.";
return {};
}
executor_->SetGraph(graph_);
}
return executor_->GetOutputs();
}

InputAndOutput::InputAndOutput() : cell_(nullptr), prev_(), index_(-1) {}

InputAndOutput::InputAndOutput(const MSTensor &tensor)
: cell_(std::make_shared<ParameterCell>(tensor.Clone())), prev_(), index_(-1) {}
InputAndOutput::InputAndOutput(const MSTensor &tensor) : prev_(), index_(-1) {
auto tmp_ptr = tensor.Clone();
cell_ = std::make_shared<ParameterCell>(*tmp_ptr);
MSTensor::DestroyTensorPtr(tmp_ptr);
}
InputAndOutput::InputAndOutput(MSTensor &&tensor)
: cell_(std::make_shared<ParameterCell>(tensor)), prev_(), index_(-1) {}



+ 195
- 116
tests/ut/stub/cxx_api/context.cc View File

@@ -17,36 +17,57 @@
#include <any>
#include <map>
#include <type_traits>
#include "cxx_api/factory.h"
#include "utils/log_adapter.h"

constexpr auto kGlobalContextDeviceTarget = "mindspore.ascend.globalcontext.device_target";
constexpr auto kGlobalContextDeviceID = "mindspore.ascend.globalcontext.device_id";
constexpr auto kModelOptionInsertOpCfgPath = "mindspore.option.insert_op_config_file_path"; // aipp config file
constexpr auto kModelOptionInputFormat = "mindspore.option.input_format"; // nchw or nhwc
constexpr auto kModelOptionInputShape = "mindspore.option.input_shape";
constexpr auto kModelOptionCpuEnableFP16 = "mindspore.option.cpu.enable_fp16";
constexpr auto kModelOptionCpuThreadAffinity = "mindspore.option.cpu.thread_affinity";
constexpr auto kModelOptionMaliGpuEnableFP16 = "mindspore.option.mali_gpu.enable_fp16";
constexpr auto kModelOptionKirinNpuFrequency = "mindspore.option.kirin_npu.frequency";
constexpr auto kModelOptionDeviceID = "mindspore.option.device_id";
constexpr auto kModelOptionNvidiaGpuDeviceID = kModelOptionDeviceID;
constexpr auto kModelOptionNvidiaGpuTrtInferMode = "mindspore.option.nvidia_gpu.trt_infer_mode";
constexpr auto kModelOptionAscend910DeviceID = kModelOptionDeviceID;
constexpr auto kModelOptionAscend310DeviceID = kModelOptionDeviceID;
constexpr auto kModelOptionAscend310DumpCfgPath = "mindspore.option.ascend310.dump_config_file_path";
constexpr auto kModelOptionAscend310InsertOpCfgPath =
"mindspore.option.ascend310.insert_op_config_file_path"; // aipp config file
constexpr auto kModelOptionAscend310InputFormat = "mindspore.option.ascend310.input_format"; // nchw or nhwc
constexpr auto kModelOptionAscend310InputShapeMap = "mindspore.option.ascend310.input_shape_map";
constexpr auto kModelOptionAscend310InputShape = "mindspore.option.ascend310.input_shape";
// Mandatory while dynamic batch: e.g. "input_op_name1: n1,c2,h3,w4;input_op_name2: n4,c3,h2,w1"
constexpr auto kModelOptionOutputType = "mindspore.option.output_type"; // "FP32", "UINT8" or "FP16", default as "FP32"
constexpr auto kModelOptionPrecisionMode = "mindspore.option.precision_mode";
constexpr auto kModelOptionAscend310OutputType =
"mindspore.option.ascend310.output_type"; // "FP32", "UINT8" or "FP16", default as "FP32"
constexpr auto kModelOptionAscend310PrecisionMode = "mindspore.option.ascend310.precision_mode";
// "force_fp16", "allow_fp32_to_fp16", "must_keep_origin_dtype" or "allow_mix_precision", default as "force_fp16"
constexpr auto kModelOptionOpSelectImplMode = "mindspore.option.op_select_impl_mode";
constexpr auto kModelOptionAscend310OpSelectImplMode = "mindspore.option.ascend310.op_select_impl_mode";
constexpr auto KModelOptionAscend310FusionSwitchCfgPath = "mindspore.option.ascend310.fusion_switch_config_file_path";
// "False": Inference with native backend, "True": Inference with Tensor-RT engine, default as "False"
constexpr auto kModelOptionGpuTrtInferMode = "mindspore.option.gpu_trt_infer_mode";
constexpr auto kModelOptionAscend310DynamicBatchSize = "mindspore.option.ascend310.dynamic_batch_size";

namespace mindspore {
class Allocator {};

struct Context::Data {
std::vector<std::shared_ptr<DeviceInfoContext>> device_info_list;
int32_t thread_num;
std::shared_ptr<Allocator> allocator;
};

struct DeviceInfoContext::Data {
std::map<std::string, std::any> params;
};

Context::Context() : data(std::make_shared<Data>()) {}
Context::Context() : data_(std::make_shared<Data>()) {}

template <class T, typename U = std::remove_cv_t<std::remove_reference_t<T>>>
static const U &GetValue(const std::shared_ptr<Context> &context, const std::string &key) {
static const U &GetValue(const std::shared_ptr<DeviceInfoContext::Data> &data, const std::string &key) {
static U empty_result;
if (context == nullptr || context->data == nullptr) {
if (data == nullptr) {
return empty_result;
}
auto iter = context->data->params.find(key);
if (iter == context->data->params.end()) {
auto iter = data->params.find(key);
if (iter == data->params.end()) {
return empty_result;
}
const std::any &value = iter->second;
@@ -57,147 +78,205 @@ static const U &GetValue(const std::shared_ptr<Context> &context, const std::str
return std::any_cast<const U &>(value);
}

std::shared_ptr<Context> GlobalContext::GetGlobalContext() {
static std::shared_ptr<Context> g_context = std::make_shared<Context>();
return g_context;
void Context::SetThreadNum(int32_t thread_num) {
MS_EXCEPTION_IF_NULL(data_);
data_->thread_num = thread_num;
}
int32_t Context::GetThreadNum() const {
MS_EXCEPTION_IF_NULL(data_);
return data_->thread_num;
}

void GlobalContext::SetGlobalDeviceTarget(const std::vector<char> &device_target) {
auto global_context = GetGlobalContext();
MS_EXCEPTION_IF_NULL(global_context);
if (global_context->data == nullptr) {
global_context->data = std::make_shared<Data>();
MS_EXCEPTION_IF_NULL(global_context->data);
}
global_context->data->params[kGlobalContextDeviceTarget] = CharToString(device_target);
void Context::SetAllocator(const std::shared_ptr<Allocator> &allocator) {
MS_EXCEPTION_IF_NULL(data_);
data_->allocator = allocator;
}
std::shared_ptr<Allocator> Context::GetAllocator() const {
MS_EXCEPTION_IF_NULL(data_);
return data_->allocator;
}

std::vector<char> GlobalContext::GetGlobalDeviceTargetChar() {
auto global_context = GetGlobalContext();
MS_EXCEPTION_IF_NULL(global_context);
const std::string &ref = GetValue<std::string>(global_context, kGlobalContextDeviceTarget);
return StringToChar(ref);
std::vector<std::shared_ptr<DeviceInfoContext>> &Context::MutableDeviceInfo() {
MS_EXCEPTION_IF_NULL(data_);
return data_->device_info_list;
}

void GlobalContext::SetGlobalDeviceID(const uint32_t &device_id) {
auto global_context = GetGlobalContext();
MS_EXCEPTION_IF_NULL(global_context);
if (global_context->data == nullptr) {
global_context->data = std::make_shared<Data>();
MS_EXCEPTION_IF_NULL(global_context->data);
}
global_context->data->params[kGlobalContextDeviceID] = device_id;
DeviceInfoContext::DeviceInfoContext() : data_(std::make_shared<Data>()) {}

void CPUDeviceInfo::SetEnableFP16(bool is_fp16) {
MS_EXCEPTION_IF_NULL(data_);
data_->params[kModelOptionCpuEnableFP16] = is_fp16;
}
bool CPUDeviceInfo::GetEnableFP16() const {
MS_EXCEPTION_IF_NULL(data_);
return GetValue<bool>(data_, kModelOptionCpuEnableFP16);
}

uint32_t GlobalContext::GetGlobalDeviceID() {
auto global_context = GetGlobalContext();
MS_EXCEPTION_IF_NULL(global_context);
return GetValue<uint32_t>(global_context, kGlobalContextDeviceID);
void CPUDeviceInfo::SetThreadAffinity(int affinity) {
MS_EXCEPTION_IF_NULL(data_);
data_->params[kModelOptionCpuThreadAffinity] = affinity;
}
int CPUDeviceInfo::GetThreadAffinity() const {
MS_EXCEPTION_IF_NULL(data_);
return GetValue<bool>(data_, kModelOptionCpuThreadAffinity);
}

void ModelContext::SetInsertOpConfigPath(const std::shared_ptr<Context> &context, const std::vector<char> &cfg_path) {
MS_EXCEPTION_IF_NULL(context);
if (context->data == nullptr) {
context->data = std::make_shared<Data>();
MS_EXCEPTION_IF_NULL(context->data);
}
context->data->params[kModelOptionInsertOpCfgPath] = CharToString(cfg_path);
void MaliGPUDeviceInfo::SetEnableFP16(bool is_fp16) {
MS_EXCEPTION_IF_NULL(data_);
data_->params[kModelOptionMaliGpuEnableFP16] = is_fp16;
}
bool MaliGPUDeviceInfo::GetEnableFP16() const {
MS_EXCEPTION_IF_NULL(data_);
return GetValue<bool>(data_, kModelOptionMaliGpuEnableFP16);
}

std::vector<char> ModelContext::GetInsertOpConfigPathChar(const std::shared_ptr<Context> &context) {
MS_EXCEPTION_IF_NULL(context);
const std::string &ref = GetValue<std::string>(context, kModelOptionInsertOpCfgPath);
return StringToChar(ref);
void KirinNPUDeviceInfo::SetFrequency(int frequency) {
MS_EXCEPTION_IF_NULL(data_);
data_->params[kModelOptionKirinNpuFrequency] = frequency;
}
int KirinNPUDeviceInfo::GetFrequency() const {
MS_EXCEPTION_IF_NULL(data_);
return GetValue<int>(data_, kModelOptionKirinNpuFrequency);
}

void ModelContext::SetInputFormat(const std::shared_ptr<Context> &context, const std::vector<char> &format) {
MS_EXCEPTION_IF_NULL(context);
if (context->data == nullptr) {
context->data = std::make_shared<Data>();
MS_EXCEPTION_IF_NULL(context->data);
}
context->data->params[kModelOptionInputFormat] = CharToString(format);
// Stores the target Nvidia GPU device id.
void NvidiaGPUDeviceInfo::SetDeviceID(uint32_t device_id) {
  MS_EXCEPTION_IF_NULL(data_);
  auto &param_map = data_->params;
  param_map[kModelOptionNvidiaGpuDeviceID] = device_id;
}
// Reads back the device id stored by SetDeviceID.
uint32_t NvidiaGPUDeviceInfo::GetDeviceID() const {
  MS_EXCEPTION_IF_NULL(data_);
  const uint32_t device_id = GetValue<uint32_t>(data_, kModelOptionNvidiaGpuDeviceID);
  return device_id;
}

std::vector<char> ModelContext::GetInputFormatChar(const std::shared_ptr<Context> &context) {
MS_EXCEPTION_IF_NULL(context);
const std::string &ref = GetValue<std::string>(context, kModelOptionInputFormat);
return StringToChar(ref);
// Enables or disables TensorRT inference mode for the Nvidia GPU backend.
void NvidiaGPUDeviceInfo::SetGpuTrtInferMode(bool gpu_trt_infer_mode) {
  MS_EXCEPTION_IF_NULL(data_);
  auto &param_map = data_->params;
  param_map[kModelOptionNvidiaGpuTrtInferMode] = gpu_trt_infer_mode;
}
// Reads back the TensorRT-mode flag stored by SetGpuTrtInferMode.
bool NvidiaGPUDeviceInfo::GetGpuTrtInferMode() const {
  MS_EXCEPTION_IF_NULL(data_);
  const bool trt_mode = GetValue<bool>(data_, kModelOptionNvidiaGpuTrtInferMode);
  return trt_mode;
}

void ModelContext::SetInputShape(const std::shared_ptr<Context> &context, const std::vector<char> &shape) {
MS_EXCEPTION_IF_NULL(context);
if (context->data == nullptr) {
context->data = std::make_shared<Data>();
MS_EXCEPTION_IF_NULL(context->data);
}
context->data->params[kModelOptionInputShape] = CharToString(shape);
// Stores the target Ascend 910 device id.
void Ascend910DeviceInfo::SetDeviceID(uint32_t device_id) {
  MS_EXCEPTION_IF_NULL(data_);
  auto &param_map = data_->params;
  param_map[kModelOptionAscend910DeviceID] = device_id;
}
// Reads back the device id stored by SetDeviceID.
uint32_t Ascend910DeviceInfo::GetDeviceID() const {
  MS_EXCEPTION_IF_NULL(data_);
  const uint32_t device_id = GetValue<uint32_t>(data_, kModelOptionAscend910DeviceID);
  return device_id;
}

std::vector<char> ModelContext::GetInputShapeChar(const std::shared_ptr<Context> &context) {
MS_EXCEPTION_IF_NULL(context);
const std::string &ref = GetValue<std::string>(context, kModelOptionInputShape);
return StringToChar(ref);
// Stores the target Ascend 310 device id.
void Ascend310DeviceInfo::SetDeviceID(uint32_t device_id) {
  MS_EXCEPTION_IF_NULL(data_);
  auto &param_map = data_->params;
  param_map[kModelOptionAscend310DeviceID] = device_id;
}
// Reads back the device id stored by SetDeviceID.
uint32_t Ascend310DeviceInfo::GetDeviceID() const {
  MS_EXCEPTION_IF_NULL(data_);
  const uint32_t device_id = GetValue<uint32_t>(data_, kModelOptionAscend310DeviceID);
  return device_id;
}

void ModelContext::SetOutputType(const std::shared_ptr<Context> &context, enum DataType output_type) {
MS_EXCEPTION_IF_NULL(context);
if (context->data == nullptr) {
context->data = std::make_shared<Data>();
MS_EXCEPTION_IF_NULL(context->data);
}
context->data->params[kModelOptionOutputType] = output_type;
// Stores the path to the ACL dump configuration file.
void Ascend310DeviceInfo::SetDumpConfigPath(const std::vector<char> &cfg_path) {
  MS_EXCEPTION_IF_NULL(data_);
  data_->params[kModelOptionAscend310DumpCfgPath] = CharToString(cfg_path);
}
// Reads back the dump config path stored by SetDumpConfigPath.
// Fix: read with kModelOptionAscend310DumpCfgPath — the same key the setter
// writes. The previous code read kModelOptionAscend310DeviceID, which holds
// a uint32_t device id, not a string.
std::vector<char> Ascend310DeviceInfo::GetDumpConfigPathChar() const {
  MS_EXCEPTION_IF_NULL(data_);
  const std::string &ref = GetValue<std::string>(data_, kModelOptionAscend310DumpCfgPath);
  return StringToChar(ref);
}

enum DataType ModelContext::GetOutputType(const std::shared_ptr<Context> &context) {
MS_EXCEPTION_IF_NULL(context);
return GetValue<enum DataType>(context, kModelOptionOutputType);
// Stores the path to the AIPP insert-op configuration file.
void Ascend310DeviceInfo::SetInsertOpConfigPath(const std::vector<char> &cfg_path) {
  MS_EXCEPTION_IF_NULL(data_);
  auto &param_map = data_->params;
  param_map[kModelOptionAscend310InsertOpCfgPath] = CharToString(cfg_path);
}
// Reads back the insert-op config path stored by SetInsertOpConfigPath.
std::vector<char> Ascend310DeviceInfo::GetInsertOpConfigPathChar() const {
  MS_EXCEPTION_IF_NULL(data_);
  const auto cfg_path = GetValue<std::string>(data_, kModelOptionAscend310InsertOpCfgPath);
  return StringToChar(cfg_path);
}

void ModelContext::SetPrecisionMode(const std::shared_ptr<Context> &context, const std::vector<char> &precision_mode) {
MS_EXCEPTION_IF_NULL(context);
if (context->data == nullptr) {
context->data = std::make_shared<Data>();
MS_EXCEPTION_IF_NULL(context->data);
}
context->data->params[kModelOptionPrecisionMode] = CharToString(precision_mode);
// Stores the model input format string (e.g. "NCHW").
void Ascend310DeviceInfo::SetInputFormat(const std::vector<char> &format) {
  MS_EXCEPTION_IF_NULL(data_);
  auto &param_map = data_->params;
  param_map[kModelOptionAscend310InputFormat] = CharToString(format);
}
// Reads back the input format stored by SetInputFormat.
std::vector<char> Ascend310DeviceInfo::GetInputFormatChar() const {
  MS_EXCEPTION_IF_NULL(data_);
  const auto format = GetValue<std::string>(data_, kModelOptionAscend310InputFormat);
  return StringToChar(format);
}

std::vector<char> ModelContext::GetPrecisionModeChar(const std::shared_ptr<Context> &context) {
MS_EXCEPTION_IF_NULL(context);
const std::string &ref = GetValue<std::string>(context, kModelOptionPrecisionMode);
// Stores the model input shape as its serialized string form.
void Ascend310DeviceInfo::SetInputShape(const std::vector<char> &shape) {
  MS_EXCEPTION_IF_NULL(data_);
  auto &param_map = data_->params;
  param_map[kModelOptionAscend310InputShape] = CharToString(shape);
}
// Reads back the input shape string stored by SetInputShape.
std::vector<char> Ascend310DeviceInfo::GetInputShapeChar() const {
  MS_EXCEPTION_IF_NULL(data_);
  const auto shape = GetValue<std::string>(data_, kModelOptionAscend310InputShape);
  return StringToChar(shape);
}

void ModelContext::SetOpSelectImplMode(const std::shared_ptr<Context> &context,
const std::vector<char> &op_select_impl_mode) {
MS_EXCEPTION_IF_NULL(context);
if (context->data == nullptr) {
context->data = std::make_shared<Data>();
MS_EXCEPTION_IF_NULL(context->data);
// Serializes the supported dynamic batch sizes as a comma-separated list
// (e.g. "1,2,4") and stores it under kModelOptionAscend310DynamicBatchSize.
// Fix: removed a stray leftover line that assigned through an undeclared
// `context` pointer with an undeclared `op_select_impl_mode` argument —
// residue of the removed static ModelContext API; it does not compile here.
void Ascend310DeviceInfo::SetDynamicBatchSize(const std::vector<size_t> &dynamic_batch_size) {
  MS_EXCEPTION_IF_NULL(data_);
  std::string batchs = "";
  for (size_t i = 0; i < dynamic_batch_size.size(); ++i) {
    if (i != 0) {
      batchs.push_back(',');  // separator between consecutive batch sizes
    }
    batchs += std::to_string(dynamic_batch_size[i]);
  }
  data_->params[kModelOptionAscend310DynamicBatchSize] = batchs;
}
// Reads back the comma-separated dynamic batch size list stored by
// SetDynamicBatchSize.
std::vector<char> Ascend310DeviceInfo::GetDynamicBatchSizeChar() const {
  MS_EXCEPTION_IF_NULL(data_);
  const auto batch_list = GetValue<std::string>(data_, kModelOptionAscend310DynamicBatchSize);
  return StringToChar(batch_list);
}

std::vector<char> ModelContext::GetOpSelectImplModeChar(const std::shared_ptr<Context> &context) {
MS_EXCEPTION_IF_NULL(context);
const std::string &ref = GetValue<std::string>(context, kModelOptionOpSelectImplMode);
// Stores the precision mode string (e.g. "allow_fp32_to_fp16").
void Ascend310DeviceInfo::SetPrecisionMode(const std::vector<char> &precision_mode) {
  MS_EXCEPTION_IF_NULL(data_);
  auto &param_map = data_->params;
  param_map[kModelOptionAscend310PrecisionMode] = CharToString(precision_mode);
}
// Reads back the precision mode stored by SetPrecisionMode.
std::vector<char> Ascend310DeviceInfo::GetPrecisionModeChar() const {
  MS_EXCEPTION_IF_NULL(data_);
  const auto mode = GetValue<std::string>(data_, kModelOptionAscend310PrecisionMode);
  return StringToChar(mode);
}

void ModelContext::SetGpuTrtInferMode(const std::shared_ptr<Context> &context,
const std::vector<char> &gpu_trt_infer_mode) {
MS_EXCEPTION_IF_NULL(context);
if (context->data == nullptr) {
context->data = std::make_shared<Data>();
MS_EXCEPTION_IF_NULL(context->data);
}
context->data->params[kModelOptionGpuTrtInferMode] = CharToString(gpu_trt_infer_mode);
// Stores the operator-selection implementation mode string.
void Ascend310DeviceInfo::SetOpSelectImplMode(const std::vector<char> &op_select_impl_mode) {
  MS_EXCEPTION_IF_NULL(data_);
  auto &param_map = data_->params;
  param_map[kModelOptionAscend310OpSelectImplMode] = CharToString(op_select_impl_mode);
}
// Reads back the op-select impl mode stored by SetOpSelectImplMode.
std::vector<char> Ascend310DeviceInfo::GetOpSelectImplModeChar() const {
  MS_EXCEPTION_IF_NULL(data_);
  const auto mode = GetValue<std::string>(data_, kModelOptionAscend310OpSelectImplMode);
  return StringToChar(mode);
}

std::vector<char> ModelContext::GetGpuTrtInferModeChar(const std::shared_ptr<Context> &context) {
MS_EXCEPTION_IF_NULL(context);
const std::string &ref = GetValue<std::string>(context, kModelOptionGpuTrtInferMode);
// Stores the path to the fusion-switch configuration file.
// NOTE: the key constant is spelled with a capital 'K'
// (KModelOptionAscend310FusionSwitchCfgPath) in its declaration; it must be
// referenced as declared.
void Ascend310DeviceInfo::SetFusionSwitchConfigPath(const std::vector<char> &cfg_path) {
  MS_EXCEPTION_IF_NULL(data_);
  auto &param_map = data_->params;
  param_map[KModelOptionAscend310FusionSwitchCfgPath] = CharToString(cfg_path);
}
// Reads back the fusion-switch config path stored by SetFusionSwitchConfigPath.
std::vector<char> Ascend310DeviceInfo::GetFusionSwitchConfigPathChar() const {
  MS_EXCEPTION_IF_NULL(data_);
  const auto cfg_path = GetValue<std::string>(data_, KModelOptionAscend310FusionSwitchCfgPath);
  return StringToChar(cfg_path);
}

// Stores the per-input shape map (input index -> dimension list).
void Ascend310DeviceInfo::SetInputShapeMap(const std::map<int, std::vector<int>> &shape) {
  MS_EXCEPTION_IF_NULL(data_);
  auto &param_map = data_->params;
  param_map[kModelOptionAscend310InputShapeMap] = shape;
}
// Reads back the shape map stored by SetInputShapeMap.
std::map<int, std::vector<int>> Ascend310DeviceInfo::GetInputShapeMap() const {
  MS_EXCEPTION_IF_NULL(data_);
  auto shape_map = GetValue<std::map<int, std::vector<int>>>(data_, kModelOptionAscend310InputShapeMap);
  return shape_map;
}

// Stores the desired model output data type.
void Ascend310DeviceInfo::SetOutputType(enum DataType output_type) {
  MS_EXCEPTION_IF_NULL(data_);
  auto &param_map = data_->params;
  param_map[kModelOptionAscend310OutputType] = output_type;
}
// Reads back the output data type stored by SetOutputType.
enum DataType Ascend310DeviceInfo::GetOutputType() const {
  MS_EXCEPTION_IF_NULL(data_);
  const auto output_type = GetValue<enum DataType>(data_, kModelOptionAscend310OutputType);
  return output_type;
}
} // namespace mindspore

+ 2
- 0
tests/ut/stub/cxx_api/factory.h View File

@@ -24,6 +24,8 @@
#include "utils/utils.h"

namespace mindspore {
inline std::string g_device_target = "Default";

template <class T>
class Factory {
using U = std::function<std::shared_ptr<T>()>;


+ 1
- 1
tests/ut/stub/cxx_api/graph/ascend/ascend_graph_impl.cc View File

@@ -42,7 +42,7 @@ std::vector<MSTensor> AscendGraphImpl::GetOutputs() {
return graph_imp_stub_->GetOutputs();
}

Status AscendGraphImpl::Load() { return kSuccess; }
Status AscendGraphImpl::Load(uint32_t device_id) { return kSuccess; }

Status AscendGraphImpl::Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) {
if (!graph_imp_stub_) {


+ 1
- 2
tests/ut/stub/cxx_api/graph/ascend/ascend_graph_impl.h View File

@@ -27,14 +27,13 @@
#include "cxx_api/model/model_impl.h"

namespace mindspore {

class AscendGraphImpl : public GraphCell::GraphImpl {
public:
AscendGraphImpl();
~AscendGraphImpl() override;

Status Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) override;
Status Load() override;
Status Load(uint32_t device_id) override;
std::vector<MSTensor> GetInputs() override;
std::vector<MSTensor> GetOutputs() override;



+ 4
- 0
tests/ut/stub/cxx_api/graph/graph.cc View File

@@ -18,6 +18,8 @@
#include "utils/log_adapter.h"

namespace mindspore {
Graph::Graph() : graph_data_(nullptr) {}

Graph::Graph(const std::shared_ptr<GraphData> &graph_data) : graph_data_(graph_data) {}

Graph::Graph(std::shared_ptr<GraphData> &&graph_data) : graph_data_(graph_data) {}
@@ -28,6 +30,8 @@ Graph::Graph(std::nullptr_t) : graph_data_(nullptr) {}

bool Graph::operator==(std::nullptr_t) const { return graph_data_ == nullptr; }

bool Graph::operator!=(std::nullptr_t) const { return graph_data_ != nullptr; }

ModelType Graph::ModelType() const {
MS_EXCEPTION_IF_NULL(graph_data_);
return graph_data_->ModelType();


+ 2
- 2
tests/ut/stub/cxx_api/graph/graph_impl.h View File

@@ -29,14 +29,14 @@
namespace mindspore {
class GraphCell::GraphImpl {
public:
GraphImpl() = default;
GraphImpl() : graph_(nullptr) {}
virtual ~GraphImpl() = default;

std::shared_ptr<Graph::GraphData> &MutableGraphData() const { return graph_->graph_data_; }
void SetGraph(const std::shared_ptr<Graph> &graph) { graph_ = graph; }

virtual Status Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) = 0;
virtual Status Load() = 0;
virtual Status Load(uint32_t device_id) = 0;

virtual std::vector<MSTensor> GetInputs() = 0;
virtual std::vector<MSTensor> GetOutputs() = 0;


+ 92
- 22
tests/ut/stub/cxx_api/model/model.cc View File

@@ -21,60 +21,130 @@

namespace mindspore {
namespace {
const std::map<std::string, std::set<ModelType>> kSupportedModelMap = {
{kDeviceTypeAscend310, {kOM, kMindIR}},
{kDeviceTypeAscend910, {kMindIR}},
{kDeviceTypeGPU, {kMindIR}},
const std::map<enum DeviceType, std::set<ModelType>> kSupportedModelMap = {
{kAscend310, {kOM, kMindIR}},
{kAscend910, {kMindIR}},
{kNvidiaGPU, {kMindIR}},
};

std::string GetDeviceTypeString(enum DeviceType type) {
static const std::map<enum DeviceType, std::string> kDeviceTypeStrs = {
{kCPU, "CPU"}, {kMaliGPU, "MaliGPU"}, {kNvidiaGPU, "GPU"},
{kKirinNPU, "KirinGPU"}, {kAscend910, "Ascend910"}, {kAscend310, "Ascend310"},
};
auto iter = kDeviceTypeStrs.find(type);
if (iter != kDeviceTypeStrs.end()) {
return iter->second;
}

return "InvalidDeviceType" + std::to_string(type);
}
Status Model::Build() {
MS_EXCEPTION_IF_NULL(impl_);
} // namespace
Status Model::Build(GraphCell graph_cell, const std::shared_ptr<Context> &model_context) {
if (graph_cell.GetGraph() == nullptr) {
MS_LOG(ERROR) << "Invalid graph input.";
return kMCInvalidInput;
}

if (model_context == nullptr) {
MS_LOG(ERROR) << "Invalid model context.";
return kMCInvalidInput;
}
auto &device_info = model_context->MutableDeviceInfo();
if (device_info.size() != 1) {
MS_LOG(ERROR) << "Invalid model context, only single device info is supported.";
return kMCInvalidInput;
}

std::string device_target = GetDeviceTypeString(device_info[0]->GetDeviceType());
impl_ = Factory<ModelImpl>::Instance().Create(device_target);
if (impl_ == nullptr) {
MS_LOG(ERROR) << "Create session type " << device_target << " failed";
return kMEFailed;
}

g_device_target = device_target;

impl_->SetGraph(std::make_shared<Graph>(*graph_cell.GetGraph()));
impl_->SetContext(model_context);

return impl_->Build();
}

Status Model::Resize(const std::vector<MSTensor> &inputs, const std::vector<std::vector<int64_t>> &dims) {
MS_EXCEPTION_IF_NULL(impl_);
if (impl_ == nullptr) {
MS_LOG(ERROR) << "Failed because this model has not been built.";
return kMCFailed;
}
return impl_->Resize(inputs, dims);
}

Status Model::Predict(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) {
MS_EXCEPTION_IF_NULL(impl_);
if (impl_ == nullptr) {
MS_LOG(ERROR) << "Failed because this model has not been built.";
return kMCFailed;
}
return impl_->Predict(inputs, outputs);
}

std::vector<MSTensor> Model::GetInputs() {
MS_EXCEPTION_IF_NULL(impl_);
if (impl_ == nullptr) {
MS_LOG(ERROR) << "Failed because this model has not been built.";
return {};
}
return impl_->GetInputs();
}

std::vector<MSTensor> Model::GetOutputs() {
MS_EXCEPTION_IF_NULL(impl_);
if (impl_ == nullptr) {
MS_LOG(ERROR) << "Failed because this model has not been built.";
return {};
}
return impl_->GetOutputs();
}

Model::Model(const GraphCell &graph_cell, const std::shared_ptr<Context> &model_context)
: impl_(Factory<ModelImpl>::Instance().Create(mindspore::GlobalContext::GetGlobalDeviceTarget())) {
if (impl_ == nullptr) {
MS_LOG(EXCEPTION) << "Create session type " << mindspore::GlobalContext::GetGlobalDeviceTarget() << " failed";
MSTensor Model::GetInputByTensorName(const std::vector<char> &tensor_name) {
std::string tensor_name_str = CharToString(tensor_name);
auto inputs = GetInputs();
for (auto in : inputs) {
if (in.Name() == tensor_name_str) {
return in;
}
}
MS_EXCEPTION_IF_NULL(graph_cell.GetGraph());
impl_->SetGraph(std::make_shared<Graph>(*graph_cell.GetGraph()));
impl_->SetContext(model_context);

return MSTensor(std::shared_ptr<MSTensor::Impl>(nullptr));
}

Model::Model(const std::vector<Output> &network, const std::shared_ptr<Context> &model_context) {
MS_LOG(EXCEPTION) << "Unsupported feature.";
std::vector<std::vector<char>> Model::GetOutputTensorNamesChar() {
std::vector<std::vector<char>> ret;
auto outputs = GetOutputs();
std::transform(outputs.begin(), outputs.end(), std::back_inserter(ret),
[](MSTensor item) -> std::vector<char> { return StringToChar(item.Name()); });
return ret;
}

MSTensor Model::GetOutputByTensorName(const std::vector<char> &tensor_name) {
std::string tensor_name_str = CharToString(tensor_name);
auto outputs = GetOutputs();
for (auto out : outputs) {
if (out.Name() == tensor_name_str) {
return out;
}
}

return MSTensor(std::shared_ptr<MSTensor::Impl>(nullptr));
}

Model::Model() : impl_(nullptr) {}
Model::~Model() {}

bool Model::CheckModelSupport(const std::vector<char> &device_type, ModelType model_type) {
std::string device_type_str = CharToString(device_type);
bool Model::CheckModelSupport(enum DeviceType device_type, ModelType model_type) {
std::string device_type_str = GetDeviceTypeString(device_type);
if (!Factory<ModelImpl>::Instance().CheckModelSupport(device_type_str)) {
return false;
}

auto first_iter = kSupportedModelMap.find(device_type_str);
auto first_iter = kSupportedModelMap.find(device_type);
if (first_iter == kSupportedModelMap.end()) {
return false;
}


+ 2
- 2
tests/ut/stub/cxx_api/model/model_impl.h View File

@@ -42,9 +42,9 @@ class ModelImpl {
virtual std::vector<MSTensor> GetOutputs() = 0;

protected:
Status Load(const std::shared_ptr<GraphCell> &graph_cell) {
Status Load(const std::shared_ptr<GraphCell> &graph_cell, uint32_t device_id) {
MS_EXCEPTION_IF_NULL(graph_cell);
return graph_cell->Load();
return graph_cell->Load(device_id);
}

FuncGraphPtr GetFuncGraph() const {


+ 25
- 2
tests/ut/stub/cxx_api/model/ms/ms_model.cc View File

@@ -53,7 +53,7 @@ std::shared_ptr<GraphCell> MsModel::GenerateGraphCell(const std::vector<std::vec
MS_EXCEPTION_IF_NULL(graph);
auto graph_cell = std::make_shared<GraphCell>(graph);
MS_EXCEPTION_IF_NULL(graph_cell);
auto ret = ModelImpl::Load(graph_cell);
auto ret = ModelImpl::Load(graph_cell, GetDeviceID());
if (ret != kSuccess) {
MS_LOG(ERROR) << "Load failed.";
return nullptr;
@@ -78,7 +78,7 @@ Status MsModel::Build() {
MS_EXCEPTION_IF_NULL(graph);
auto graph_cell = std::make_shared<GraphCell>(graph);
MS_EXCEPTION_IF_NULL(graph_cell);
auto ret = ModelImpl::Load(graph_cell);
auto ret = ModelImpl::Load(graph_cell, GetDeviceID());
if (ret != kSuccess) {
MS_LOG(ERROR) << "Load failed.";
return ret;
@@ -149,4 +149,27 @@ std::vector<MSTensor> MsModel::GetOutputs() {
MS_EXCEPTION_IF_NULL(graph_cell_);
return graph_cell_->GetOutputs();
}

uint32_t MsModel::GetDeviceID() const {
if (model_context_ == nullptr) {
return 0;
}

auto &device_infos = model_context_->MutableDeviceInfo();
if (device_infos.size() != 1) {
return 0;
}

auto ascend910_info = device_infos[0]->Cast<Ascend910DeviceInfo>();
if (ascend910_info != nullptr) {
return ascend910_info->GetDeviceID();
}

auto gpu_info = device_infos[0]->Cast<NvidiaGPUDeviceInfo>();
if (gpu_info != nullptr) {
return gpu_info->GetDeviceID();
}

return 0;
}
} // namespace mindspore

+ 1
- 0
tests/ut/stub/cxx_api/model/ms/ms_model.h View File

@@ -42,6 +42,7 @@ class MsModel : public ModelImpl {

private:
std::shared_ptr<GraphCell> GenerateGraphCell(const std::vector<std::vector<int64_t>> &dims);
uint32_t GetDeviceID() const;

std::shared_ptr<GraphCell> graph_cell_;
std::map<std::string, std::shared_ptr<GraphCell>> dynamic_size_graph_map_;


+ 28
- 9
tests/ut/stub/cxx_api/serialization.cc View File

@@ -67,17 +67,31 @@ static Buffer ReadFile(const std::string &file) {
return buffer;
}

Graph Serialization::LoadModel(const void *model_data, size_t data_size, ModelType model_type) {
Status Serialization::Load(const void *model_data, size_t data_size, ModelType model_type, Graph *graph) {
if (graph == nullptr) {
MS_LOG(ERROR) << "Output args graph is nullptr.";
return kMEInvalidInput;
}

if (model_type == kMindIR) {
auto anf_graph = std::make_shared<FuncGraph>();
return Graph(std::make_shared<Graph::GraphData>(anf_graph, kMindIR));
*graph = Graph(std::make_shared<Graph::GraphData>(anf_graph, kMindIR));
return kSuccess;
} else if (model_type == kOM) {
return Graph(std::make_shared<Graph::GraphData>(Buffer(model_data, data_size), kOM));
*graph = Graph(std::make_shared<Graph::GraphData>(Buffer(model_data, data_size), kOM));
return kSuccess;
}
MS_LOG(EXCEPTION) << "Unsupported ModelType " << model_type;

MS_LOG(ERROR) << "Unsupported ModelType " << model_type;
return kMEInvalidInput;
}

Graph Serialization::LoadModel(const std::vector<char> &file, ModelType model_type) {
Status Serialization::Load(const std::vector<char> &file, ModelType model_type, Graph *graph) {
if (graph == nullptr) {
MS_LOG(ERROR) << "Output args graph is nullptr.";
return kMEInvalidInput;
}

std::string file_path = CharToString(file);
Buffer data = ReadFile(file_path);
if (data.Data() == nullptr) {
@@ -86,13 +100,18 @@ Graph Serialization::LoadModel(const std::vector<char> &file, ModelType model_ty
if (model_type == kMindIR) {
auto anf_graph = std::make_shared<FuncGraph>();
if (anf_graph == nullptr) {
MS_LOG(EXCEPTION) << "Load model failed.";
MS_LOG(ERROR) << "Load model failed.";
return kMEInvalidInput;
}
return Graph(std::make_shared<Graph::GraphData>(anf_graph, kMindIR));
*graph = Graph(std::make_shared<Graph::GraphData>(anf_graph, kMindIR));
return kSuccess;
} else if (model_type == kOM) {
return Graph(std::make_shared<Graph::GraphData>(data, kOM));
*graph = Graph(std::make_shared<Graph::GraphData>(data, kOM));
return kSuccess;
}
MS_LOG(EXCEPTION) << "Unsupported ModelType " << model_type;

MS_LOG(ERROR) << "Unsupported ModelType " << model_type;
return kMEInvalidInput;
}

Status Serialization::LoadCheckPoint(const std::string &ckpt_file, std::map<std::string, Buffer> *parameters) {


+ 130
- 14
tests/ut/stub/cxx_api/types.cc View File

@@ -133,33 +133,139 @@ class TensorReferenceImpl : public MSTensor::Impl {
std::vector<int64_t> shape_;
};

MSTensor MSTensor::CreateTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept {
MSTensor *MSTensor::CreateTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept {
std::string name_str = CharToString(name);
try {
std::shared_ptr<Impl> impl = std::make_shared<TensorDefaultImpl>(name_str, type, shape, data, data_len);
return MSTensor(impl);
MSTensor *ret = new MSTensor(impl);
return ret;
} catch (const std::bad_alloc &) {
MS_LOG(ERROR) << "Malloc memory failed.";
return MSTensor(nullptr);
return nullptr;
} catch (...) {
MS_LOG(ERROR) << "Unknown error occurred.";
return MSTensor(nullptr);
return nullptr;
}
}

MSTensor MSTensor::CreateRefTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept {
MSTensor *MSTensor::CreateRefTensor(const std::vector<char> &name, enum DataType type,
const std::vector<int64_t> &shape, const void *data, size_t data_len) noexcept {
std::string name_str = CharToString(name);
try {
std::shared_ptr<Impl> impl = std::make_shared<TensorReferenceImpl>(name_str, type, shape, data, data_len);
return MSTensor(impl);
MSTensor *ret = new MSTensor(impl);
return ret;
} catch (const std::bad_alloc &) {
MS_LOG(ERROR) << "Malloc memory failed.";
return MSTensor(nullptr);
return nullptr;
} catch (...) {
MS_LOG(ERROR) << "Unknown error occurred.";
return MSTensor(nullptr);
return nullptr;
}
}

MSTensor *MSTensor::CharStringsToTensor(const std::vector<char> &name, const std::vector<std::vector<char>> &str) {
// num(4 bytes) + offset1(4 bytes) + offset2(4 bytes) + ... + data1(str1.len) + data2(str2.len) + ...
// str1.len() = offset2 - offset1
// data1.begin() = start + offset1
size_t mem_size = 0;
mem_size += sizeof(int32_t); // for num
for (const auto &s : str) {
mem_size += sizeof(int32_t); // for offset
mem_size += s.size(); // for data
}

auto tensor = CreateTensor(name, DataType::kObjectTypeString, {static_cast<int64_t>(mem_size)}, nullptr, mem_size);
if (tensor == nullptr) {
MS_LOG(ERROR) << "Create tensor failed.";
return nullptr;
}

int32_t *data = reinterpret_cast<int32_t *>(tensor->MutableData());
if (data == nullptr) {
MS_LOG(ERROR) << "Create tensor failed.";
DestroyTensorPtr(tensor);
return nullptr;
}
uint8_t *cur_data = reinterpret_cast<uint8_t *>(data + 1 + str.size());
*reinterpret_cast<int32_t *>(data) = str.size();
for (size_t i = 0; i < str.size(); ++i) {
int32_t offset = (cur_data - reinterpret_cast<uint8_t *>(data));
data[i + 1] = offset;
if (str[i].empty()) {
continue;
}
auto ret = memcpy_s(reinterpret_cast<void *>(cur_data), str[i].size(), str[i].data(), str[i].size());
if (ret != 0) {
MS_LOG(ERROR) << "memcpy_s failed, ret = " << ret;
DestroyTensorPtr(tensor);
return nullptr;
}
cur_data += str[i].size();
}

return tensor;
}

std::vector<std::vector<char>> MSTensor::TensorToStringChars(const MSTensor &tensor) {
if (tensor == nullptr || tensor.DataType() != DataType::kObjectTypeString || tensor.DataSize() < 4) {
MS_LOG(ERROR) << "Invalid tensor.";
return {};
}

std::vector<std::vector<char>> strings;
auto host_data = tensor.Data();
const int32_t *data = reinterpret_cast<const int32_t *>(host_data.get());
int32_t str_num = data[0];
if (str_num == 0) {
return {};
}
if (str_num < 0) {
MS_LOG(ERROR) << "str num " << str_num << " cannot be negative.";
return {};
}

if (tensor.DataSize() < (str_num + 1) * sizeof(int32_t)) {
MS_LOG(ERROR) << "Invalid tensor data size " << tensor.DataSize() << ", need " << (str_num + 1) * sizeof(int32_t)
<< " at least for " << str_num << " strings.";
return {};
}
for (size_t i = 0; i < static_cast<size_t>(str_num); ++i) {
strings.push_back({});
auto &str = strings[i];
int32_t str_len;
int32_t offset = data[i + 1];
if (i + 1 != static_cast<size_t>(str_num)) {
str_len = data[i + 1 + 1] - offset;
} else {
str_len = tensor.DataSize() - offset;
}

if (str_len == 0) {
continue;
}

if (str_len < 0) {
MS_LOG(ERROR) << "str " << i << " len " << str_len << " cannot be negative.";
return {};
}

str.resize(str_len);
const uint8_t *cur_data = reinterpret_cast<const uint8_t *>(data) + offset;
auto ret = memcpy_s(reinterpret_cast<void *>(str.data()), str.size(), cur_data, str_len);
if (ret != 0) {
MS_LOG(ERROR) << "memcpy_s failed, ret = " << ret;
return {};
}
}

return strings;
}

void MSTensor::DestroyTensorPtr(MSTensor *tensor) noexcept {
if (tensor != nullptr) {
delete tensor;
}
}

@@ -173,11 +279,21 @@ MSTensor::~MSTensor() = default;

bool MSTensor::operator==(std::nullptr_t) const { return impl_ == nullptr; }

MSTensor MSTensor::Clone() const {
bool MSTensor::operator!=(std::nullptr_t) const { return impl_ != nullptr; }

MSTensor *MSTensor::Clone() const {
MS_EXCEPTION_IF_NULL(impl_);
MSTensor ret;
ret.impl_ = impl_->Clone();
return ret;
try {
MSTensor *ret = new MSTensor();
ret->impl_ = impl_->Clone();
return ret;
} catch (const std::bad_alloc &) {
MS_LOG(ERROR) << "Malloc memory failed.";
return nullptr;
} catch (...) {
MS_LOG(ERROR) << "Unknown error occurred.";
return nullptr;
}
}

std::vector<char> MSTensor::CharName() const {


+ 8
- 2
tests/ut/stub/graph_impl_stub.cc View File

@@ -57,7 +57,13 @@ Status GraphImplStubAdd::Run(const std::vector<MSTensor> &inputs, std::vector<MS
}
auto x1 = reinterpret_cast<const float *>(inputs[0].Data().get());
auto x2 = reinterpret_cast<const float *>(inputs[1].Data().get());
MSTensor output = outputs_[0].Clone();
MSTensor* output_ptr = outputs_[0].Clone();
if (output_ptr == nullptr) {
return mindspore::kCoreFailed;
}
MSTensor output = *output_ptr;
mindspore::MSTensor::DestroyTensorPtr(output_ptr);

auto y = reinterpret_cast<float *>(output.MutableData());
for (size_t i = 0; i < outputs_[0].DataSize() / sizeof(float); i++) {
y[i] = x1[i] + x2[i];
@@ -66,7 +72,7 @@ Status GraphImplStubAdd::Run(const std::vector<MSTensor> &inputs, std::vector<MS
return mindspore::kSuccess;
}

Status GraphImplStubAdd::Load() { return kSuccess; }
Status GraphImplStubAdd::Load(uint32_t device_id) { return kSuccess; }

std::vector<MSTensor> GraphImplStubAdd::GetInputs() { return inputs_; }



+ 1
- 1
tests/ut/stub/graph_impl_stub.h View File

@@ -36,7 +36,7 @@ class GraphImplStubAdd : public GraphCell::GraphImpl {
~GraphImplStubAdd() override;

Status Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) override;
Status Load() override;
Status Load(uint32_t device_id) override;

std::vector<MSTensor> GetInputs() override;
std::vector<MSTensor> GetOutputs() override;


+ 2
- 1
tests/ut/stub/include/api/cell.h View File

@@ -103,8 +103,9 @@ class MS_API GraphCell final : public Cell<GraphCell> {
std::vector<MSTensor> GetOutputs();

private:
friend class Model;
friend class ModelImpl;
Status Load();
Status Load(uint32_t device_id);

std::shared_ptr<Graph> graph_;
std::shared_ptr<GraphImpl> executor_;


+ 158
- 82
tests/ut/stub/include/api/context.h View File

@@ -19,130 +19,206 @@
#include <string>
#include <memory>
#include <vector>
#include <map>
#include "include/api/types.h"
#include "include/api/dual_abi_helper.h"

namespace mindspore {
constexpr auto kDeviceTypeAscend310 = "Ascend310";
constexpr auto kDeviceTypeAscend910 = "Ascend910";
constexpr auto kDeviceTypeGPU = "GPU";
enum DeviceType {
kCPU = 0,
kMaliGPU,
kNvidiaGPU,
kKirinNPU,
kAscend910,
kAscend310,
// add new type here
kInvalidDeviceType = 100,
};

class Allocator;
class DeviceInfoContext;

struct MS_API Context {
class MS_API Context {
public:
Context();
virtual ~Context() = default;
~Context() = default;

void SetThreadNum(int32_t thread_num);
int32_t GetThreadNum() const;

void SetAllocator(const std::shared_ptr<Allocator> &allocator);
std::shared_ptr<Allocator> GetAllocator() const;

std::vector<std::shared_ptr<DeviceInfoContext>> &MutableDeviceInfo();

private:
struct Data;
std::shared_ptr<Data> data;
std::shared_ptr<Data> data_;
};

struct MS_API GlobalContext : public Context {
class MS_API DeviceInfoContext : public std::enable_shared_from_this<DeviceInfoContext> {
public:
static std::shared_ptr<Context> GetGlobalContext();
struct Data;

static inline void SetGlobalDeviceTarget(const std::string &device_target);
static inline std::string GetGlobalDeviceTarget();
DeviceInfoContext();
virtual ~DeviceInfoContext() = default;
virtual enum DeviceType GetDeviceType() const = 0;

static void SetGlobalDeviceID(const uint32_t &device_id);
static uint32_t GetGlobalDeviceID();
template <class T>
std::shared_ptr<T> Cast() {
static_assert(std::is_base_of<DeviceInfoContext, T>::value, "Wrong cast type.");
if (GetDeviceType() != T().GetDeviceType()) {
return nullptr;
}

private:
// api without std::string
static void SetGlobalDeviceTarget(const std::vector<char> &device_target);
static std::vector<char> GetGlobalDeviceTargetChar();
return std::static_pointer_cast<T>(shared_from_this());
}

protected:
std::shared_ptr<Data> data_;
};

class MS_API CPUDeviceInfo : public DeviceInfoContext {
public:
enum DeviceType GetDeviceType() const override { return DeviceType::kCPU; };

/// \brief Set the thread affinity of CPU cores.
///
/// \param mode: 0: no affinities, 1: big cores first, 2: little cores first
void SetThreadAffinity(int mode);
int GetThreadAffinity() const;
void SetEnableFP16(bool is_fp16);
bool GetEnableFP16() const;
};

class MS_API MaliGPUDeviceInfo : public DeviceInfoContext {
public:
enum DeviceType GetDeviceType() const override { return DeviceType::kMaliGPU; };

void SetEnableFP16(bool is_fp16);
bool GetEnableFP16() const;
};

class MS_API KirinNPUDeviceInfo : public DeviceInfoContext {
public:
enum DeviceType GetDeviceType() const override { return DeviceType::kKirinNPU; };

void SetFrequency(int frequency);
int GetFrequency() const;
};

class MS_API NvidiaGPUDeviceInfo : public DeviceInfoContext {
public:
enum DeviceType GetDeviceType() const override { return DeviceType::kNvidiaGPU; };

void SetDeviceID(uint32_t device_id);
uint32_t GetDeviceID() const;

void SetGpuTrtInferMode(bool gpu_trt_infer_mode);
bool GetGpuTrtInferMode() const;
};

class MS_API Ascend910DeviceInfo : public DeviceInfoContext {
public:
enum DeviceType GetDeviceType() const override { return DeviceType::kAscend910; };

void SetDeviceID(uint32_t device_id);
uint32_t GetDeviceID() const;
};

// NOTE(review): this span appears to interleave two versions of the header —
// the legacy static `ModelContext` helpers (taking std::shared_ptr<Context>)
// and the newer instance-based `Ascend310DeviceInfo` API. Only one closing
// `};` is present below, so the `struct ModelContext` scope is never closed.
// This looks like a diff/scrape merge artifact — TODO confirm against the
// upstream include/api/context.h before relying on this text.
struct MS_API ModelContext : public Context {
class MS_API Ascend310DeviceInfo : public DeviceInfoContext {
public:
// Legacy static form: operates on an explicit Context handle.
static inline void SetInsertOpConfigPath(const std::shared_ptr<Context> &context, const std::string &cfg_path);
static inline std::string GetInsertOpConfigPath(const std::shared_ptr<Context> &context);
// Identifies this device info as Ascend 310.
enum DeviceType GetDeviceType() const override { return DeviceType::kAscend310; };

// Ascend device id used for inference.
void SetDeviceID(uint32_t device_id);
uint32_t GetDeviceID() const;

// Instance-based std::string accessors; each forwards to a private
// std::vector<char> overload below (dual-ABI pattern).
inline void SetDumpConfigPath(const std::string &cfg_path);
inline std::string GetDumpConfigPath() const;

inline void SetInsertOpConfigPath(const std::string &cfg_path);
inline std::string GetInsertOpConfigPath() const;

inline void SetInputFormat(const std::string &format);
inline std::string GetInputFormat() const;

inline void SetInputShape(const std::string &shape);
inline std::string GetInputShape() const;

static inline void SetInputFormat(const std::shared_ptr<Context> &context, const std::string &format);
static inline std::string GetInputFormat(const std::shared_ptr<Context> &context);
void SetInputShapeMap(const std::map<int, std::vector<int>> &shape);
std::map<int, std::vector<int>> GetInputShapeMap() const;

static inline void SetInputShape(const std::shared_ptr<Context> &context, const std::string &shape);
static inline std::string GetInputShape(const std::shared_ptr<Context> &context);
void SetDynamicBatchSize(const std::vector<size_t> &dynamic_batch_size);
inline std::string GetDynamicBatchSize() const;

static void SetOutputType(const std::shared_ptr<Context> &context, enum DataType output_type);
static enum DataType GetOutputType(const std::shared_ptr<Context> &context);
void SetOutputType(enum DataType output_type);
enum DataType GetOutputType() const;

static inline void SetPrecisionMode(const std::shared_ptr<Context> &context, const std::string &precision_mode);
static inline std::string GetPrecisionMode(const std::shared_ptr<Context> &context);
inline void SetPrecisionMode(const std::string &precision_mode);
inline std::string GetPrecisionMode() const;

static inline void SetOpSelectImplMode(const std::shared_ptr<Context> &context,
const std::string &op_select_impl_mode);
static inline std::string GetOpSelectImplMode(const std::shared_ptr<Context> &context);
inline void SetOpSelectImplMode(const std::string &op_select_impl_mode);
inline std::string GetOpSelectImplMode() const;

static inline void SetGpuTrtInferMode(const std::shared_ptr<Context> &context, const std::string &gpu_trt_infer_mode);
static inline std::string GetGpuTrtInferMode(const std::shared_ptr<Context> &context);
inline void SetFusionSwitchConfigPath(const std::string &cfg_path);
inline std::string GetFusionSwitchConfigPath() const;

private:
// api without std::string
// Dual-ABI entry points: std::vector<char> avoids passing std::string across
// the library boundary (see dual_abi_helper.h for the conversion helpers).
static void SetInsertOpConfigPath(const std::shared_ptr<Context> &context, const std::vector<char> &cfg_path);
static std::vector<char> GetInsertOpConfigPathChar(const std::shared_ptr<Context> &context);
void SetDumpConfigPath(const std::vector<char> &cfg_path);
std::vector<char> GetDumpConfigPathChar() const;

static void SetInputFormat(const std::shared_ptr<Context> &context, const std::vector<char> &format);
static std::vector<char> GetInputFormatChar(const std::shared_ptr<Context> &context);
void SetInsertOpConfigPath(const std::vector<char> &cfg_path);
std::vector<char> GetInsertOpConfigPathChar() const;

static void SetInputShape(const std::shared_ptr<Context> &context, const std::vector<char> &shape);
static std::vector<char> GetInputShapeChar(const std::shared_ptr<Context> &context);
void SetInputFormat(const std::vector<char> &format);
std::vector<char> GetInputFormatChar() const;

static void SetPrecisionMode(const std::shared_ptr<Context> &context, const std::vector<char> &precision_mode);
static std::vector<char> GetPrecisionModeChar(const std::shared_ptr<Context> &context);
void SetInputShape(const std::vector<char> &shape);
std::vector<char> GetInputShapeChar() const;

static void SetOpSelectImplMode(const std::shared_ptr<Context> &context,
const std::vector<char> &op_select_impl_mode);
static std::vector<char> GetOpSelectImplModeChar(const std::shared_ptr<Context> &context);
std::vector<char> GetDynamicBatchSizeChar() const;

static void SetGpuTrtInferMode(const std::shared_ptr<Context> &context, const std::vector<char> &gpu_trt_infer_mode);
static std::vector<char> GetGpuTrtInferModeChar(const std::shared_ptr<Context> &context);
void SetPrecisionMode(const std::vector<char> &precision_mode);
std::vector<char> GetPrecisionModeChar() const;

void SetOpSelectImplMode(const std::vector<char> &op_select_impl_mode);
std::vector<char> GetOpSelectImplModeChar() const;

void SetFusionSwitchConfigPath(const std::vector<char> &cfg_path);
std::vector<char> GetFusionSwitchConfigPathChar() const;
};

void GlobalContext::SetGlobalDeviceTarget(const std::string &device_target) {
SetGlobalDeviceTarget(StringToChar(device_target));
}
std::string GlobalContext::GetGlobalDeviceTarget() { return CharToString(GetGlobalDeviceTargetChar()); }
void Ascend310DeviceInfo::SetDumpConfigPath(const std::string &cfg_path) { SetDumpConfigPath(StringToChar(cfg_path)); }
std::string Ascend310DeviceInfo::GetDumpConfigPath() const { return CharToString(GetDumpConfigPathChar()); }

// Forwards the std::string path to the ABI-stable std::vector<char> overload.
void ModelContext::SetInsertOpConfigPath(const std::shared_ptr<Context> &context, const std::string &cfg_path) {
  const auto cfg_chars = StringToChar(cfg_path);
  SetInsertOpConfigPath(context, cfg_chars);
}
std::string ModelContext::GetInsertOpConfigPath(const std::shared_ptr<Context> &context) {
return CharToString(GetInsertOpConfigPathChar(context));
void Ascend310DeviceInfo::SetInsertOpConfigPath(const std::string &cfg_path) {
SetInsertOpConfigPath(StringToChar(cfg_path));
}
std::string Ascend310DeviceInfo::GetInsertOpConfigPath() const { return CharToString(GetInsertOpConfigPathChar()); }

// Legacy static wrappers (explicit Context handle) and instance-based
// Ascend310DeviceInfo wrappers; each converts between std::string and the
// ABI-stable std::vector<char> form via StringToChar/CharToString.
void ModelContext::SetInputFormat(const std::shared_ptr<Context> &context, const std::string &format) {
SetInputFormat(context, StringToChar(format));
}
std::string ModelContext::GetInputFormat(const std::shared_ptr<Context> &context) {
return CharToString(GetInputFormatChar(context));
}
void Ascend310DeviceInfo::SetInputFormat(const std::string &format) { SetInputFormat(StringToChar(format)); }
std::string Ascend310DeviceInfo::GetInputFormat() const { return CharToString(GetInputFormatChar()); }

void ModelContext::SetInputShape(const std::shared_ptr<Context> &context, const std::string &shape) {
SetInputShape(context, StringToChar(shape));
}
std::string ModelContext::GetInputShape(const std::shared_ptr<Context> &context) {
return CharToString(GetInputShapeChar(context));
}
void Ascend310DeviceInfo::SetInputShape(const std::string &shape) { SetInputShape(StringToChar(shape)); }
std::string Ascend310DeviceInfo::GetInputShape() const { return CharToString(GetInputShapeChar()); }

void ModelContext::SetPrecisionMode(const std::shared_ptr<Context> &context, const std::string &precision_mode) {
SetPrecisionMode(context, StringToChar(precision_mode));
}
std::string ModelContext::GetPrecisionMode(const std::shared_ptr<Context> &context) {
return CharToString(GetPrecisionModeChar(context));
}
// Returns the dynamic batch size configuration as a string.
std::string Ascend310DeviceInfo::GetDynamicBatchSize() const { return CharToString(GetDynamicBatchSizeChar()); }

void ModelContext::SetOpSelectImplMode(const std::shared_ptr<Context> &context,
const std::string &op_select_impl_mode) {
SetOpSelectImplMode(context, StringToChar(op_select_impl_mode));
void Ascend310DeviceInfo::SetPrecisionMode(const std::string &precision_mode) {
SetPrecisionMode(StringToChar(precision_mode));
}
std::string ModelContext::GetOpSelectImplMode(const std::shared_ptr<Context> &context) {
return CharToString(GetOpSelectImplModeChar(context));
std::string Ascend310DeviceInfo::GetPrecisionMode() const { return CharToString(GetPrecisionModeChar()); }

void Ascend310DeviceInfo::SetOpSelectImplMode(const std::string &op_select_impl_mode) {
SetOpSelectImplMode(StringToChar(op_select_impl_mode));
}
std::string Ascend310DeviceInfo::GetOpSelectImplMode() const { return CharToString(GetOpSelectImplModeChar()); }

void ModelContext::SetGpuTrtInferMode(const std::shared_ptr<Context> &context, const std::string &gpu_trt_infer_mode) {
SetGpuTrtInferMode(context, StringToChar(gpu_trt_infer_mode));
void Ascend310DeviceInfo::SetFusionSwitchConfigPath(const std::string &cfg_path) {
SetFusionSwitchConfigPath(StringToChar(cfg_path));
}
std::string ModelContext::GetGpuTrtInferMode(const std::shared_ptr<Context> &context) {
return CharToString(GetGpuTrtInferModeChar(context));
std::string Ascend310DeviceInfo::GetFusionSwitchConfigPath() const {
return CharToString(GetFusionSwitchConfigPathChar());
}
} // namespace mindspore
#endif // MINDSPORE_INCLUDE_API_CONTEXT_H

+ 138
- 0
tests/ut/stub/include/api/dual_abi_helper.h View File

@@ -16,11 +16,149 @@
#ifndef MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_
#define MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_

#include <algorithm>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <set>
#include <unordered_map>
#include <utility>
#include <vector>

namespace mindspore {
// Converts a std::string into its ABI-stable std::vector<char> form
// (byte-for-byte copy; no NUL terminator is appended).
inline std::vector<char> StringToChar(const std::string &s) {
  return std::vector<char>(s.cbegin(), s.cend());
}

// Rebuilds a std::string from its std::vector<char> representation.
inline std::string CharToString(const std::vector<char> &c) {
  return std::string(c.cbegin(), c.cend());
}

inline std::optional<std::vector<char>> OptionalStringToChar(const std::optional<std::string> &s) {
if (s == std::nullopt) return std::nullopt;
std::optional<std::vector<char>> ret = std::vector<char>(s->begin(), s->end());
return ret;
}

inline std::optional<std::string> OptionalCharToString(const std::optional<std::vector<char>> &c) {
if (c == std::nullopt) return std::nullopt;
std::optional<std::string> ret = std::string(c->begin(), c->end());
return ret;
}

// Converts the string half of a (string, int32) pair to the char-vector form.
inline std::pair<std::vector<char>, int32_t> PairStringToChar(const std::pair<std::string, int32_t> &s) {
  return std::make_pair(std::vector<char>(s.first.cbegin(), s.first.cend()), s.second);
}

// Inverse of PairStringToChar.
inline std::pair<std::string, int32_t> PairCharToString(const std::pair<std::vector<char>, int32_t> &c) {
  return std::make_pair(std::string(c.first.cbegin(), c.first.cend()), c.second);
}

// Converts each std::string element to its ABI-stable std::vector<char> form.
// Fix: the lambda previously took `auto str` by value, copying every string;
// also reserves the output to avoid repeated reallocation.
inline std::vector<std::vector<char>> VectorStringToChar(const std::vector<std::string> &s) {
  std::vector<std::vector<char>> ret;
  ret.reserve(s.size());
  std::transform(s.begin(), s.end(), std::back_inserter(ret),
                 [](const std::string &str) { return std::vector<char>(str.begin(), str.end()); });
  return ret;
}

// Inverse of VectorStringToChar.
inline std::vector<std::string> VectorCharToString(const std::vector<std::vector<char>> &c) {
  std::vector<std::string> ret;
  ret.reserve(c.size());
  std::transform(c.begin(), c.end(), std::back_inserter(ret),
                 [](const std::vector<char> &ch) { return std::string(ch.begin(), ch.end()); });
  return ret;
}

// Converts a set of strings to a set of char-vectors (lexicographic order is
// preserved, since std::vector<char> compares like the underlying bytes).
// Fix: the lambda previously took `auto str` by value, copying every element.
inline std::set<std::vector<char>> SetStringToChar(const std::set<std::string> &s) {
  std::set<std::vector<char>> ret;
  std::transform(s.begin(), s.end(), std::inserter(ret, ret.begin()),
                 [](const std::string &str) { return std::vector<char>(str.begin(), str.end()); });
  return ret;
}

// Inverse of SetStringToChar.
inline std::set<std::string> SetCharToString(const std::set<std::vector<char>> &c) {
  std::set<std::string> ret;
  std::transform(c.begin(), c.end(), std::inserter(ret, ret.begin()),
                 [](const std::vector<char> &ch) { return std::string(ch.begin(), ch.end()); });
  return ret;
}

// Converts the string keys of a (string -> int32) map to char-vectors.
// Fix: the lambda previously took each pair by value (`auto str`), copying
// every key; now binds by const reference.
inline std::map<std::vector<char>, int32_t> MapStringToChar(const std::map<std::string, int32_t> &s) {
  std::map<std::vector<char>, int32_t> ret;
  std::transform(s.begin(), s.end(), std::inserter(ret, ret.begin()), [](const auto &entry) {
    return std::pair<std::vector<char>, int32_t>(std::vector<char>(entry.first.begin(), entry.first.end()),
                                                 entry.second);
  });
  return ret;
}

// Inverse of MapStringToChar.
inline std::map<std::string, int32_t> MapCharToString(const std::map<std::vector<char>, int32_t> &c) {
  std::map<std::string, int32_t> ret;
  std::transform(c.begin(), c.end(), std::inserter(ret, ret.begin()), [](const auto &entry) {
    return std::pair<std::string, int32_t>(std::string(entry.first.begin(), entry.first.end()), entry.second);
  });
  return ret;
}

// Converts an unordered (string -> string) map into an ORDERED map of
// char-vectors (the return type is std::map by design — keep it, callers
// depend on it). Fix: the lambda previously copied each pair by value.
inline std::map<std::vector<char>, std::vector<char>> UnorderedMapStringToChar(
  const std::unordered_map<std::string, std::string> &s) {
  std::map<std::vector<char>, std::vector<char>> ret;
  std::transform(s.begin(), s.end(), std::inserter(ret, ret.begin()), [](const auto &entry) {
    return std::pair<std::vector<char>, std::vector<char>>(
      std::vector<char>(entry.first.begin(), entry.first.end()),
      std::vector<char>(entry.second.begin(), entry.second.end()));
  });
  return ret;
}

// Inverse of UnorderedMapStringToChar.
inline std::unordered_map<std::string, std::string> UnorderedMapCharToString(
  const std::map<std::vector<char>, std::vector<char>> &c) {
  std::unordered_map<std::string, std::string> ret;
  ret.reserve(c.size());
  std::transform(c.begin(), c.end(), std::inserter(ret, ret.begin()), [](const auto &entry) {
    return std::pair<std::string, std::string>(std::string(entry.first.begin(), entry.first.end()),
                                               std::string(entry.second.begin(), entry.second.end()));
  });
  return ret;
}

// Converts class-index pairs' string names to char-vectors, preserving order.
// Fix: lambdas previously took each pair by value (`auto str`), copying the
// name and the whole index vector per element; also reserves the output.
inline std::vector<std::pair<std::vector<char>, std::vector<int32_t>>> ClassIndexStringToChar(
  const std::vector<std::pair<std::string, std::vector<int32_t>>> &s) {
  std::vector<std::pair<std::vector<char>, std::vector<int32_t>>> ret;
  ret.reserve(s.size());
  std::transform(s.begin(), s.end(), std::back_inserter(ret), [](const auto &entry) {
    return std::pair<std::vector<char>, std::vector<int32_t>>(
      std::vector<char>(entry.first.begin(), entry.first.end()), entry.second);
  });
  return ret;
}

// Inverse of ClassIndexStringToChar.
inline std::vector<std::pair<std::string, std::vector<int32_t>>> ClassIndexCharToString(
  const std::vector<std::pair<std::vector<char>, std::vector<int32_t>>> &c) {
  std::vector<std::pair<std::string, std::vector<int32_t>>> ret;
  ret.reserve(c.size());
  std::transform(c.begin(), c.end(), std::back_inserter(ret), [](const auto &entry) {
    return std::pair<std::string, std::vector<int32_t>>(std::string(entry.first.begin(), entry.first.end()),
                                                        entry.second);
  });
  return ret;
}

// Converts pad-info map keys from std::string to std::vector<char>; values of
// arbitrary type T are copied through unchanged.
// Fix: the lambda previously took each pair by value (`auto str`), copying
// both the key and the (possibly large) T value per element.
template <class T>
inline std::map<std::vector<char>, T> PadInfoStringToChar(const std::map<std::string, T> &s_pad_info) {
  std::map<std::vector<char>, T> ret;
  std::transform(s_pad_info.begin(), s_pad_info.end(), std::inserter(ret, ret.begin()), [](const auto &entry) {
    return std::pair<std::vector<char>, T>(std::vector<char>(entry.first.begin(), entry.first.end()), entry.second);
  });
  return ret;
}

// Inverse of PadInfoStringToChar.
template <class T>
inline std::map<std::string, T> PadInfoCharToString(const std::map<std::vector<char>, T> &c_pad_info) {
  std::map<std::string, T> ret;
  std::transform(c_pad_info.begin(), c_pad_info.end(), std::inserter(ret, ret.begin()), [](const auto &entry) {
    return std::pair<std::string, T>(std::string(entry.first.begin(), entry.first.end()), entry.second);
  });
  return ret;
}

// Copies every (char-vector key, T value) entry of *c into *s with std::string
// keys. Insert semantics: an already-present key in *s is left untouched.
// Fixes: the loop previously copied the entire pair per iteration
// (`for (auto ch : *c)` — T may be a large tensor); also guards null pointers
// instead of dereferencing them.
template <class T>
inline void TensorMapCharToString(const std::map<std::vector<char>, T> *c, std::unordered_map<std::string, T> *s) {
  if (c == nullptr || s == nullptr) {
    return;
  }
  for (const auto &entry : *c) {
    s->emplace(std::string(entry.first.begin(), entry.first.end()), entry.second);
  }
}
} // namespace mindspore
#endif // MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_

+ 2
- 0
tests/ut/stub/include/api/graph.h View File

@@ -27,6 +27,7 @@ namespace mindspore {
class MS_API Graph {
public:
class GraphData;
Graph();
explicit Graph(const std::shared_ptr<GraphData> &graph_data);
explicit Graph(std::shared_ptr<GraphData> &&graph_data);
explicit Graph(std::nullptr_t);
@@ -34,6 +35,7 @@ class MS_API Graph {

enum ModelType ModelType() const;
bool operator==(std::nullptr_t) const;
bool operator!=(std::nullptr_t) const;

private:
friend class GraphCell;


+ 21
- 8
tests/ut/stub/include/api/model.h View File

@@ -24,39 +24,52 @@
#include "include/api/status.h"
#include "include/api/types.h"
#include "include/api/graph.h"
#include "include/api/context.h"
#include "include/api/cell.h"
#include "include/api/dual_abi_helper.h"

namespace mindspore {
class ModelImpl;
struct Context;

class MS_API Model {
public:
explicit Model(const std::vector<Output> &network, const std::shared_ptr<Context> &model_context = nullptr);
explicit Model(const GraphCell &graph, const std::shared_ptr<Context> &model_context = nullptr);
Model();
~Model();
Model(const Model &) = delete;
void operator=(const Model &) = delete;

Status Build();
Status Build(GraphCell graph, const std::shared_ptr<Context> &model_context = nullptr);
Status Resize(const std::vector<MSTensor> &inputs, const std::vector<std::vector<int64_t>> &dims);

Status Predict(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs);

std::vector<MSTensor> GetInputs();
inline MSTensor GetInputByTensorName(const std::string &tensor_name);

std::vector<MSTensor> GetOutputs();
inline std::vector<std::string> GetOutputTensorNames();
inline MSTensor GetOutputByTensorName(const std::string &tensor_name);

static inline bool CheckModelSupport(const std::string &device_type, ModelType model_type);
static bool CheckModelSupport(enum DeviceType device_type, ModelType model_type);

private:
// api without std::string
static bool CheckModelSupport(const std::vector<char> &device_type, ModelType model_type);
MSTensor GetInputByTensorName(const std::vector<char> &tensor_name);
std::vector<std::vector<char>> GetOutputTensorNamesChar();
MSTensor GetOutputByTensorName(const std::vector<char> &tensor_name);
std::vector<MSTensor> GetOutputsByNodeName(const std::vector<char> &node_name);

std::shared_ptr<ModelImpl> impl_;
};

bool Model::CheckModelSupport(const std::string &device_type, ModelType model_type) {
return CheckModelSupport(StringToChar(device_type), model_type);
MSTensor Model::GetInputByTensorName(const std::string &tensor_name) {
return GetInputByTensorName(StringToChar(tensor_name));
}

std::vector<std::string> Model::GetOutputTensorNames() { return VectorCharToString(GetOutputTensorNamesChar()); }

MSTensor Model::GetOutputByTensorName(const std::string &tensor_name) {
return GetOutputByTensorName(StringToChar(tensor_name));
}
} // namespace mindspore
#endif // MINDSPORE_INCLUDE_API_MODEL_H

+ 5
- 5
tests/ut/stub/include/api/serialization.h View File

@@ -29,19 +29,19 @@
namespace mindspore {
class MS_API Serialization {
public:
static Graph LoadModel(const void *model_data, size_t data_size, ModelType model_type);
inline static Graph LoadModel(const std::string &file, ModelType model_type);
static Status Load(const void *model_data, size_t data_size, ModelType model_type, Graph *graph);
inline static Status Load(const std::string &file, ModelType model_type, Graph *graph);
static Status LoadCheckPoint(const std::string &ckpt_file, std::map<std::string, Buffer> *parameters);
static Status SetParameters(const std::map<std::string, Buffer> &parameters, Model *model);
static Status ExportModel(const Model &model, ModelType model_type, Buffer *model_data);
static Status ExportModel(const Model &model, ModelType model_type, const std::string &model_file);

private:
static Graph LoadModel(const std::vector<char> &file, ModelType model_type);
static Status Load(const std::vector<char> &file, ModelType model_type, Graph *graph);
};

Graph Serialization::LoadModel(const std::string &file, ModelType model_type) {
return LoadModel(StringToChar(file), model_type);
Status Serialization::Load(const std::string &file, ModelType model_type, Graph *graph) {
return Load(StringToChar(file), model_type, graph);
}
} // namespace mindspore
#endif // MINDSPORE_INCLUDE_API_SERIALIZATION_H

+ 29
- 14
tests/ut/stub/include/api/types.h View File

@@ -43,15 +43,19 @@ class MS_API MSTensor {
public:
class Impl;

static inline MSTensor CreateTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept;
static inline MSTensor CreateRefTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept;
static inline MSTensor *CreateTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept;
static inline MSTensor *CreateRefTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept;
static inline MSTensor *StringsToTensor(const std::string &name, const std::vector<std::string> &str);
static inline std::vector<std::string> TensorToStrings(const MSTensor &tensor);
static void DestroyTensorPtr(MSTensor *tensor) noexcept;

MSTensor();
explicit MSTensor(const std::shared_ptr<Impl> &impl);
inline MSTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape, const void *data,
size_t data_len);
explicit MSTensor(std::nullptr_t);
~MSTensor();

inline std::string Name() const;
@@ -65,21 +69,24 @@ class MS_API MSTensor {

bool IsDevice() const;

MSTensor Clone() const;
MSTensor *Clone() const;
bool operator==(std::nullptr_t) const;
bool operator!=(std::nullptr_t) const;

private:
// api without std::string
static MSTensor CreateTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept;
static MSTensor CreateRefTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept;
static MSTensor *CreateTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept;
static MSTensor *CreateRefTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept;
static MSTensor *CharStringsToTensor(const std::vector<char> &name, const std::vector<std::vector<char>> &str);
static std::vector<std::vector<char>> TensorToStringChars(const MSTensor &tensor);

MSTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape, const void *data,
size_t data_len);
std::vector<char> CharName() const;

friend class ModelImpl;
explicit MSTensor(std::nullptr_t);
std::shared_ptr<Impl> impl_;
};

@@ -121,16 +128,24 @@ class MS_API Buffer {
std::shared_ptr<Impl> impl_;
};

MSTensor MSTensor::CreateTensor(const std::string &name, enum DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept {
MSTensor *MSTensor::CreateTensor(const std::string &name, enum DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept {
return CreateTensor(StringToChar(name), type, shape, data, data_len);
}

MSTensor MSTensor::CreateRefTensor(const std::string &name, enum DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept {
MSTensor *MSTensor::CreateRefTensor(const std::string &name, enum DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept {
return CreateRefTensor(StringToChar(name), type, shape, data, data_len);
}

MSTensor *MSTensor::StringsToTensor(const std::string &name, const std::vector<std::string> &str) {
return CharStringsToTensor(StringToChar(name), VectorStringToChar(str));
}

std::vector<std::string> MSTensor::TensorToStrings(const MSTensor &tensor) {
return VectorCharToString(TensorToStringChars(tensor));
}

MSTensor::MSTensor(const std::string &name, enum DataType type, const std::vector<int64_t> &shape, const void *data,
size_t data_len)
: MSTensor(StringToChar(name), type, shape, data, data_len) {}


+ 1
- 1
third_party/mindspore

@@ -1 +1 @@
Subproject commit 6b8bef2c8afe3f9890cee8e866771dd4b1d23d16
Subproject commit 340583367f22313cf1d4ca8252e366c803f4e907

Loading…
Cancel
Save