From b064ed864d1dc196ae97b33dc7860a418cff516e Mon Sep 17 00:00:00 2001
From: yeyunpeng2020
Date: Fri, 9 Apr 2021 15:57:33 +0800
Subject: [PATCH] demo add annotation & fix gpu codex

---
 mindspore/lite/examples/quick_start_cpp/main.cc       | 4 ++++
 .../src/main/java/com/mindspore/lite/demo/Main.java   | 2 ++
 .../lite/src/runtime/kernel/opencl/kernel/one_hot.cc  | 9 ++++++---
 mindspore/lite/src/runtime/kernel/opencl/utils.cc     | 4 ++--
 4 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/mindspore/lite/examples/quick_start_cpp/main.cc b/mindspore/lite/examples/quick_start_cpp/main.cc
index 820ba60491..8262e9a6cd 100644
--- a/mindspore/lite/examples/quick_start_cpp/main.cc
+++ b/mindspore/lite/examples/quick_start_cpp/main.cc
@@ -107,18 +107,22 @@ int GenerateInputDataWithRandom(std::vector<mindspore::tensor::MSTensor *> inputs) {
 
 int Run(mindspore::session::LiteSession *session) {
   auto inputs = session->GetInputs();
+
+  // Generate random data as input data.
   auto ret = GenerateInputDataWithRandom(inputs);
   if (ret != mindspore::lite::RET_OK) {
     std::cerr << "Generate Random Input Data failed." << std::endl;
     return ret;
   }
 
+  // Run inference.
   ret = session->RunGraph();
   if (ret != mindspore::lite::RET_OK) {
     std::cerr << "Inference error " << ret << std::endl;
     return ret;
   }
 
+  // Get output tensor data.
   auto out_tensors = session->GetOutputs();
   for (auto tensor : out_tensors) {
     std::cout << "tensor name is:" << tensor.first << " tensor size is:" << tensor.second->Size()
diff --git a/mindspore/lite/examples/quick_start_java/src/main/java/com/mindspore/lite/demo/Main.java b/mindspore/lite/examples/quick_start_java/src/main/java/com/mindspore/lite/demo/Main.java
index 9671f9e98c..a98100e917 100644
--- a/mindspore/lite/examples/quick_start_java/src/main/java/com/mindspore/lite/demo/Main.java
+++ b/mindspore/lite/examples/quick_start_java/src/main/java/com/mindspore/lite/demo/Main.java
@@ -39,6 +39,8 @@ public class Main {
 
     private static boolean compile() {
         MSConfig msConfig = new MSConfig();
+        // You can set the config through the init API or use the default parameters directly.
+        // By default, the backend type is DeviceType.DT_CPU and the number of threads is 2.
         boolean ret = msConfig.init(DeviceType.DT_CPU, 2);
         if (!ret) {
             System.err.println("Init context failed");
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/one_hot.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/one_hot.cc
index 7a7cac3e47..48b77c72fc 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/one_hot.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/one_hot.cc
@@ -65,10 +65,13 @@ int OneHotOpenCLKernel::Prepare() {
 
 int OneHotOpenCLKernel::InitWeights() {
   depth_ = static_cast<int *>(in_tensors_[1]->data_c())[0];
-  if (in_tensors_.size() > 2) {
-    on_value_ = static_cast<float *>(in_tensors_[2]->data_c())[0];
+  // The number of inputs is 3 (onnx) or 4 (tf).
+  if (in_tensors_.size() == 3) {  // onnx
+    off_value_ = static_cast<float *>(in_tensors_[2]->data_c())[0];
+    on_value_ = static_cast<float *>(in_tensors_[2]->data_c())[1];
   }
-  if (in_tensors_.size() > 3) {
+  if (in_tensors_.size() == 4) {  // tf
+    on_value_ = static_cast<float *>(in_tensors_[2]->data_c())[0];
     off_value_ = static_cast<float *>(in_tensors_[3]->data_c())[0];
   }
   return RET_OK;
diff --git a/mindspore/lite/src/runtime/kernel/opencl/utils.cc b/mindspore/lite/src/runtime/kernel/opencl/utils.cc
index fdbde0be1f..0abc134cfb 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/utils.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/utils.cc
@@ -300,7 +300,7 @@ static std::set<void *> tmp_weights;
 
 void StoreTmpWeight(lite::Tensor *tensor) {
   MS_LOG(WARNING) << "store weight when kernel don't infer shape!";
-  if (tensor && tensor->data_c() && tensor->Size()) {
+  if (tensor != nullptr && tensor->data_c() != nullptr && tensor->Size() > 0) {
     void *new_data = malloc(tensor->Size());
     MS_ASSERT(new_data);
     if (new_data == nullptr) {
@@ -314,8 +314,8 @@ void StoreTmpWeight(lite::Tensor *tensor) {
 
 void FreeTmpWeight(void *data) {
   if (tmp_weights.count(data)) {
-    free(data);
     tmp_weights.erase(data);
+    free(data);
   }
 }
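
For reference, a minimal standalone sketch of the OneHot input convention the one_hot.cc fix encodes; "Tensor" and "ParseOneHotInputs" are illustrative stand-ins here, not MindSpore Lite types. With 3 inputs (onnx) the third tensor packs [off_value, on_value]; with 4 inputs (tf) on_value and off_value arrive as separate scalar tensors, so the old "> 2" / "> 3" checks mixed the two layouts up.

#include <iostream>
#include <vector>

// Sketch only: stand-in for a framework tensor holding raw float data.
struct Tensor {
  std::vector<float> data;
};

struct OneHotParams {
  int depth = 0;
  float on_value = 1.0f;
  float off_value = 0.0f;
};

OneHotParams ParseOneHotInputs(const std::vector<Tensor> &inputs) {
  OneHotParams p;
  p.depth = static_cast<int>(inputs[1].data[0]);
  if (inputs.size() == 3) {
    // onnx: the third tensor holds [off_value, on_value].
    p.off_value = inputs[2].data[0];
    p.on_value = inputs[2].data[1];
  } else if (inputs.size() == 4) {
    // tf: on_value and off_value are separate scalar tensors.
    p.on_value = inputs[2].data[0];
    p.off_value = inputs[3].data[0];
  }
  return p;
}

int main() {
  // onnx-style inputs: indices, depth = 4, values = [off, on].
  std::vector<Tensor> inputs;
  inputs.push_back(Tensor{{}});            // indices (unused in this sketch)
  inputs.push_back(Tensor{{4.0f}});        // depth
  inputs.push_back(Tensor{{0.0f, 1.0f}});  // [off_value, on_value]
  OneHotParams p = ParseOneHotInputs(inputs);
  std::cout << "depth=" << p.depth << " on=" << p.on_value
            << " off=" << p.off_value << std::endl;  // depth=4 on=1 off=0
  return 0;
}

On the utils.cc change: erasing the pointer from tmp_weights before calling free(data) avoids touching a freed pointer as a set lookup key, which is the kind of ordering issue static checkers flag.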