Browse Source

demo: add annotations & fix gpu code

pull/14838/head
yeyunpeng2020 5 years ago
parent
commit
b064ed864d
4 changed files with 14 additions and 5 deletions
  1. +4
    -0
      mindspore/lite/examples/quick_start_cpp/main.cc
  2. +2
    -0
      mindspore/lite/examples/quick_start_java/src/main/java/com/mindspore/lite/demo/Main.java
  3. +6
    -3
      mindspore/lite/src/runtime/kernel/opencl/kernel/one_hot.cc
  4. +2
    -2
      mindspore/lite/src/runtime/kernel/opencl/utils.cc

+ 4
- 0
mindspore/lite/examples/quick_start_cpp/main.cc View File

@@ -107,18 +107,22 @@ int GenerateInputDataWithRandom(std::vector<mindspore::tensor::MSTensor *> input

int Run(mindspore::session::LiteSession *session) {
auto inputs = session->GetInputs();

// Generate random data as input data.
auto ret = GenerateInputDataWithRandom(inputs);
if (ret != mindspore::lite::RET_OK) {
std::cerr << "Generate Random Input Data failed." << std::endl;
return ret;
}

// Run Inference.
ret = session->RunGraph();
if (ret != mindspore::lite::RET_OK) {
std::cerr << "Inference error " << ret << std::endl;
return ret;
}

// Get Output Tensor Data.
auto out_tensors = session->GetOutputs();
for (auto tensor : out_tensors) {
std::cout << "tensor name is:" << tensor.first << " tensor size is:" << tensor.second->Size()


+ 2
- 0
mindspore/lite/examples/quick_start_java/src/main/java/com/mindspore/lite/demo/Main.java View File

@@ -39,6 +39,8 @@ public class Main {

private static boolean compile() {
MSConfig msConfig = new MSConfig();
// You can set config through Init Api or use the default parameters directly.
// The default parameter is that the backend type is DeviceType.DT_CPU, and the number of threads is 2.
boolean ret = msConfig.init(DeviceType.DT_CPU, 2);
if (!ret) {
System.err.println("Init context failed");


+ 6
- 3
mindspore/lite/src/runtime/kernel/opencl/kernel/one_hot.cc View File

@@ -65,10 +65,13 @@ int OneHotOpenCLKernel::Prepare() {

int OneHotOpenCLKernel::InitWeights() {
depth_ = static_cast<int32_t *>(in_tensors_[1]->data_c())[0];
if (in_tensors_.size() > 2) {
on_value_ = static_cast<float *>(in_tensors_[2]->data_c())[0];
// inputs num is 3 or 4.
if (in_tensors_.size() == 3) { // onnx
off_value_ = static_cast<float *>(in_tensors_[2]->data_c())[0];
on_value_ = static_cast<float *>(in_tensors_[2]->data_c())[1];
}
if (in_tensors_.size() > 3) {
if (in_tensors_.size() == 4) { // tf
on_value_ = static_cast<float *>(in_tensors_[2]->data_c())[0];
off_value_ = static_cast<float *>(in_tensors_[3]->data_c())[0];
}
return RET_OK;


+ 2
- 2
mindspore/lite/src/runtime/kernel/opencl/utils.cc View File

@@ -300,7 +300,7 @@ static std::set<void *> tmp_weights;

void StoreTmpWeight(lite::Tensor *tensor) {
MS_LOG(WARNING) << "store weight when kernel don't infer shape!";
if (tensor && tensor->data_c() && tensor->Size()) {
if (tensor != nullptr && tensor->data_c() != nullptr && tensor->Size() > 0) {
void *new_data = malloc(tensor->Size());
MS_ASSERT(new_data);
if (new_data == nullptr) {
@@ -314,8 +314,8 @@ void StoreTmpWeight(lite::Tensor *tensor) {

// Release a temporary weight buffer previously registered via StoreTmpWeight.
// `data` may be any pointer; only buffers tracked in `tmp_weights` are freed,
// so stray or already-released pointers are ignored safely.
//
// NOTE: the set entry is erased BEFORE free() is called, and free() runs
// exactly once. After free() the pointer value is indeterminate, so it must
// not be used (even as a lookup key) afterwards — and a duplicated free()
// both before and after the erase would be a double-free.
void FreeTmpWeight(void *data) {
  if (tmp_weights.count(data)) {
    tmp_weights.erase(data);
    free(data);
  }
}



Loading…
Cancel
Save