You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

test_tensor_add.cc 2.9 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
#include <cmath>
#include <string>
#include <vector>

#include "common/common_test.h"
#include "include/api/model.h"
#include "include/api/serialization.h"
#include "include/api/context.h"
  22. using namespace mindspore;
  23. static const char tensor_add_file[] = "/home/workspace/mindspore_dataset/mindir/add/add.mindir";
  24. static const std::vector<float> input_data_1 = {1, 2, 3, 4};
  25. static const std::vector<float> input_data_2 = {2, 3, 4, 5};
  26. class TestAdd : public ST::Common {
  27. public:
  28. TestAdd() {}
  29. };
  30. TEST_F(TestAdd, InferMindIR) {
  31. ContextAutoSet();
  32. auto graph = Serialization::LoadModel(tensor_add_file, ModelType::kMindIR);
  33. Model tensor_add((GraphCell(graph)));
  34. ASSERT_TRUE(tensor_add.Build() == kSuccess);
  35. // get model inputs
  36. std::vector<MSTensor> origin_inputs = tensor_add.GetInputs();
  37. ASSERT_EQ(origin_inputs.size(), 2);
  38. // prepare input
  39. std::vector<MSTensor> outputs;
  40. std::vector<MSTensor> inputs;
  41. inputs.emplace_back(origin_inputs[0].Name(), origin_inputs[0].DataType(), origin_inputs[0].Shape(),
  42. input_data_1.data(), sizeof(float) * input_data_1.size());
  43. inputs.emplace_back(origin_inputs[1].Name(), origin_inputs[1].DataType(), origin_inputs[1].Shape(),
  44. input_data_2.data(), sizeof(float) * input_data_2.size());
  45. // infer
  46. ASSERT_TRUE(tensor_add.Predict(inputs, &outputs) == kSuccess);
  47. // assert input
  48. inputs = tensor_add.GetInputs();
  49. ASSERT_EQ(inputs.size(), 2);
  50. auto after_input_data_1 = inputs[0].Data();
  51. auto after_input_data_2 = inputs[1].Data();
  52. const float *p = reinterpret_cast<const float *>(after_input_data_1.get());
  53. for (size_t i = 0; i < inputs[0].DataSize() / sizeof(float); ++i) {
  54. ASSERT_LE(std::abs(p[i] - input_data_1[i]), 1e-4);
  55. }
  56. p = reinterpret_cast<const float *>(after_input_data_2.get());
  57. for (size_t i = 0; i < inputs[0].DataSize() / sizeof(float); ++i) {
  58. ASSERT_LE(std::abs(p[i] - input_data_2[i]), 1e-4);
  59. }
  60. // assert output
  61. for (auto &buffer : outputs) {
  62. auto buffer_data = buffer.Data();
  63. p = reinterpret_cast<const float *>(buffer_data.get());
  64. for (size_t i = 0; i < buffer.DataSize() / sizeof(float); ++i) {
  65. ASSERT_LE(std::abs(p[i] - (input_data_1[i] + input_data_2[i])), 1e-4);
  66. }
  67. }
  68. }