
http_process.cc

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <map>
#include <vector>
#include <string>
#include <functional>
#include <utility>
#include <numeric>  // std::accumulate, used by RecusiveGetTensor (missing from the listing)
#include <cstdint>  // INT64_MAX
#include <nlohmann/json.hpp>
#include "serving/ms_service.pb.h"
#include "util/status.h"
#include "core/session.h"
#include "core/http_process.h"
#include "core/serving_tensor.h"
// Assumption: the libevent headers (<event2/http.h>, <event2/buffer.h>,
// <event2/http_struct.h>) and the securec header declaring memcpy_s are pulled
// in through core/http_process.h; add direct includes if that does not hold.
using ms_serving::MSService;
using ms_serving::PredictReply;
using ms_serving::PredictRequest;
using nlohmann::json;

namespace mindspore {
namespace serving {

const int BUF_MAX = 0x7FFFFFFF;
static constexpr char HTTP_DATA[] = "data";
static constexpr char HTTP_TENSOR[] = "tensor";
enum HTTP_TYPE { TYPE_DATA = 0, TYPE_TENSOR };
enum HTTP_DATA_TYPE { HTTP_DATA_NONE, HTTP_DATA_INT, HTTP_DATA_FLOAT };

static const std::map<inference::DataType, HTTP_DATA_TYPE> infer_type2_http_type{
  {inference::DataType::kMSI_Int32, HTTP_DATA_INT}, {inference::DataType::kMSI_Float32, HTTP_DATA_FLOAT}};
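
// Copies the raw POST body out of libevent's input buffer into *buf,
// rejecting empty bodies and bodies larger than BUF_MAX.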
Status GetPostMessage(struct evhttp_request *const req, std::string *const buf) {
  Status status(SUCCESS);
  size_t post_size = evbuffer_get_length(req->input_buffer);
  if (post_size == 0) {
    ERROR_INFER_STATUS(status, INVALID_INPUTS, "http message invalid");
    return status;
  } else if (post_size > BUF_MAX) {
    ERROR_INFER_STATUS(status, INVALID_INPUTS, "http message is bigger than 0x7FFFFFFF.");
    return status;
  } else {
    buf->resize(post_size);
    memcpy_s(buf->data(), post_size, evbuffer_pullup(req->input_buffer, -1), post_size);
    return status;
  }
}
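
// Only HTTP POST is accepted; any other method is rejected as an invalid input.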
Status CheckRequestValid(const struct evhttp_request *const http_request) {
  Status status(SUCCESS);
  switch (evhttp_request_get_command(http_request)) {
    case EVHTTP_REQ_POST:
      return status;
    default:
      ERROR_INFER_STATUS(status, INVALID_INPUTS, "http message only supports POST right now");
      return status;
  }
}
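
// Serializes a failure status as {"error_message": ...} and sends it back.
// Note the reply still carries HTTP_OK, so clients must inspect the body to
// detect errors.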
void ErrorMessage(struct evhttp_request *const req, Status status) {
  json error_json = {{"error_message", status.StatusMessage()}};
  std::string out_error_str = error_json.dump();
  struct evbuffer *retbuff = evbuffer_new();
  if (retbuff == nullptr) {  // mirror the nullptr check done in http_handler_msg
    MSI_LOG_ERROR << "Create event buffer failed";
    return;
  }
  evbuffer_add(retbuff, out_error_str.data(), out_error_str.size());
  evhttp_send_reply(req, HTTP_OK, "Client", retbuff);
  evbuffer_free(retbuff);
}
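
// The request body must contain exactly one of the two top-level keys, which
// selects the input encoding. Illustrative payloads (derived from the parsing
// logic below) for a model with two inputs:
//   "data" format, one flat list per input:
//     {"data": [[1.1, 2.2], [3, 4]]}
//   "tensor" format, nested lists matching each input's full shape:
//     {"tensor": [[[1.1], [2.2]], [[3], [4]]]}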
Status CheckMessageValid(const json &message_info, HTTP_TYPE *const type) {
  Status status(SUCCESS);
  int count = 0;
  if (message_info.find(HTTP_DATA) != message_info.end()) {
    *type = TYPE_DATA;
    count++;
  }
  if (message_info.find(HTTP_TENSOR) != message_info.end()) {
    *type = TYPE_TENSOR;
    count++;
  }
  if (count != 1) {
    ERROR_INFER_STATUS(status, INVALID_INPUTS, "http message must have exactly one of (data, tensor)");
    return status;
  }
  return status;
}
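
// Derives a shape by walking the first element of each nesting level. This
// assumes the array is rectangular; ragged input is caught later by the
// per-dimension and element-count checks.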
std::vector<int64_t> GetJsonArrayShape(const json &json_array) {
  std::vector<int64_t> json_shape;
  const json *tmp_json = &json_array;
  while (tmp_json->is_array()) {
    if (tmp_json->empty()) {
      break;
    }
    json_shape.push_back(tmp_json->size());
    tmp_json = &tmp_json->at(0);
  }
  return json_shape;
}
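
// Reads a single scalar into the tensor. Accepts either a bare scalar or a
// one-element array, so a scalar input may be given as 3 or as [3].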
Status GetScalarDataFromJson(const json &json_data_array, ServingTensor *const request_tensor, HTTP_DATA_TYPE type) {
  Status status(SUCCESS);
  auto type_name = [](const json &json_data) -> std::string {
    if (json_data.is_number_integer()) {
      return "integer";
    } else if (json_data.is_number_float()) {
      return "float";
    }
    return json_data.type_name();
  };
  const json *json_data = &json_data_array;
  if (json_data_array.is_array()) {
    if (json_data_array.size() != 1 || json_data_array[0].is_array()) {
      status = INFER_STATUS(INVALID_INPUTS) << "get data failed, expected scalar data to be a scalar or a shape (1) "
                                               "array, now array shape is "
                                            << GetJsonArrayShape(json_data_array);
      MSI_LOG_ERROR << status.StatusMessage();
      return status;
    }
    json_data = &json_data_array.at(0);
  }
  if (type == HTTP_DATA_INT) {
    auto data = reinterpret_cast<int32_t *>(request_tensor->mutable_data());
    if (!json_data->is_number_integer()) {
      status = INFER_STATUS(INVALID_INPUTS) << "get data failed, expected integer, given " << type_name(*json_data);
      MSI_LOG_ERROR << status.StatusMessage();
      return status;
    }
    data[0] = json_data->get<int32_t>();
  } else if (type == HTTP_DATA_FLOAT) {
    auto data = reinterpret_cast<float *>(request_tensor->mutable_data());
    if (!json_data->is_number_float()) {
      status = INFER_STATUS(INVALID_INPUTS) << "get data failed, expected float, given " << type_name(*json_data);
      MSI_LOG_ERROR << status.StatusMessage();
      return status;
    }
    data[0] = json_data->get<float>();
  }
  return SUCCESS;
}
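
// Copies one flat JSON array of numbers into the tensor buffer at data_index.
// Element types are checked strictly: nlohmann::json classifies 1 as an
// integer, not a float, so float tensors must be given values written as 1.0.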
Status GetDataFromJson(const json &json_data_array, ServingTensor *const request_tensor, size_t data_index,
                       HTTP_DATA_TYPE type) {
  Status status(SUCCESS);
  auto type_name = [](const json &json_data) -> std::string {
    if (json_data.is_number_integer()) {
      return "integer";
    } else if (json_data.is_number_float()) {
      return "float";
    }
    return json_data.type_name();
  };
  size_t array_size = json_data_array.size();
  if (type == HTTP_DATA_INT) {
    auto data = reinterpret_cast<int32_t *>(request_tensor->mutable_data()) + data_index;
    for (size_t k = 0; k < array_size; k++) {
      auto &json_data = json_data_array[k];
      if (!json_data.is_number_integer()) {
        status = INFER_STATUS(INVALID_INPUTS) << "get data failed, expected integer, given " << type_name(json_data);
        MSI_LOG_ERROR << status.StatusMessage();
        return status;
      }
      data[k] = json_data.get<int32_t>();
    }
  } else if (type == HTTP_DATA_FLOAT) {
    auto data = reinterpret_cast<float *>(request_tensor->mutable_data()) + data_index;
    for (size_t k = 0; k < array_size; k++) {
      auto &json_data = json_data_array[k];
      if (!json_data.is_number_float()) {
        status = INFER_STATUS(INVALID_INPUTS) << "get data failed, expected float, given " << type_name(json_data);
        MSI_LOG_ERROR << status.StatusMessage();
        return status;
      }
      data[k] = json_data.get<float>();
    }
  }
  return SUCCESS;
}
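
// Recursively walks the nested JSON arrays, checking every dimension against
// the tensor's required shape and computing the flat offset (data_index) at
// which each innermost row is written.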
Status RecusiveGetTensor(const json &json_data, size_t depth, ServingTensor *const request_tensor, size_t data_index,
                         HTTP_DATA_TYPE type) {
  Status status(SUCCESS);
  std::vector<int64_t> required_shape = request_tensor->shape();
  if (depth >= required_shape.size()) {
    status = INFER_STATUS(INVALID_INPUTS)
             << "input tensor shape has more dims than the required " << required_shape.size();
    MSI_LOG_ERROR << status.StatusMessage();
    return status;
  }
  if (!json_data.is_array()) {
    ERROR_INFER_STATUS(status, INVALID_INPUTS, "the tensor is constructed illegally");
    return status;
  }
  if (json_data.size() != static_cast<size_t>(required_shape[depth])) {
    status = INFER_STATUS(INVALID_INPUTS)
             << "tensor format request is constructed illegally, input tensor shape dim " << depth
             << " does not match, required " << required_shape[depth] << ", given " << json_data.size();
    MSI_LOG_ERROR << status.StatusMessage();
    return status;
  }
  if (depth + 1 < required_shape.size()) {
    size_t sub_element_cnt =
      std::accumulate(required_shape.begin() + depth + 1, required_shape.end(), 1LL, std::multiplies<size_t>());
    for (size_t k = 0; k < json_data.size(); k++) {
      status = RecusiveGetTensor(json_data[k], depth + 1, request_tensor, data_index + sub_element_cnt * k, type);
      if (status != SUCCESS) {
        return status;
      }
    }
  } else {
    status = GetDataFromJson(json_data, request_tensor, data_index, type);
    if (status != SUCCESS) {
      return status;
    }
  }
  return status;
}
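
// Handles the "data" format: the payload must be a depth-2 list, one flat row
// per model input, and each row must carry exactly ElementNum() values.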
Status TransDataToPredictRequest(const json &message_info, PredictRequest *const request) {
  Status status = SUCCESS;
  auto tensors = message_info.find(HTTP_DATA);
  if (tensors == message_info.end()) {
    ERROR_INFER_STATUS(status, INVALID_INPUTS, "http message does not have data type");
    return status;
  }
  if (!tensors->is_array()) {
    ERROR_INFER_STATUS(status, INVALID_INPUTS, "the input tensor list is not an array");
    return status;
  }
  auto const &json_shape = GetJsonArrayShape(*tensors);
  if (json_shape.size() != 2) {  // the data format expects a list nesting depth of 2
    status = INFER_STATUS(INVALID_INPUTS)
             << "the data format request is constructed illegally, expected list nesting depth 2, given "
             << json_shape.size();
    MSI_LOG_ERROR << status.StatusMessage();
    return status;
  }
  if (tensors->size() != static_cast<size_t>(request->data_size())) {
    status = INFER_STATUS(INVALID_INPUTS)
             << "model input count does not match, model required " << request->data_size() << ", given "
             << tensors->size();
    MSI_LOG_ERROR << status.StatusMessage();
    return status;
  }
  for (size_t i = 0; i < tensors->size(); i++) {
    const auto &tensor = tensors->at(i);
    ServingTensor request_tensor(*(request->mutable_data(i)));
    auto iter = infer_type2_http_type.find(request_tensor.data_type());
    if (iter == infer_type2_http_type.end()) {
      ERROR_INFER_STATUS(status, FAILED, "the model input type is not supported right now");
      return status;
    }
    HTTP_DATA_TYPE type = iter->second;
    if (!tensor.is_array()) {
      ERROR_INFER_STATUS(status, INVALID_INPUTS, "the tensor is constructed illegally");
      return status;
    }
    if (tensor.empty()) {
      ERROR_INFER_STATUS(status, INVALID_INPUTS, "the input tensor is null");
      return status;
    }
    if (tensor.size() != static_cast<size_t>(request_tensor.ElementNum())) {
      status = INFER_STATUS(INVALID_INPUTS) << "input " << i << " element count does not match, model required "
                                            << request_tensor.ElementNum() << ", given " << tensor.size();
      MSI_LOG_ERROR << status.StatusMessage();
      return status;
    }
    status = GetDataFromJson(tensor, &request_tensor, 0, type);
    if (status != SUCCESS) {
      return status;
    }
  }
  return SUCCESS;
}
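
// Handles the "tensor" format: each entry must be a nested array whose shape
// exactly matches the corresponding model input, except that scalar inputs
// (empty shape or shape (1)) may be given as a bare value or a one-element array.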
Status TransTensorToPredictRequest(const json &message_info, PredictRequest *const request) {
  Status status(SUCCESS);
  auto tensors = message_info.find(HTTP_TENSOR);
  if (tensors == message_info.end()) {
    ERROR_INFER_STATUS(status, INVALID_INPUTS, "http message does not have tensor type");
    return status;
  }
  if (!tensors->is_array()) {
    ERROR_INFER_STATUS(status, INVALID_INPUTS, "the input tensor list is not an array");
    return status;
  }
  if (tensors->size() != static_cast<size_t>(request->data_size())) {
    status =
      INFER_STATUS(INVALID_INPUTS)
      << "model input count does not match or json tensor request is constructed illegally, model input count "
         "required "
      << request->data_size() << ", given " << tensors->size();
    MSI_LOG_ERROR << status.StatusMessage();
    return status;
  }
  for (size_t i = 0; i < tensors->size(); i++) {
    const auto &tensor = tensors->at(i);
    ServingTensor request_tensor(*(request->mutable_data(i)));
    auto iter = infer_type2_http_type.find(request_tensor.data_type());
    if (iter == infer_type2_http_type.end()) {
      ERROR_INFER_STATUS(status, FAILED, "the model input type is not supported right now");
      return status;
    }
    HTTP_DATA_TYPE type = iter->second;
    // check data shape
    auto const &json_shape = GetJsonArrayShape(tensor);
    auto is_scalar_shape = [](const std::vector<int64_t> &shape) {
      return shape.empty() || (shape.size() == 1 && shape[0] == 1);
    };
    if (is_scalar_shape(request_tensor.shape())) {
      // fix: the listing returned here, which skipped every input after the
      // first scalar one; keep iterating so all inputs are filled
      status = GetScalarDataFromJson(tensor, &request_tensor, type);
      if (status != SUCCESS) {
        return status;
      }
    } else {
      if (json_shape != request_tensor.shape()) {  // data shape not match
        status = INFER_STATUS(INVALID_INPUTS) << "input " << i << " shape is invalid, expected "
                                              << request_tensor.shape() << ", given " << json_shape;
        MSI_LOG_ERROR << status.StatusMessage();
        return status;
      }
      size_t depth = 0;
      size_t data_index = 0;
      status = RecusiveGetTensor(tensor, depth, &request_tensor, data_index, type);
      if (status != SUCCESS) {
        MSI_LOG_ERROR << "Transfer tensor to predict request failed";
        return status;
      }
    }
  }
  return status;
}
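
// Top-level request translation: validate the method, copy out the POST body,
// pre-size one request tensor per model input from the session's input
// metadata, parse the JSON body, then dispatch to the data or tensor decoder.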
Status TransHTTPMsgToPredictRequest(struct evhttp_request *const http_request, PredictRequest *const request,
                                    HTTP_TYPE *const type) {
  Status status = CheckRequestValid(http_request);
  if (status != SUCCESS) {
    return status;
  }
  std::string post_message;
  status = GetPostMessage(http_request, &post_message);
  if (status != SUCCESS) {
    return status;
  }
  // get model required shape
  std::vector<inference::InferTensor> tensor_list;
  status = Session::Instance().GetModelInputsInfo(tensor_list);
  if (status != SUCCESS) {
    ERROR_INFER_STATUS(status, FAILED, "get model inputs info failed");
    return status;
  }
  for (auto &item : tensor_list) {
    auto input = request->add_data();
    ServingTensor tensor(*input);
    tensor.set_shape(item.shape());
    tensor.set_data_type(item.data_type());
    int64_t element_num = tensor.ElementNum();
    int64_t data_type_size = tensor.GetTypeSize(tensor.data_type());
    if (element_num <= 0 || INT64_MAX / element_num < data_type_size) {  // guard against size overflow
      ERROR_INFER_STATUS(status, FAILED, "model shape invalid");
      return status;
    }
    tensor.resize_data(element_num * data_type_size);
  }
  MSI_TIME_STAMP_START(ParseJson)
  json message_info;
  try {
    message_info = nlohmann::json::parse(post_message);
  } catch (nlohmann::json::exception &e) {
    std::string json_exception = e.what();
    std::string error_message = "Illegal JSON format: " + json_exception;
    ERROR_INFER_STATUS(status, INVALID_INPUTS, error_message);
    return status;
  }
  MSI_TIME_STAMP_END(ParseJson)
  status = CheckMessageValid(message_info, type);
  if (status != SUCCESS) {
    return status;
  }
  switch (*type) {
    case TYPE_DATA:
      status = TransDataToPredictRequest(message_info, request);
      break;
    case TYPE_TENSOR:
      status = TransTensorToPredictRequest(message_info, request);
      break;
    default:
      ERROR_INFER_STATUS(status, INVALID_INPUTS, "http message must have exactly one of (data, tensor)");
      return status;
  }
  return status;
}
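
// Copies len elements of the raw result tensor, starting at *pos, into a flat
// JSON array; only MS_INT32 and MS_FLOAT32 results are representable.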
Status GetJsonFromTensor(const ms_serving::Tensor &tensor, int len, int *const pos, json *const out_json) {
  Status status(SUCCESS);
  switch (tensor.tensor_type()) {
    case ms_serving::MS_INT32: {
      auto data = reinterpret_cast<const int *>(tensor.data().data()) + *pos;
      std::vector<int32_t> result_tensor(len);
      (void)memcpy_s(result_tensor.data(), result_tensor.size() * sizeof(int32_t), data, len * sizeof(int32_t));
      *out_json = std::move(result_tensor);
      *pos += len;
      break;
    }
    case ms_serving::MS_FLOAT32: {
      auto data = reinterpret_cast<const float *>(tensor.data().data()) + *pos;
      std::vector<float> result_tensor(len);
      (void)memcpy_s(result_tensor.data(), result_tensor.size() * sizeof(float), data, len * sizeof(float));
      *out_json = std::move(result_tensor);
      *pos += len;
      break;
    }
    default:
      MSI_LOG(ERROR) << "the result type is not supported in restful api, type is " << tensor.tensor_type();
      ERROR_INFER_STATUS(status, FAILED, "reply has an unsupported type");
  }
  return status;
}
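
// "data" replies flatten each result tensor into a single flat array under
// the "data" key, mirroring the request encoding.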
Status TransPredictReplyToData(const PredictReply &reply, json *const out_json) {
  Status status(SUCCESS);
  for (int i = 0; i < reply.result_size(); i++) {
    (*out_json)["data"].push_back(json());
    json &tensor_json = (*out_json)["data"].back();
    int num = 1;
    for (auto j = 0; j < reply.result(i).tensor_shape().dims_size(); j++) {
      num *= reply.result(i).tensor_shape().dims(j);
    }
    int pos = 0;
    status = GetJsonFromTensor(reply.result(i), num, &pos, &tensor_json);
    if (status != SUCCESS) {
      return status;
    }
  }
  return status;
}
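
// Rebuilds the nested array structure of a result tensor dimension by
// dimension; *pos tracks the flat read offset across recursive calls.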
Status RecusiveGetJson(const ms_serving::Tensor &tensor, int depth, int *const pos, json *const out_json) {
  Status status(SUCCESS);
  if (depth >= 10) {
    ERROR_INFER_STATUS(status, FAILED, "result tensor shape dims is larger than 10");
    return status;
  }
  if (depth == tensor.tensor_shape().dims_size() - 1) {
    status = GetJsonFromTensor(tensor, tensor.tensor_shape().dims(depth), pos, out_json);
    if (status != SUCCESS) {
      return status;
    }
  } else {
    for (int i = 0; i < tensor.tensor_shape().dims(depth); i++) {
      out_json->push_back(json());
      json &tensor_json = out_json->back();
      status = RecusiveGetJson(tensor, depth + 1, pos, &tensor_json);
      if (status != SUCCESS) {
        return status;
      }
    }
  }
  return status;
}
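
// "tensor" replies keep the full nested shape of each result under the
// "tensor" key.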
Status TransPredictReplyToTensor(const PredictReply &reply, json *const out_json) {
  Status status(SUCCESS);
  for (int i = 0; i < reply.result_size(); i++) {
    (*out_json)["tensor"].push_back(json());
    json &tensor_json = (*out_json)["tensor"].back();
    int pos = 0;
    status = RecusiveGetJson(reply.result(i), 0, &pos, &tensor_json);
    if (status != SUCCESS) {
      return status;
    }
  }
  return status;
}
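
// The reply is encoded in the same format the request used, then dumped into
// the libevent output buffer.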
Status TransPredictReplyToHTTPMsg(const PredictReply &reply, const HTTP_TYPE &type, struct evbuffer *const buf) {
  Status status(SUCCESS);
  json out_json;
  switch (type) {
    case TYPE_DATA:
      status = TransPredictReplyToData(reply, &out_json);
      break;
    case TYPE_TENSOR:
      status = TransPredictReplyToTensor(reply, &out_json);
      break;
    default:
      ERROR_INFER_STATUS(status, FAILED, "http message must have exactly one of (data, tensor)");
      return status;
  }
  const std::string &out_str = out_json.dump();
  evbuffer_add(buf, out_str.data(), out_str.size());
  return status;
}
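
// Full request pipeline: decode the HTTP message into a PredictRequest, run
// the session's Predict, and encode the reply; each stage is timed.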
Status HttpHandleMsgDetail(struct evhttp_request *const req, void *const arg, struct evbuffer *const retbuff) {
  PredictRequest request;
  PredictReply reply;
  HTTP_TYPE type;
  MSI_TIME_STAMP_START(ParseRequest)
  auto status = TransHTTPMsgToPredictRequest(req, &request, &type);
  MSI_TIME_STAMP_END(ParseRequest)
  if (status != SUCCESS) {
    MSI_LOG(ERROR) << "restful trans to request failed";
    return status;
  }
  MSI_TIME_STAMP_START(Predict)
  status = Session::Instance().Predict(request, reply);
  MSI_TIME_STAMP_END(Predict)
  if (status != SUCCESS) {
    MSI_LOG(ERROR) << "restful predict failed";
    return status;
  }
  MSI_TIME_STAMP_START(CreateReplyJson)
  status = TransPredictReplyToHTTPMsg(reply, type, retbuff);
  MSI_TIME_STAMP_END(CreateReplyJson)
  if (status != SUCCESS) {
    MSI_LOG(ERROR) << "restful trans to reply failed";
    return status;
  }
  return SUCCESS;
}
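
// libevent callback registered for the RESTful predict endpoint: on success
// the JSON reply is sent with HTTP_OK, on failure ErrorMessage reports the
// status back to the client.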
void http_handler_msg(struct evhttp_request *const req, void *const arg) {
  MSI_TIME_STAMP_START(TotalRestfulPredict)
  struct evbuffer *retbuff = evbuffer_new();
  if (retbuff == nullptr) {
    MSI_LOG_ERROR << "Create event buffer failed";
    return;
  }
  auto status = HttpHandleMsgDetail(req, arg, retbuff);
  if (status != SUCCESS) {
    ErrorMessage(req, status);
    evbuffer_free(retbuff);
    return;
  }
  MSI_TIME_STAMP_START(ReplyJson)
  evhttp_send_reply(req, HTTP_OK, "Client", retbuff);
  MSI_TIME_STAMP_END(ReplyJson)
  evbuffer_free(retbuff);
  MSI_TIME_STAMP_END(TotalRestfulPredict)
}
}  // namespace serving
}  // namespace mindspore