
cpu_kernel_runtime.cc 12 kB

/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "device/cpu/cpu_kernel_runtime.h"
#include <string>
#include <vector>
#include <memory>
#include <numeric>
#include <utility>
#include <functional>
#include <unordered_map>
#include "kernel/kernel.h"
#include "device/cpu/cpu_device_address.h"
#include "utils/context/ms_context.h"
#include "utils/config_manager.h"
#include "common/utils.h"
#include "session/anf_runtime_algorithm.h"
#include "session/session_basic.h"
#include "operator/ops.h"
namespace mindspore {
namespace device {
namespace cpu {
const size_t INIT_NODE_REF = 1;
namespace {
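// Map an inferred output type to one of the two element types the CPU
// backend materializes for graph outputs (int32 or float32); raise an
// exception for anything else.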
TypeId GetCPUSupportOutputTypeId(const TypeId type_id) {
  TypeId support_type_id = type_id;
  if (type_id == kNumberTypeUInt32) {
    support_type_id = kNumberTypeInt32;
  }
  if (type_id == kNumberTypeFloat || type_id == kNumberTypeFloat16 || type_id == kNumberTypeFloat32 ||
      type_id == kNumberTypeFloat64) {
    support_type_id = kNumberTypeFloat32;
  }
  if (support_type_id != kNumberTypeInt32 && support_type_id != kNumberTypeFloat32) {
    MS_LOG(EXCEPTION) << "Check output type failed.";
  }
  return support_type_id;
}
}  // namespace
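// Assign device addresses for value nodes, graph inputs, and kernel
// outputs/workspaces, then plan and allocate the backing memory.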
void CPUKernelRuntime::AssignKernelAddress(session::KernelGraph *kernel_graph) {
  AssignValueNodeAddress(kernel_graph);
  AssignInputNodeAddress(kernel_graph);
  AssignKernelOutputAddress(kernel_graph);
  resource_manager_.MemPlan(kernel_graph);
  resource_manager_.MemMalloc(kernel_graph);
}
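// Bind each tensor-valued ValueNode to a device address. float32/int32
// tensors share their host buffer directly; other types are converted by
// copying into freshly allocated runtime memory.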
void CPUKernelRuntime::AssignValueNodeAddress(session::KernelGraph *kernel_graph) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  size_t type_size = sizeof(float);
  for (auto &item_node : kernel_graph->graph_value_nodes()) {
    MS_EXCEPTION_IF_NULL(item_node);
    if (item_node->isa<ValueNode>()) {
      auto value_node = item_node->cast<ValueNodePtr>();
      MS_EXCEPTION_IF_NULL(value_node);
      auto node_value = value_node->value();
      MS_EXCEPTION_IF_NULL(node_value);
      if (!node_value->isa<tensor::Tensor>()) {
        continue;
      }
      auto tensor = node_value->cast<TensorPtr>();
      MS_EXCEPTION_IF_NULL(tensor);
      std::vector<int> data_shape = tensor->shape();
      size_t tensor_size = std::accumulate(data_shape.begin(), data_shape.end(), type_size, std::multiplies<size_t>());
      DeviceAddressPtr address = CreateDeviceAddress(nullptr, tensor_size, kOpFormat_DEFAULT, kNumberTypeFloat32);
      if (tensor->data_type() == kNumberTypeFloat32 || tensor->data_type() == kNumberTypeInt32) {
        address->ptr_ = tensor->data_c(false);
      } else {
        address->ptr_ = resource_manager_.MemMalloc(tensor_size);
        if (!address->SyncHostToDevice(data_shape, LongToSize(tensor->data().nbytes()), tensor->data_type(),
                                       tensor->data_c(false))) {
          MS_LOG(EXCEPTION) << "Value node sync host to device failed!";
        }
      }
      address->ref_count_ = INIT_NODE_REF;
      AnfAlgo::SetOutputAddr(address, 0, item_node.get());
    }
  }
}
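// Create empty device addresses for every Parameter output; the actual
// data pointers are bound later in BindInputOutput.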
void CPUKernelRuntime::AssignInputNodeAddress(const session::KernelGraph *kernel_graph) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  size_t type_size = sizeof(float);
  for (auto &item : kernel_graph->inputs()) {
    MS_EXCEPTION_IF_NULL(item);
    if (item->isa<Parameter>()) {
      auto output_num = AnfAlgo::GetOutputTensorNum(item);
      for (size_t index = 0; index < output_num; index++) {
        TypeId output_type_id = AnfAlgo::GetOutputDeviceDataType(item, index);
        std::vector<size_t> fmt_shape = AnfAlgo::GetOutputDeviceShape(item, index);
        size_t tensor_size =
          fmt_shape.empty() ? type_size
                            : std::accumulate(fmt_shape.begin(), fmt_shape.end(), type_size, std::multiplies<size_t>());
        auto format = AnfAlgo::GetOutputFormat(item, index);
        auto address = CreateDeviceAddress(nullptr, tensor_size, format, output_type_id);
        AnfAlgo::SetOutputAddr(address, index, item.get());
      }
    }
  }
}
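// Create device addresses for each kernel's outputs and workspaces, sized
// according to the kernel mod's size lists.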
void CPUKernelRuntime::AssignKernelOutputAddress(const session::KernelGraph *kernel_graph) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto kernels = kernel_graph->execution_order();
  for (auto &kernel : kernels) {
    auto kernel_mod = AnfAlgo::GetKernelMod(kernel);
    MS_EXCEPTION_IF_NULL(kernel_mod);
    auto output_sizes = kernel_mod->GetOutputSizeList();
    for (size_t i = 0; i < output_sizes.size(); ++i) {
      auto output_format = AnfAlgo::GetOutputFormat(kernel, i);
      auto output_type = AnfAlgo::GetOutputDeviceDataType(kernel, i);
      AnfAlgo::SetOutputAddr(CreateDeviceAddress(nullptr, output_sizes[i], output_format, output_type), i,
                             kernel.get());
    }
    auto workspace_sizes = kernel_mod->GetWorkspaceSizeList();
    for (size_t i = 0; i < workspace_sizes.size(); ++i) {
      AnfAlgo::SetWorkspaceAddr(CreateDeviceAddress(nullptr, workspace_sizes[i], kOpFormat_DEFAULT, kNumberTypeFloat32),
                                i, kernel.get());
    }
  }
}
DeviceAddressPtr CPUKernelRuntime::CreateDeviceAddress(void *device_ptr, size_t device_size, const string &format,
                                                       TypeId type_id) {
  return std::make_shared<CPUDeviceAddress>(device_ptr, device_size, format, type_id);
}
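// Build the output tensor(s) for a graph output node. MakeTuple nodes are
// expanded recursively into a VectorRef; a plain kernel output either reuses
// the kernel's device address or hands its host buffer to that address.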
BaseRef CPUKernelRuntime::CreatTensorForOutput(const AnfNodePtr &input_node, size_t index,
                                               const std::unordered_map<AnfNode *, tensor::TensorPtr> &input_map) {
  MS_EXCEPTION_IF_NULL(input_node);
  if (input_node->isa<CNode>() && AnfAlgo::GetCNodeName(input_node) == prim::kPrimMakeTuple->name()) {
    auto cnode = input_node->cast<CNodePtr>();
    MS_EXCEPTION_IF_NULL(cnode);
    VectorRef ret;
    for (size_t i = 1; i < cnode->inputs().size(); i++) {
      auto item_with_index = AnfAlgo::VisitKernelWithReturnType(cnode->input(i), 0);
      auto out = CreatTensorForOutput(item_with_index.first, item_with_index.second, input_map);
      ret.push_back(out);
    }
    return ret;
  }
  if (input_node->isa<CNode>()) {
    auto node = input_node->cast<CNodePtr>();
    MS_EXCEPTION_IF_NULL(node);
    size_t output_size = AnfAlgo::GetOutputTensorNum(node);
    if (index >= output_size) {
      MS_LOG(EXCEPTION) << "Invalid input index " << index;
    }
    auto address = AnfAlgo::GetMutableOutputAddr(node, index);
    MS_EXCEPTION_IF_NULL(address);
    auto shape = AnfAlgo::GetOutputInferShape(node, index);
    std::vector<int> temp_shape;
    (void)temp_shape.insert(temp_shape.end(), shape.begin(), shape.end());
    TypeId type_id = AnfAlgo::GetOutputInferDataType(node, index);
    type_id = GetCPUSupportOutputTypeId(type_id);
    tensor::TensorPtr tensor = std::make_shared<tensor::Tensor>(type_id, temp_shape);
    MS_EXCEPTION_IF_NULL(tensor);
    if (address->ref_count_ > 0 && address->ptr_ != nullptr) {
      tensor->set_device_address(address);
    } else {
      address->ptr_ = tensor->data_c(true);
      address->ref_count_ = INIT_NODE_REF;
    }
    tensor->set_dirty(false);
    return tensor;
  } else if (input_node->isa<Parameter>() || input_node->isa<ValueNode>()) {
    auto iter = input_map.find(input_node.get());
    if (iter != input_map.end()) {
      return iter->second;
    }
  }
  return BaseRef();
}
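// Bind host tensors to the graph: wire each input tensor to its Parameter's
// device address (copying unsupported dtypes into runtime memory), then
// create the tensors that will receive the graph outputs.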
void CPUKernelRuntime::BindInputOutput(const session::KernelGraph *kernel_graph,
                                       const std::vector<tensor::TensorPtr> &inputs, VectorRef *outputs) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  MS_EXCEPTION_IF_NULL(outputs);
  // bind input ptr
  auto &input_nodes = kernel_graph->inputs();
  if (input_nodes.size() != inputs.size()) {
    MS_LOG(EXCEPTION) << "Input size is not equal to input node size!";
  }
  std::unordered_map<AnfNode *, tensor::TensorPtr> input_map;
  size_t input_idx = 0;
  size_t type_size = sizeof(float);
  for (auto &item : input_nodes) {
    MS_EXCEPTION_IF_NULL(item);
    input_map[item.get()] = inputs[input_idx];
    if (item->isa<Parameter>()) {
      auto address = AnfAlgo::GetMutableOutputAddr(item, 0);
      auto tensor = inputs[input_idx];
      // Null-check the tensor before dereferencing it.
      MS_EXCEPTION_IF_NULL(address);
      MS_EXCEPTION_IF_NULL(tensor);
      auto tensor_address = tensor->device_address();
      if (tensor_address != nullptr && tensor_address != address) {
        (void)tensor->data_sync();
      }
      std::vector<int> data_shape = tensor->shape();
      size_t tensor_size = std::accumulate(data_shape.begin(), data_shape.end(), type_size, std::multiplies<size_t>());
      if (tensor->data_type() == kNumberTypeFloat32 || tensor->data_type() == kNumberTypeInt32) {
        address->ptr_ = tensor->data_c(false);
      } else {
        address->ptr_ = resource_manager_.MemMalloc(tensor_size);
        if (!address->SyncHostToDevice(data_shape, LongToSize(tensor->data().nbytes()), tensor->data_type(),
                                       tensor->data_c(false))) {
          MS_LOG(EXCEPTION) << "Parameter node sync host to device failed!";
        }
        tensor->set_dirty(true);
      }
      address->ref_count_ = INIT_NODE_REF;
      tensor->set_device_address(address);
    }
    input_idx++;
  }
  // new output and bind ptr
  auto output_nodes = kernel_graph->outputs();
  for (const auto &item : output_nodes) {
    auto item_with_index = AnfAlgo::VisitKernelWithReturnType(item, 0, true);
    auto out = CreatTensorForOutput(item_with_index.first, item_with_index.second, input_map);
    outputs->push_back(std::move(out));
  }
}
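// Wrap a device address as a kernel::Address for Launch, allocating its
// backing memory on first use.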
void CPUKernelRuntime::AddRuntimeAddress(DeviceAddress *address, std::vector<kernel::AddressPtr> *input_list) {
  MS_EXCEPTION_IF_NULL(address);
  MS_EXCEPTION_IF_NULL(input_list);
  kernel::AddressPtr input = std::make_shared<kernel::Address>();
  MS_EXCEPTION_IF_NULL(input);
  if (address->ptr_ == nullptr) {
    address->ptr_ = resource_manager_.MemMalloc(address->size_);
  }
  MS_EXCEPTION_IF_NULL(address->ptr_);
  input->addr = address->ptr_;
  input->size = address->size_;
  input_list->push_back(input);
}
void CPUKernelRuntime::IncreaseSummaryRefCount(const session::NamedSummaryOutputs &summary_outputs) {
  resource_manager_.IncreaseSummaryRefCount(summary_outputs);
}
void CPUKernelRuntime::DecreaseSummaryRefCount(const session::NamedSummaryOutputs &summary_outputs) {
  resource_manager_.DecreaseSummaryRefCount(summary_outputs);
}
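// Execute the graph: for each kernel in execution order, gather its input,
// output, and workspace addresses, launch the kernel, then release addresses
// whose reference counts drop to zero.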
bool CPUKernelRuntime::Run(session::KernelGraph *kernel_graph) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  resource_manager_.IncreaseAddressRefCount(kernel_graph);
  auto kernels = kernel_graph->execution_order();
  for (const auto &kernel : kernels) {
    std::vector<kernel::AddressPtr> kernel_inputs;
    std::vector<kernel::AddressPtr> kernel_workspaces;
    std::vector<kernel::AddressPtr> kernel_outputs;
    size_t input_num = AnfAlgo::GetInputTensorNum(kernel);
    for (size_t i = 0; i < input_num; ++i) {
      auto device_address = AnfAlgo::GetPrevNodeMutableOutputAddr(kernel, i).get();
      MS_EXCEPTION_IF_NULL(device_address);
      AddRuntimeAddress(device_address, &kernel_inputs);
    }
    size_t output_num = AnfAlgo::GetOutputTensorNum(kernel);
    for (size_t i = 0; i < output_num; ++i) {
      auto device_address = AnfAlgo::GetMutableOutputAddr(kernel, i).get();
      MS_EXCEPTION_IF_NULL(device_address);
      AddRuntimeAddress(device_address, &kernel_outputs);
    }
    auto kernel_mod = AnfAlgo::GetKernelMod(kernel);
    MS_EXCEPTION_IF_NULL(kernel_mod);
    for (size_t i = 0; i < kernel_mod->GetWorkspaceSizeList().size(); ++i) {
      auto device_address = AnfAlgo::GetWorkspaceAddr(kernel, i);
      MS_EXCEPTION_IF_NULL(device_address);
      AddRuntimeAddress(device_address, &kernel_workspaces);
    }
    auto ret = kernel_mod->Launch(kernel_inputs, kernel_workspaces, kernel_outputs, 0);
    resource_manager_.DecreaseAddressRefCount(kernel);
    if (!ret) {
      MS_LOG(EXCEPTION) << "Launch kernel failed.";
    }
  }
  return true;
}
}  // namespace cpu
}  // namespace device
}  // namespace mindspore