parameter_server.cc
/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "ps/parameter_server.h"
namespace mindspore {
namespace ps {
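// Entry point of the server process: joins the cluster, starts the server
// node, blocks on the background weight-update thread until Finalize() is
// requested, then syncs embedding tables back into the graph and shuts down.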
void ParameterServer::Run(const FuncGraphPtr &func_graph) {
  MS_EXCEPTION_IF_NULL(func_graph);
  MS_LOG(INFO) << "PServer starts connecting to scheduler and workers...";
  core::ClusterMetadata::instance()->Init(
    PSContext::instance()->initial_worker_num(), PSContext::instance()->initial_server_num(),
    PSContext::instance()->scheduler_host(), PSContext::instance()->scheduler_port());
  MS_LOG(INFO) << "PServer connected successfully.";
  if (!PSContext::instance()->is_server()) {
    MS_LOG(INFO) << "This is not the Server node.";
    return;
  }
  Init(func_graph);
  server_node_.Start();
  rank_id_ = server_node_.rank_id();
  PSContext::instance()->SetPSRankId(rank_id_);
  thread_->join();
  SyncEmbeddingTables();
  MS_LOG(INFO) << "PServer finished updating models, starts finalizing...";
  server_node_.Finish();
  server_node_.Stop();
  MS_LOG(INFO) << "PServer finalized successfully.";
}
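// Reads the server/worker counts from the environment, wires up the RPC
// handler table and the optimizer-info builders, and launches the
// UpdateWeights() thread that Run() joins on.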
bool ParameterServer::Init(const FuncGraphPtr &func_graph) {
  pserver_num_ = std::strtol(mindspore::common::GetEnv(kEnvPServerNum).c_str(), nullptr, 10);
  worker_num_ = std::strtol(mindspore::common::GetEnv(kEnvWorkerNum).c_str(), nullptr, 10);
  func_graph_ = func_graph;
  handler_.reset(new ServerHandler(this));
  handler_->Init();
  InitOptimInfoBuilders();
  server_node_.set_handler(*handler_);
  thread_.reset(new std::thread(&ParameterServer::UpdateWeights, this));
  GetEmbeddingTableParamPtr();
  return true;
}
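// Registers one OptimizerInfoBuilder per supported optimizer so that
// AccumGrad() can materialize optimizer inputs for incoming gradients.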
void ParameterServer::InitOptimInfoBuilders() {
  std::shared_ptr<OptimizerInfoBuilder> momentum_info_builder = std::make_shared<MomentumOptimInfoBuilder>(worker_num_);
  std::shared_ptr<OptimizerInfoBuilder> sparse_adam_info_builder =
    std::make_shared<SparseAdamOptimInfoBuilder>(worker_num_);
  std::shared_ptr<OptimizerInfoBuilder> sparse_ftrl_info_builder =
    std::make_shared<SparseFtrlOptimInfoBuilder>(worker_num_);
  optim_info_builders_[kApplyMomentum] = momentum_info_builder;
  optim_info_builders_[kSparseAdam] = sparse_adam_info_builder;
  optim_info_builders_[kSparseFtrl] = sparse_ftrl_info_builder;
}
void ParameterServer::InitWeightKeyToOptims(const Key &key, const int64_t &optim_id) {
  if (weight_key_to_optims_.count(key) > 0 || Util::optimizer_name(optim_id) == "") {
    return;
  }
  weight_key_to_optims_[key] = Util::optimizer_name(optim_id);
  weight_key_to_optim_op_[key] = Util::optimizer_node_name(optim_id);
  MS_LOG(INFO) << "Initializing optimizer id for key:" << key << ", optimizer name:" << weight_key_to_optims_[key]
               << ", optimizer op name:" << weight_key_to_optim_op_[key];
}
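// Unpacks the flattened (keys, values, lengths) message into per-input shape
// vectors for the key's optimizer, then instantiates the matching PS kernel.
// `values` carries all shape dimensions back to back; `lengths[i]` says how
// many of them belong to input i.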
void ParameterServer::InitOptimInputsShape(const Keys &keys, const Values &values, const Lengths &lengths) {
  InputsShapePtr inputs_shape = std::make_shared<InputsShape>();
  MS_EXCEPTION_IF_NULL(inputs_shape);
  InputsShapePtr original_inputs_shape = std::make_shared<InputsShape>();
  MS_EXCEPTION_IF_NULL(original_inputs_shape);
  int64_t val_idx = 0;
  const Key &key = keys[0];
  MS_LOG(INFO) << "Initializing optimizer inputs shape for key:" << key;
  if (optim_inputs_shape_.count(key) == 0) {
    original_optim_inputs_shape_[key] = original_inputs_shape;
    optim_inputs_shape_[key] = inputs_shape;
  }
  for (size_t i = 0; i < keys.size(); i++) {
    auto shape = std::make_shared<std::vector<size_t>>();
    MS_EXCEPTION_IF_NULL(shape);
    auto original_shape = std::make_shared<std::vector<size_t>>();
    MS_EXCEPTION_IF_NULL(original_shape);
    inputs_shape->push_back(shape);
    original_inputs_shape->push_back(original_shape);
    for (int64_t j = 0; j < lengths[i]; j++) {
      shape->push_back(values[val_idx]);
      original_shape->push_back(values[val_idx++]);
    }
  }
  if (weight_key_to_optims_.count(key) > 0) {
    const std::string &optim_name = weight_key_to_optims_[key];
    const std::string &optim_op_name = weight_key_to_optim_op_[key];
    if (optimizers_.count(key) == 0 && optim_inputs_shape_.count(key) > 0) {
      const CNodePtr cnode = GetCNode(optim_op_name);
      MS_EXCEPTION_IF_NULL(cnode);
      if (optim_name == kSparseAdam) {
        std::shared_ptr<PServerKernel> optimizer =
          std::make_shared<kernel::ps::SparseApplyAdamPSKernel>(rank_id_, pserver_num_, worker_num_);
        optimizer->InitKernel(cnode, optim_inputs_shape_[key]);
        optimizers_[key] = optimizer;
      } else if (optim_name == kSparseLazyAdam) {
        std::shared_ptr<PServerKernel> optimizer =
          std::make_shared<kernel::ps::SparseApplyLazyAdamPSKernel>(rank_id_, pserver_num_, worker_num_);
        optimizer->InitKernel(cnode, optim_inputs_shape_[key]);
        optimizers_[key] = optimizer;
      } else if (optim_name == kApplyMomentum) {
        std::shared_ptr<PServerKernel> optimizer =
          std::make_shared<kernel::ps::ApplyMomentumPSKernel>(rank_id_, pserver_num_, worker_num_);
        optimizer->InitKernel(cnode, optim_inputs_shape_[key]);
        optimizers_[key] = optimizer;
      } else if (optim_name == kSparseFtrl) {
        std::shared_ptr<PServerKernel> optimizer =
          std::make_shared<kernel::ps::SparseApplyFtrlPSKernel>(rank_id_, pserver_num_, worker_num_);
        optimizer->InitKernel(cnode, optim_inputs_shape_[key]);
        optimizers_[key] = optimizer;
      }
    }
  }
}
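// Stores the initial weight for a key. A weight is (re)initialized on first
// sight, or when the key was previously registered as an embedding table, in
// which case the freshly pushed weight appears to take precedence.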
void ParameterServer::InitWeight(const Key &key, const WeightPtr &weight) {
  MS_EXCEPTION_IF_NULL(weight);
  if ((weights_.count(key) == 0) || (is_embedding_[key] && weights_.count(key) != 0)) {
    MS_LOG(INFO) << "Initializing weight for key " << key << ", server rank " << rank_id_;
    weights_[key] = weight;
    tokens_[key] = 0;
    is_embedding_[key] = false;
  }
}
void ParameterServer::InitGrad(const Key &key, const GradPtr &grad) {
  MS_EXCEPTION_IF_NULL(grad);
  if (grads_.count(key) == 0) {
    grads_[key] = grad;
    grads_accum_counter_[key] = 0;
  }
}
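// Builds the EmbeddingLookUp kernel for this table and allocates the table
// itself. With the data-prefetch cache enabled, the table is filled from the
// worker-provided init info (seeded normal draw for weights, a constant for
// accumulators); otherwise it falls back to a local N(0, 0.01) draw.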
void ParameterServer::InitEmbeddingTable(
  const Key &key, const std::shared_ptr<std::vector<std::shared_ptr<std::vector<size_t>>>> &shapes,
  const ParamInitInfo &param_init_info) {
  MS_EXCEPTION_IF_NULL(shapes);
  if (weights_.count(key) == 0) {
    std::shared_ptr<PServerKernel> lookup =
      std::make_shared<kernel::ps::EmbeddingLookUpPSKernel>(rank_id_, pserver_num_, worker_num_);
    lookup->InitKernel(shapes);
    embedding_lookup_ops_[key] = lookup;
    // Init embedding weight
    const std::vector<size_t> &input_shapes = lookup->input_sizes();
    size_t total_dims =
      std::accumulate(input_shapes.begin(), input_shapes.end(), IntToSize(1), std::multiplies<size_t>());
    WeightPtr embedding = std::make_shared<Weight>(total_dims, 0);
    MS_EXCEPTION_IF_NULL(embedding);
    float *embedding_data = embedding->data();
    std::default_random_engine engine;
    std::normal_distribution<float> random(0, 0.01);
    if (ps::PsDataPrefetch::GetInstance().cache_enable()) {
      if (param_init_info.param_type_ == kWeight) {
        InitRandomNormal(0, 0.01, input_shapes, param_init_info.global_seed_, param_init_info.op_seed_, embedding_data);
      } else if (param_init_info.param_type_ == kAccumulation) {
        for (size_t i = 0; i < total_dims; i++) {
          embedding_data[i] = param_init_info.init_val_;
        }
      }
    } else {
      for (size_t i = 0; i < total_dims; i++) {
        embedding_data[i] = random(engine);
      }
    }
    weights_[key] = embedding;
    MS_LOG(DEBUG) << "The key:" << key << " the embedding:" << *embedding;
    tokens_[key] = 0;
    is_embedding_[key] = true;
    grads_accum_counter_[key] = 0;
  }
}
bool ParameterServer::HasWeight(const Key &key) { return (weights_.count(key) > 0 && !is_embedding_.count(key)); }
void ParameterServer::Finalize() {
  running_ = false;
  apply_grads_cv_.notify_one();
}
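// Background thread body. Waits until every registered weight has a full set
// of accumulated gradients (or until Finalize() clears running_), then runs
// each key's optimizer kernel over its accumulated OptimizerInfo and refills
// the pull tokens for non-embedding weights.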
void ParameterServer::UpdateWeights() {
  while (true) {
    MS_LOG(INFO) << "running_: " << running_ << ", ready for update: " << this->ReadyForUpdateWeights();
    std::unique_lock<std::mutex> lock(mutex_);
    apply_grads_cv_.wait(lock, [this] { return this->ReadyForUpdateWeights() || !running_; });
    if (!running_) {
      break;
    }
    for (auto iter = weights_.begin(); iter != weights_.end(); iter++) {
      Key key = iter->first;
      WeightPtr weight_ptr = iter->second;
      std::shared_ptr<PServerKernel> optimizer = nullptr;
      if (weight_key_to_optims_.count(key) > 0) {
        optimizer = optimizers_[key];
      }
      MS_EXCEPTION_IF_NULL(optimizer);
      std::shared_ptr<OptimizerInfo> optim_info = optim_infos_[key];
      if (optim_info != nullptr) {
        const std::vector<kernel::AddressPtr> &inputs = optim_info->inputs();
        const std::vector<kernel::AddressPtr> &workspaces = optim_info->workspaces();
        const std::vector<kernel::AddressPtr> &outputs = optim_info->outputs();
        std::vector<std::vector<size_t>> shapes = {};
        std::vector<size_t> indices_shape = {};
        indices_shape.emplace_back(optim_info->indice_size());
        shapes.push_back(indices_shape);
        if (original_optim_inputs_shape_.count(key) != 0) {
          std::transform(
            (*(original_optim_inputs_shape_[key])).begin(), (*(original_optim_inputs_shape_[key])).end(),
            std::back_inserter(shapes),
            [](std::shared_ptr<std::vector<size_t>> input_shapes) -> std::vector<size_t> { return *input_shapes; });
        }
        optimizer->ReInit(shapes);
        optim_info->ComputeMean(shapes, worker_num_, pserver_num_, rank_id_);
        optimizer->Execute(inputs, workspaces, outputs);
        optim_info->Reset();
      }
      if (!is_embedding_[key]) {
        tokens_[key] = worker_num_;
      }
    }
    ResetGradAccumCount();
  }
}
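// Accumulates one worker's gradient push for a key. A single value of -100
// appears to be the sentinel for "no sparse gradient this step". Once every
// worker has pushed for every key, the update thread is woken up.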
void ParameterServer::AccumGrad(const Keys &keys, const Values &values, const Lengths &lengths) {
  std::unique_lock<std::mutex> lock(mutex_);
  const Key &key = keys[0];
  bool no_sparse_grad = values.size() == 1 && values[0] == -100;
  if (!no_sparse_grad) {
    std::shared_ptr<OptimizerInfo> optim_info = optim_infos_[key];
    // Create or update the optimizer info
    if (optim_info == nullptr) {
      const std::shared_ptr<OptimizerInfoBuilder> &builder = optim_info_builders_[weight_key_to_optims_[key]];
      std::shared_ptr<kernel::ps::PServerKernel> pserver_kernel = optimizers_[key];
      if (pserver_kernel == nullptr) {
        MS_LOG(EXCEPTION) << "no optimizer found for key " << key << " optim name " << weight_key_to_optims_[key];
      }
      MS_EXCEPTION_IF_NULL(pserver_kernel);
      OptimizerInfo *optim = builder->Build(pserver_kernel, weights_[key], keys, values, lengths,
                                            optim_inputs_shape_[key], worker_num_, is_embedding_[key]);
      optim_info.reset(optim);
      optim_infos_[key] = optim_info;
    } else {
      optim_info->Update(values, lengths);
      optim_info->Accumulate(values, lengths);
    }
  }
  grads_accum_counter_[key] += 1;
  if (grads_accum_counter_[key] == worker_num_) {
    grad_accum_count_++;
  }
  if (ReadyForUpdateWeights()) {
    apply_grads_cv_.notify_one();
  }
}
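// Returns a snapshot of the weight for a pull request and consumes one pull
// token. The copy keeps callers isolated from concurrent in-place updates.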
WeightPtr ParameterServer::weight(const Key &key) {
  std::unique_lock<std::mutex> lock(mutex_);
  if (weights_.count(key) == 0) {
    MS_LOG(EXCEPTION) << "Invalid weight key " << key;
  }
  WeightPtr weight_ptr = weights_[key];
  MS_EXCEPTION_IF_NULL(weight_ptr);
  MS_LOG(DEBUG) << "The weight ptr size is:" << weight_ptr->size();
  // Return a deep copy so the caller never aliases the live weight buffer.
  WeightPtr copy_weight_ptr = std::make_shared<std::vector<float>>(weight_ptr->begin(), weight_ptr->end());
  MS_EXCEPTION_IF_NULL(copy_weight_ptr);
  tokens_[key] -= 1;
  return copy_weight_ptr;
}
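// Serves an embedding-lookup request: reshapes the lookup kernel to the
// incoming id count, gathers the requested rows from the table into a
// scratch buffer, and writes the result into the KVMessage response.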
void ParameterServer::DoEmbeddingLookup(Key key, const LookupIds &lookup_ids, KVMessage *res) {
  std::unique_lock<std::mutex> lock(mutex_);
  MS_EXCEPTION_IF_NULL(res);
  if (weights_.count(key) == 0) {
    MS_LOG(ERROR) << "Invalid embedding table key " << key;
    return;
  }
  if (embedding_lookup_ops_.count(key) == 0) {
    MS_LOG(ERROR) << "Invalid embedding lookup op key " << key;
    return;
  }
  WeightPtr table_ptr = weights_[key];
  MS_EXCEPTION_IF_NULL(table_ptr);
  std::shared_ptr<PServerKernel> table_lookup_op = embedding_lookup_ops_[key];
  MS_EXCEPTION_IF_NULL(table_lookup_op);
  // Update shapes of lookup operator
  std::vector<std::vector<size_t>> shapes = {};
  std::vector<size_t> indices_shape = {};
  indices_shape.emplace_back(lookup_ids.size());
  shapes.push_back(indices_shape);
  table_lookup_op->ReInit(shapes);
  const std::vector<size_t> output_shapes = table_lookup_op->output_sizes();
  std::vector<kernel::AddressPtr> inputs;
  AddressPtr embedding_table = std::make_shared<kernel::Address>();
  MS_EXCEPTION_IF_NULL(embedding_table);
  AddressPtr indices = std::make_shared<kernel::Address>();
  MS_EXCEPTION_IF_NULL(indices);
  inputs.push_back(embedding_table);
  inputs.push_back(indices);
  embedding_table->addr = table_ptr->data();
  embedding_table->size = table_ptr->size() * sizeof(float);
  std::unique_ptr<int[]> tmp_ids(new int[lookup_ids.size()]);
  MS_EXCEPTION_IF_NULL(tmp_ids);
  for (size_t i = 0; i < lookup_ids.size(); i++) {
    tmp_ids[i] = static_cast<int>(lookup_ids[i]);
  }
  indices->addr = tmp_ids.get();
  indices->size = lookup_ids.size() * sizeof(int);
  std::vector<kernel::AddressPtr> workspaces;
  std::vector<kernel::AddressPtr> outputs;
  AddressPtr output = std::make_shared<kernel::Address>();
  MS_EXCEPTION_IF_NULL(output);
  std::shared_ptr<Values> addr = std::make_shared<Values>(output_shapes[0] / sizeof(float), 0);
  MS_EXCEPTION_IF_NULL(addr);
  output->addr = addr->data();
  output->size = output_shapes[0];
  outputs.push_back(output);
  table_lookup_op->Execute(inputs, workspaces, outputs);
  *res->mutable_values() = {addr->begin(), addr->end()};
  res->add_len(res->values_size());
}
void ParameterServer::UpdateEmbeddings(const Key &key, const LookupIds &lookup_ids, const Values &vals) {
  if (weights_.count(key) == 0) {
    MS_LOG(ERROR) << "Invalid embedding table key " << key;
    return;
  }
  if (embedding_lookup_ops_.count(key) == 0) {
    MS_LOG(ERROR) << "Invalid embedding lookup op key " << key;
    return;
  }
  WeightPtr table_ptr = weights_[key];
  MS_EXCEPTION_IF_NULL(table_ptr);
  std::shared_ptr<PServerKernel> table_lookup_op = embedding_lookup_ops_[key];
  MS_EXCEPTION_IF_NULL(table_lookup_op);
  table_lookup_op->UpdateEmbeddings(table_ptr->data(), lookup_ids.data(), vals.data(), lookup_ids.size());
}
inline bool ParameterServer::ReadyForUpdateWeights() {
  return grads_accum_counter_.size() > 0 && grad_accum_count_ == grads_accum_counter_.size();
}
inline bool ParameterServer::ReadyForPush(const Key &key) {
  std::unique_lock<std::mutex> lock(mutex_);
  if (weights_.empty()) {
    MS_LOG(EXCEPTION) << "The weights on the server are empty. Possible causes: 1. The worker did not send the "
                         "kInitWeightsCmd command. 2. The server failed to initialize weights.";
  }
  MS_LOG(INFO) << "The grad_accum_count_:" << grad_accum_count_ << " the weights_:" << weights_.size()
               << " the token:" << (tokens_[key] <= 0);
  return grad_accum_count_ < weights_.size() && tokens_[key] <= 0;
}
inline bool ParameterServer::ReadyForPull(const Key &key) {
  std::unique_lock<std::mutex> lock(mutex_);
  if (tokens_.count(key) == 0 || weights_.count(key) == 0) {
    MS_LOG(EXCEPTION) << "Invalid weight key " << key;
  }
  MS_LOG(INFO) << "ReadyForPull: " << (tokens_[key] > 0);
  return tokens_[key] > 0;
}
inline void ParameterServer::ResetGradAccumCount() {
  grad_accum_count_ = 0;
  for (auto iter = grads_accum_counter_.begin(); iter != grads_accum_counter_.end(); iter++) {
    grads_accum_counter_[iter->first] = 0;
  }
}
const CNodePtr ParameterServer::GetCNode(const std::string &name) const {
  std::list<CNodePtr> cnodes = func_graph_->GetOrderedCnodes();
  for (CNodePtr cnode : cnodes) {
    MS_EXCEPTION_IF_NULL(cnode);
    std::string fullname = cnode->fullname_with_scope();
    if (fullname.find(name) != std::string::npos && fullname.find("Push") != std::string::npos) {
      return cnode;
    }
  }
  return nullptr;
}
inline std::mutex &ParameterServer::mutex() { return mutex_; }
void ParameterServer::GetEmbeddingTableParamPtr() {
  MS_EXCEPTION_IF_NULL(func_graph_);
  auto cnodes = func_graph_->GetOrderedCnodes();
  Key count = 0;
  for (auto cnode : cnodes) {
    MS_EXCEPTION_IF_NULL(cnode);
    std::string cnode_name = AnfAlgo::GetCNodeName(cnode);
    if (cnode_name == kEmbeddingLookupOpName || cnode_name == kGatherV2OpName || cnode_name == kSparseGatherV2OpName) {
      auto embedding_table = AnfAlgo::GetInputNode(cnode, 0);
      if (IsPrimitiveCNode(embedding_table, prim::kPrimLoad)) {
        auto embedding_cnode = embedding_table->cast<CNodePtr>();
        embedding_table = AnfAlgo::GetInputNode(embedding_cnode, 0);
      }
      MS_EXCEPTION_IF_NULL(embedding_table);
      if (embedding_table->isa<Parameter>()) {
        MS_LOG(INFO) << "Embedding table name is " << embedding_table->fullname_with_scope() << ", key is " << count;
        embedding_tables_.insert(std::make_pair(count, embedding_table->cast<ParameterPtr>()));
        count++;
      }
    }
  }
}
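// Copies every trained embedding table out of the server's weight store back
// into the corresponding Parameter's default tensor, so the final values end
// up in the graph when the server shuts down.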
void ParameterServer::SyncEmbeddingTables() {
  for (auto embedding_table : embedding_tables_) {
    Key key = embedding_table.first;
    if (embedding_lookup_ops_.count(key) == 0) {
      MS_LOG(WARNING) << "Can't find lookup PS kernel for key " << key;
      continue;
    }
    auto lookup = embedding_lookup_ops_[key];
    const std::vector<size_t> &input_shapes = lookup->input_sizes();
    std::vector<int64_t> new_tensor_shape(input_shapes.begin(), input_shapes.end());
    tensor::TensorPtr new_tensor = std::make_shared<tensor::Tensor>(kNumberTypeFloat32, new_tensor_shape);
    MS_EXCEPTION_IF_NULL(new_tensor);
    float *new_tensor_data_ptr = reinterpret_cast<float *>(new_tensor->data_c());
    size_t new_tensor_size = static_cast<size_t>(new_tensor->data().nbytes());
    size_t embedding_table_size = weights_[key]->size() * sizeof(float);
    if (new_tensor_size != embedding_table_size) {
      MS_LOG(EXCEPTION) << "Shape of embedding table can't match. New tensor size:" << new_tensor_size
                        << ", embedding_table size:" << embedding_table_size;
    }
    MS_EXCEPTION_IF_NULL(new_tensor_data_ptr);
    MS_EXCEPTION_IF_NULL(weights_[key]->data());
    int64_t ret = memcpy_s(new_tensor_data_ptr, new_tensor_size, weights_[key]->data(), embedding_table_size);
    if (ret != 0) {
      MS_LOG(EXCEPTION) << "memcpy_s error, error code (" << ret << ")";
      return;
    }
    auto parameter_tensor_ptr = embedding_table.second->default_param();
    MS_EXCEPTION_IF_NULL(parameter_tensor_ptr);
    parameter_tensor_ptr->cast<tensor::TensorPtr>()->AssignValue(*new_tensor);
  }
}
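// Maps each worker command to its member-function handler; operator() below
// dispatches incoming messages through this table.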
void ParameterServer::ServerHandler::Init() {
  handlers_[kInitWeightsCmd] = &ServerHandler::HandleInitWeights;
  handlers_[kInitWeightToOptimIdCmd] = &ServerHandler::HandleInitWeightToOptimId;
  handlers_[kInitOptimInputsShapeCmd] = &ServerHandler::HandleInitInputsShape;
  handlers_[kInitEmbeddingsCmd] = &ServerHandler::HandleInitEmbeddings;
  handlers_[kCheckReadyForPushCmd] = &ServerHandler::HandleCheckReadyForPush;
  handlers_[kCheckReadyForPullCmd] = &ServerHandler::HandleCheckReadyForPull;
  handlers_[kEmbeddingLookupCmd] = &ServerHandler::HandleEmbeddingLookup;
  handlers_[kUpdateEmbeddingsCmd] = &ServerHandler::HandleUpdateEmbeddings;
  handlers_[kFinalizeCmd] = &ServerHandler::HandleFinalize;
  handlers_[kPushCmd] = &ServerHandler::HandlePushReq;
  handlers_[kPullCmd] = &ServerHandler::HandlePullReq;
}
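// RPC entry point: looks up the handler for the message's user command, runs
// it to fill `output`, and sends the serialized result back over the
// connection.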
void ParameterServer::ServerHandler::operator()(std::shared_ptr<core::TcpConnection> conn,
                                                std::shared_ptr<core::MessageMeta> meta, DataPtr data, size_t size) {
  auto output = std::make_shared<std::vector<unsigned char>>();
  MS_LOG(INFO) << "The command is:" << meta->user_cmd();
  if (handlers_.count(meta->user_cmd()) == 0) {
    MS_LOG(EXCEPTION) << "The command:" << meta->user_cmd() << " is not supported!";
  }
  auto &handler_ptr = handlers_[meta->user_cmd()];
  (this->*handler_ptr)(data, size, output);
  // Use an array deleter: the buffer is allocated with new[].
  std::shared_ptr<unsigned char> res(new unsigned char[output->size()], std::default_delete<unsigned char[]>());
  MS_LOG(DEBUG) << "The output size is:" << output->size();
  if (output->size() > 0) {
    int ret = memcpy_s(res.get(), output->size(), output->data(), output->size());
    if (ret != 0) {
      MS_LOG(EXCEPTION) << "memcpy_s error, error code (" << ret << ")";
    }
  }
  ps_->server_node_.Response(conn, meta, res, output->size());
  MS_LOG(DEBUG) << "The request id is:" << meta->request_id() << " the current time is:"
                << std::chrono::time_point_cast<std::chrono::microseconds>(std::chrono::high_resolution_clock::now())
                     .time_since_epoch()
                     .count();
}
void ParameterServer::ServerHandler::HandlePushReq(DataPtr data, size_t size, VectorPtr res) {
  MS_EXCEPTION_IF_NULL(res);
  KVMessage input;
  input.ParseFromArray(data.get(), size);
  Keys keys = {input.keys().begin(), input.keys().end()};
  Values values = {input.values().begin(), input.values().end()};
  Lengths lens = {input.len().begin(), input.len().end()};
  MS_LOG(DEBUG) << "The keys:" << keys << " the values:" << values << " the len:" << lens;
  ps_->AccumGrad(keys, values, lens);
}
void ParameterServer::ServerHandler::HandlePullReq(DataPtr data, size_t size, VectorPtr res) {
  MS_EXCEPTION_IF_NULL(res);
  KVMessage input;
  input.ParseFromArray(data.get(), size);
  KVMessage res_data;
  *res_data.mutable_keys() = input.keys();
  Key key = input.keys()[0];
  auto weight = ps_->weight(key);
  *res_data.mutable_values() = {weight->begin(), weight->end()};
  res->resize(res_data.ByteSizeLong());
  int ret =
    memcpy_s(res->data(), res_data.ByteSizeLong(), res_data.SerializeAsString().data(), res_data.ByteSizeLong());
  if (ret != 0) {
    MS_LOG(EXCEPTION) << "memcpy_s error, error code (" << ret << ")";
  }
}
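// Unpacks the initial weights pushed by a worker. When the message carries no
// per-key lengths, the values are split evenly across the keys. Each new
// weight also gets a zero-filled gradient buffer of the same length.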
void ParameterServer::ServerHandler::HandleInitWeights(DataPtr data, size_t size, VectorPtr res) {
  std::unique_lock<std::mutex> lock(ps_->mutex());
  MS_EXCEPTION_IF_NULL(res);
  KVMessage input;
  input.ParseFromArray(data.get(), size);
  int key_num = input.keys_size();
  const float *data_ptr = input.values().data();
  size_t pos = 0;
  for (int i = 0; i < key_num; i++) {
    Key key = input.keys()[i];
    size_t data_len = input.len_size() != key_num ? input.values_size() / key_num : input.len()[i];
    if (!ps_->HasWeight(key)) {
      WeightPtr weight_ptr = std::make_shared<std::vector<float>>(data_ptr + pos, data_ptr + (pos + data_len));
      MS_EXCEPTION_IF_NULL(weight_ptr);
      ps_->InitWeight(key, weight_ptr);
      GradPtr grad_ptr = std::make_shared<std::vector<float>>(data_len, 0);
      MS_EXCEPTION_IF_NULL(grad_ptr);
      ps_->InitGrad(key, grad_ptr);
    }
    pos += data_len;
  }
}
void ParameterServer::ServerHandler::HandleInitWeightToOptimId(DataPtr data, size_t size, VectorPtr res) {
  std::unique_lock<std::mutex> lock(ps_->mutex());
  MS_EXCEPTION_IF_NULL(res);
  KVMessage input;
  input.ParseFromArray(data.get(), size);
  size_t key_num = input.keys_size();
  for (size_t i = 0; i < key_num; i++) {
    Key key = input.keys()[i];
    float val = input.values()[i];
    if (init_weight_to_optim_[key]) {
      continue;
    } else {
      init_weight_to_optim_[key] = true;
    }
    ps_->InitWeightKeyToOptims(key, val);
  }
}
void ParameterServer::ServerHandler::HandleInitInputsShape(DataPtr data, size_t size, VectorPtr res) {
  std::unique_lock<std::mutex> lock(ps_->mutex());
  MS_EXCEPTION_IF_NULL(res);
  KVMessage input;
  input.ParseFromArray(data.get(), size);
  const Key &key = input.keys()[0];
  if (init_optim_info_[key]) {
    return;
  } else {
    init_optim_info_[key] = true;
  }
  Keys keys = {input.keys().begin(), input.keys().end()};
  Values values = {input.values().begin(), input.values().end()};
  Lengths lens = {input.len().begin(), input.len().end()};
  ps_->InitOptimInputsShape(keys, values, lens);
}
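// Rebuilds the (input, indices, output) shape triple from the
// EmbeddingTableMeta message and forwards it, together with the init info
// used by the cache path, to InitEmbeddingTable().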
void ParameterServer::ServerHandler::HandleInitEmbeddings(DataPtr data, size_t size, VectorPtr res) {
  std::unique_lock<std::mutex> lock(ps_->mutex());
  EmbeddingTableMeta embedding_table_meta;
  embedding_table_meta.ParseFromArray(data.get(), size);
  const Key &key = embedding_table_meta.key();
  MS_LOG(INFO) << "Initializing embedding table for key:" << key;
  std::shared_ptr<std::vector<std::shared_ptr<std::vector<size_t>>>> shapes =
    std::make_shared<std::vector<std::shared_ptr<std::vector<size_t>>>>();
  MS_EXCEPTION_IF_NULL(shapes);
  std::shared_ptr<std::vector<size_t>> input_shape = std::make_shared<std::vector<size_t>>(
    embedding_table_meta.input_shape().begin(), embedding_table_meta.input_shape().end());
  MS_EXCEPTION_IF_NULL(input_shape);
  std::shared_ptr<std::vector<size_t>> indices_shape = std::make_shared<std::vector<size_t>>(
    embedding_table_meta.indices_shape().begin(), embedding_table_meta.indices_shape().end());
  MS_EXCEPTION_IF_NULL(indices_shape);
  std::shared_ptr<std::vector<size_t>> output_shape = std::make_shared<std::vector<size_t>>(
    embedding_table_meta.output_shape().begin(), embedding_table_meta.output_shape().end());
  MS_EXCEPTION_IF_NULL(output_shape);
  shapes->push_back(input_shape);
  shapes->push_back(indices_shape);
  shapes->push_back(output_shape);
  const ParamInitInfoMessage &info = embedding_table_meta.info();
  ParamInitInfo param_init_info;
  if (ps::PsDataPrefetch::GetInstance().cache_enable()) {
    param_init_info.param_type_ = static_cast<ParamType>(info.param_type());
    if (param_init_info.param_type_ == kWeight) {
      param_init_info.global_seed_ = info.global_seed();
      param_init_info.op_seed_ = info.op_seed();
    } else if (param_init_info.param_type_ == kAccumulation) {
      param_init_info.init_val_ = info.init_val();
    }
  }
  ps_->InitEmbeddingTable(key, shapes, param_init_info);
}
void ParameterServer::ServerHandler::HandleCheckReadyForPush(DataPtr data, size_t size, VectorPtr res) {
  MS_EXCEPTION_IF_NULL(res);
  KVMessage input;
  input.ParseFromArray(data.get(), size);
  const Key &key = input.keys()[0];
  bool ready = ps_->ReadyForPush(key);
  MS_LOG(INFO) << "Ready for push: " << ready;
  KVMessage res_data;
  res_data.add_keys(key);
  res_data.add_values(ready);
  res->resize(res_data.ByteSizeLong());
  int ret =
    memcpy_s(res->data(), res_data.ByteSizeLong(), res_data.SerializeAsString().data(), res_data.ByteSizeLong());
  if (ret != 0) {
    MS_LOG(EXCEPTION) << "memcpy_s error, error code (" << ret << ")";
  }
}
void ParameterServer::ServerHandler::HandleCheckReadyForPull(DataPtr data, size_t size, VectorPtr res) {
  MS_EXCEPTION_IF_NULL(res);
  KVMessage input;
  input.ParseFromArray(data.get(), size);
  const Key &key = input.keys()[0];
  bool ready = ps_->ReadyForPull(key);
  KVMessage res_data;
  res_data.add_keys(key);
  res_data.add_values(ready);
  res->resize(res_data.ByteSizeLong());
  int ret =
    memcpy_s(res->data(), res_data.ByteSizeLong(), res_data.SerializeAsString().data(), res_data.ByteSizeLong());
  if (ret != 0) {
    MS_LOG(EXCEPTION) << "memcpy_s error, error code (" << ret << ")";
  }
}
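// In the lookup request the `key` field selects the embedding table while the
// repeated `keys` field carries the row ids to fetch; in the update request
// below, the first element of `keys` is the table key and the rest are ids.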
void ParameterServer::ServerHandler::HandleEmbeddingLookup(DataPtr data, size_t size, VectorPtr res) {
  MS_EXCEPTION_IF_NULL(res);
  EmbeddingTableLookup input;
  input.ParseFromArray(data.get(), size);
  const Key &key = input.key();
  KVMessage res_data;
  std::vector<Key> keys = {input.keys().begin(), input.keys().end()};
  *res_data.mutable_keys() = {input.keys().begin(), input.keys().end()};
  ps_->DoEmbeddingLookup(key, keys, &res_data);
  res->resize(res_data.ByteSizeLong());
  int ret =
    memcpy_s(res->data(), res_data.ByteSizeLong(), res_data.SerializeAsString().data(), res_data.ByteSizeLong());
  if (ret != 0) {
    MS_LOG(EXCEPTION) << "memcpy_s error, error code (" << ret << ")";
  }
}
void ParameterServer::ServerHandler::HandleUpdateEmbeddings(DataPtr data, size_t size, VectorPtr res) {
  std::unique_lock<std::mutex> lock(ps_->mutex());
  MS_EXCEPTION_IF_NULL(res);
  KVMessage input;
  input.ParseFromArray(data.get(), size);
  const Key &key = input.keys()[0];
  const LookupIds &lookup_ids = {input.keys().begin() + 1, input.keys().end()};
  const Values &update_vals = {input.values().begin(), input.values().end()};
  ps_->UpdateEmbeddings(key, lookup_ids, update_vals);
}
void ParameterServer::ServerHandler::HandleFinalize(DataPtr data, size_t size, VectorPtr res) {
  MS_EXCEPTION_IF_NULL(res);
  ps_->Finalize();
}
}  // namespace ps
}  // namespace mindspore