Merge pull request !3017 from ZPaC/add-ps-optimizer-kernel
@@ -29,7 +29,10 @@ if (ENABLE_CPU)
     list(REMOVE_ITEM CPU_SRC_LIST "cpu/ps/push_kernel.cc"
                                   "cpu/ps/pull_kernel.cc"
                                   "cpu/ps/embedding_look_up_ps_kernel.cc"
-                                  "cpu/ps/embedding_look_up_proxy_kernel.cc")
+                                  "cpu/ps/embedding_look_up_proxy_kernel.cc"
+                                  "cpu/ps/apply_momentum_ps_kernel.cc"
+                                  "cpu/ps/sparse_apply_adam_ps_kernel.cc"
+                                  "cpu/ps/sparse_apply_ftrl_ps_kernel.cc")
     if (NOT ENABLE_MPI)
         list(REMOVE_ITEM CPU_SRC_LIST "cpu/allgather_cpu_kernel.cc")
@@ -0,0 +1,33 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "kernel/cpu/ps/apply_momentum_ps_kernel.h"

namespace mindspore {
namespace kernel {
namespace ps {
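// The momentum update is dense, so nothing needs to be re-sharded per step: Execute simply
// forwards the buffers to the inherited ApplyMomentumCPUKernel::Launch.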
bool ApplyMomentumPSKernel::Execute(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
                                    const std::vector<AddressPtr> &outputs) {
  return Launch(inputs, workspace, outputs);
}

const std::vector<size_t> &ApplyMomentumPSKernel::input_sizes() const { return GetInputSizeList(); }

const std::vector<size_t> &ApplyMomentumPSKernel::output_sizes() const { return GetOutputSizeList(); }

const std::vector<size_t> &ApplyMomentumPSKernel::workspace_sizes() const { return GetWorkspaceSizeList(); }
}  // namespace ps
}  // namespace kernel
}  // namespace mindspore
@@ -0,0 +1,43 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_CCSRC_KERNEL_CPU_APPLY_MOMENTUM_PS_KERNEL_H_
#define MINDSPORE_CCSRC_KERNEL_CPU_APPLY_MOMENTUM_PS_KERNEL_H_

#include <vector>
#include <memory>
#include "kernel/cpu/ps/pserver_kernel.h"
#include "kernel/cpu/apply_momentum_cpu_kernel.h"

namespace mindspore {
namespace kernel {
namespace ps {
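// Parameter-server wrapper around the dense ApplyMomentum CPU kernel: the optimizer math is
// inherited from ApplyMomentumCPUKernel, while PServerKernel contributes the server-side
// interface (Execute plus the input/output/workspace size accessors).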
class ApplyMomentumPSKernel : public ApplyMomentumCPUKernel, public PServerKernel {
 public:
  ApplyMomentumPSKernel(size_t rank_id, size_t pserver_num) : PServerKernel(rank_id, pserver_num) {}
  ~ApplyMomentumPSKernel() override = default;

  bool Execute(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
               const std::vector<AddressPtr> &outputs) override;

  const std::vector<size_t> &input_sizes() const override;
  const std::vector<size_t> &output_sizes() const override;
  const std::vector<size_t> &workspace_sizes() const override;
};
}  // namespace ps
}  // namespace kernel
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_KERNEL_CPU_APPLY_MOMENTUM_PS_KERNEL_H_
@@ -0,0 +1,24 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "kernel/cpu/ps/pserver_kernel.h"
#include "parallel/ps/util.h"

namespace mindspore {
namespace kernel {
namespace ps {}  // namespace ps
}  // namespace kernel
}  // namespace mindspore
@@ -0,0 +1,57 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_CCSRC_KERNEL_PS_PSERVER_KERNEL_H_
#define MINDSPORE_CCSRC_KERNEL_PS_PSERVER_KERNEL_H_

#include <vector>
#include <memory>
#include "kernel/kernel.h"
#include "parallel/ps/util.h"

namespace mindspore {
namespace kernel {
namespace ps {
using mindspore::parallel::ps::Util;
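// Common base class for optimizer kernels that run on a parameter server. Each kernel is
// constructed with this server's rank and the total number of servers so that Shard() can
// reduce a sharded dimension (dimension 0 for the optimizers below) to the slice owned by
// this server.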
class PServerKernel {
 public:
  PServerKernel(size_t rank_id, size_t pserver_num) : rank_id_(rank_id), pserver_num_(pserver_num) {}
  virtual ~PServerKernel() = default;
  PServerKernel(const PServerKernel &) = delete;
  PServerKernel &operator=(const PServerKernel &) = delete;

  virtual void InitKernel(const std::shared_ptr<std::vector<std::shared_ptr<std::vector<size_t>>>> &) {}
  virtual void ReInit(const std::shared_ptr<std::vector<std::shared_ptr<std::vector<size_t>>>> &) {}
  virtual bool Execute(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
                       const std::vector<AddressPtr> &outputs) = 0;

  virtual const std::vector<size_t> &input_sizes() const = 0;
  virtual const std::vector<size_t> &output_sizes() const = 0;
  virtual const std::vector<size_t> &workspace_sizes() const = 0;

 protected:
  virtual void ReInit(const std::vector<AddressPtr> &) {}
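  // Replace shape[axis] with the size of this server's local shard, as computed by
  // Util::LocalShard from the full dimension size, this server's rank and the server count.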
  void Shard(std::vector<size_t> *shape, int axis) {
    (*shape)[axis] = Util::LocalShard((*shape)[axis], rank_id_, pserver_num_);
  }

  size_t rank_id_;
  size_t pserver_num_;
};
}  // namespace ps
}  // namespace kernel
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_KERNEL_PS_PSERVER_KERNEL_H_
@@ -0,0 +1,100 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "kernel/cpu/ps/sparse_apply_adam_ps_kernel.h"
#include <memory>
#include "kernel/common_utils.h"
#include "device/cpu/cpu_device_address.h"
#include "parallel/ps/util.h"

namespace mindspore {
namespace kernel {
namespace ps {
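// InitKernel assumes the SparseApplyAdam input layout: shape_vec[0..2] are var/m/v, shape_vec[9]
// is grad and shape_vec[10] is indices (the positions in between carry the scalar
// hyper-parameters). Only var/m/v are sharded across parameter servers.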
void SparseApplyAdamPSKernel::InitKernel(
  const std::shared_ptr<std::vector<std::shared_ptr<std::vector<size_t>>>> &shapes) {
  const std::vector<std::shared_ptr<std::vector<size_t>>> &shape_vec = *shapes;
  std::vector<size_t> &var_shape = *(shape_vec[0]);
  std::vector<size_t> &m_shape = *(shape_vec[1]);
  std::vector<size_t> &v_shape = *(shape_vec[2]);
  const std::vector<size_t> &grad_shape = *(shape_vec[9]);
  const std::vector<size_t> &indices_shape = *(shape_vec[10]);

  Shard(&var_shape, 0);
  Shard(&m_shape, 0);
  Shard(&v_shape, 0);

  if (!IsSameShape(var_shape, m_shape)) {
    MS_LOG(EXCEPTION) << "var and m should have the same shape";
  }
  if (!IsSameShape(var_shape, v_shape)) {
    MS_LOG(EXCEPTION) << "var and v should have the same shape";
  }
  var_first_dim_size_ = var_shape[0];
  for (size_t i = 1; i < var_shape.size(); ++i) {
    if (var_shape[i] != grad_shape[i]) {
      MS_LOG(EXCEPTION) << "The shape of var and grad must be equal in dimension " << i;
    }
    var_outer_dim_size_ *= var_shape[i];
  }
  if (indices_shape.size() != 1) {
    MS_LOG(EXCEPTION) << "indices must be 1D";
  }
  indices_size_ = indices_shape[0];
  if (grad_shape[0] != indices_size_) {
    MS_LOG(ERROR) << "The first dimension of grad shape must be equal to the indices size";
  }
  /*
  if (AnfAlgo::HasNodeAttr(USE_NESTEROV, kernel_node)) {
    use_nesterov_ = AnfAlgo::GetNodeAttr<bool>(kernel_node, "use_nesterov");
  }
  */
  workspace_size_list_.emplace_back(indices_size_ * var_outer_dim_size_ * sizeof(float));
  workspace_size_list_.emplace_back(indices_size_ * sizeof(int));
  workspace_size_list_.emplace_back(var_first_dim_size_ * var_outer_dim_size_ * sizeof(float));
}
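// ReInit(shapes) expects only the indices shape (shape_vec[0]); the indices-dependent
// workspace sizes are recomputed from it.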
void SparseApplyAdamPSKernel::ReInit(const std::shared_ptr<std::vector<std::shared_ptr<std::vector<size_t>>>> &shapes) {
  const std::vector<std::shared_ptr<std::vector<size_t>>> &shape_vec = *shapes;
  const std::vector<size_t> &indices_shape = *(shape_vec[0]);
  indices_size_ = indices_shape[0];
  workspace_size_list_[0] = indices_size_ * var_outer_dim_size_ * sizeof(float);
  workspace_size_list_[1] = indices_size_ * sizeof(int);
}

void SparseApplyAdamPSKernel::ReInit(const std::vector<AddressPtr> &inputs) {
  const auto &indices_addr = inputs[10];
  // indices_addr->size is in bytes; indices_size_ tracks the element count.
  indices_size_ = indices_addr->size / sizeof(int);
  workspace_size_list_[0] = indices_size_ * var_outer_dim_size_ * sizeof(float);
  workspace_size_list_[1] = indices_size_ * sizeof(int);
}

bool SparseApplyAdamPSKernel::Execute(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
                                      const std::vector<AddressPtr> &outputs) {
  ReInit(inputs);
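  // Indices arrive as global row ids; shift them into this server's local range (row 0 of the
  // local shard corresponds to global row rank_id_ * var_first_dim_size_) before launching the
  // CPU kernel.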
  int *indices = reinterpret_cast<int *>(inputs[10]->addr);
  for (size_t i = 0; i < inputs[10]->size / sizeof(int); i++) {
    indices[i] -= rank_id_ * var_first_dim_size_;
  }
  return Launch(inputs, workspace, outputs);
}

const std::vector<size_t> &SparseApplyAdamPSKernel::input_sizes() const { return GetInputSizeList(); }

const std::vector<size_t> &SparseApplyAdamPSKernel::output_sizes() const { return GetOutputSizeList(); }

const std::vector<size_t> &SparseApplyAdamPSKernel::workspace_sizes() const { return GetWorkspaceSizeList(); }
}  // namespace ps
}  // namespace kernel
}  // namespace mindspore
@@ -0,0 +1,49 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_CCSRC_KERNEL_CPU_SPARSE_APPLY_ADAM_PS_KERNEL_H_
#define MINDSPORE_CCSRC_KERNEL_CPU_SPARSE_APPLY_ADAM_PS_KERNEL_H_

#include <vector>
#include <memory>
#include "kernel/cpu/ps/pserver_kernel.h"
#include "kernel/cpu/sparse_apply_adam_cpu_kernel.h"

namespace mindspore {
namespace kernel {
namespace ps {
using mindspore::kernel::SparseApplyAdamCPUKernel;
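// Server-side sparse Adam optimizer kernel: the update math is inherited from
// SparseApplyAdamCPUKernel, while PServerKernel adds sharding and the Execute/size interface
// used by the parameter server.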
class SparseApplyAdamPSKernel : public SparseApplyAdamCPUKernel, public PServerKernel {
 public:
  SparseApplyAdamPSKernel(size_t rank_id, size_t pserver_num) : PServerKernel(rank_id, pserver_num) {}
  ~SparseApplyAdamPSKernel() override = default;

  void InitKernel(const std::shared_ptr<std::vector<std::shared_ptr<std::vector<size_t>>>> &) override;
  void ReInit(const std::shared_ptr<std::vector<std::shared_ptr<std::vector<size_t>>>> &) override;
  bool Execute(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
               const std::vector<AddressPtr> &outputs) override;

  const std::vector<size_t> &input_sizes() const override;
  const std::vector<size_t> &output_sizes() const override;
  const std::vector<size_t> &workspace_sizes() const override;

 protected:
  void ReInit(const std::vector<AddressPtr> &) override;
};
}  // namespace ps
}  // namespace kernel
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_KERNEL_CPU_SPARSE_APPLY_ADAM_PS_KERNEL_H_
@@ -0,0 +1,103 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "kernel/cpu/ps/sparse_apply_ftrl_ps_kernel.h"
#include "device/cpu/cpu_device_address.h"

namespace mindspore {
namespace kernel {
namespace ps {
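// InitKernel assumes the SparseApplyFtrl input layout: shape_vec[0..2] are var/accum/linear,
// shape_vec[3] is grad and shape_vec[4] is indices; var/accum/linear are sharded across
// parameter servers.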
void SparseApplyFtrlPSKernel::InitKernel(
  const std::shared_ptr<std::vector<std::shared_ptr<std::vector<size_t>>>> &shapes) {
  const std::vector<std::shared_ptr<std::vector<size_t>>> &shape_vec = *shapes;
  std::vector<size_t> var_shape = *(shape_vec[0]);
  std::vector<size_t> accum_shape = *(shape_vec[1]);
  std::vector<size_t> linear_shape = *(shape_vec[2]);
  std::vector<size_t> grad_shape = *(shape_vec[3]);
  std::vector<size_t> indices_shape = *(shape_vec[4]);

  Shard(&var_shape, 0);
  Shard(&accum_shape, 0);
  Shard(&linear_shape, 0);

  var_first_dim_size_ = var_shape[0];
  for (size_t i = 1; i < var_shape.size(); ++i) {
    if (var_shape[i] != grad_shape[i]) {
      MS_LOG(EXCEPTION) << "The shape of var and grad must be equal in dimension " << i;
    }
    var_outer_dim_size_ *= var_shape[i];
  }
  if (indices_shape.size() != 1) {
    MS_LOG(EXCEPTION) << "indices must be a 1D vector";
  }
  indices_size_ = indices_shape[0];
  if (grad_shape[0] != indices_size_) {
    MS_LOG(EXCEPTION) << "The first dimension of grad shape must be equal to the indices size";
  }
  /*
  lr_ = AnfAlgo::GetNodeAttr<float>(kernel_node, "lr");
  if (lr_ <= 0) {
    MS_LOG(EXCEPTION) << "lr should be a positive scalar";
  }
  l1_ = AnfAlgo::GetNodeAttr<float>(kernel_node, "l1");
  if (l1_ < 0) {
    MS_LOG(EXCEPTION) << "l1 should be a non-negative scalar";
  }
  l2_ = AnfAlgo::GetNodeAttr<float>(kernel_node, "l2");
  if (l2_ < 0) {
    MS_LOG(EXCEPTION) << "l2 should be a non-negative scalar";
  }
  lr_power_ = AnfAlgo::GetNodeAttr<float>(kernel_node, "lr_power");
  if (lr_power_ > 0) {
    MS_LOG(EXCEPTION) << "lr_power should be a non-positive scalar";
  }
  */
  workspace_size_list_.emplace_back(indices_size_ * var_outer_dim_size_ * sizeof(float));
  workspace_size_list_.emplace_back(indices_size_ * sizeof(int));
}
void SparseApplyFtrlPSKernel::ReInit(const std::shared_ptr<std::vector<std::shared_ptr<std::vector<size_t>>>> &shapes) {
  const std::vector<std::shared_ptr<std::vector<size_t>>> &shape_vec = *shapes;
  std::vector<size_t> indices_shape = *(shape_vec[0]);
  indices_size_ = indices_shape[0];
  workspace_size_list_[0] = indices_size_ * var_outer_dim_size_ * sizeof(float);
  workspace_size_list_[1] = indices_size_ * sizeof(int);
}

void SparseApplyFtrlPSKernel::ReInit(const std::vector<AddressPtr> &inputs) {
  const auto &indices_addr = inputs[4];
  // indices_addr->size is in bytes; indices_size_ tracks the element count.
  indices_size_ = indices_addr->size / sizeof(int);
  workspace_size_list_[0] = indices_size_ * var_outer_dim_size_ * sizeof(float);
  workspace_size_list_[1] = indices_size_ * sizeof(int);
}

bool SparseApplyFtrlPSKernel::Execute(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
                                      const std::vector<AddressPtr> &outputs) {
  ReInit(inputs);
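  // Indices arrive as global row ids; shift them into this server's local range before
  // launching the CPU kernel.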
  int *indices = reinterpret_cast<int *>(inputs[4]->addr);
  for (size_t i = 0; i < inputs[4]->size / sizeof(int); i++) {
    indices[i] -= rank_id_ * var_first_dim_size_;
  }
  return Launch(inputs, workspace, outputs);
}

const std::vector<size_t> &SparseApplyFtrlPSKernel::input_sizes() const { return GetInputSizeList(); }

const std::vector<size_t> &SparseApplyFtrlPSKernel::output_sizes() const { return GetOutputSizeList(); }

const std::vector<size_t> &SparseApplyFtrlPSKernel::workspace_sizes() const { return GetWorkspaceSizeList(); }
}  // namespace ps
}  // namespace kernel
}  // namespace mindspore
@@ -0,0 +1,50 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_CCSRC_KERNEL_CPU_SPARSE_APPLY_FTRL_PS_KERNEL_H_
#define MINDSPORE_CCSRC_KERNEL_CPU_SPARSE_APPLY_FTRL_PS_KERNEL_H_

#include <vector>
#include <memory>
#include "kernel/cpu/ps/pserver_kernel.h"
#include "kernel/cpu/sparse_apply_ftrl_cpu_kernel.h"

namespace mindspore {
namespace kernel {
namespace ps {
using mindspore::kernel::SparseApplyFtrlCPUKernel;
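// Server-side sparse FTRL optimizer kernel, combining the SparseApplyFtrlCPUKernel update with
// the PServerKernel sharding and interface.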
class SparseApplyFtrlPSKernel : public SparseApplyFtrlCPUKernel, public PServerKernel {
 public:
  SparseApplyFtrlPSKernel(size_t rank_id, size_t pserver_num) : PServerKernel(rank_id, pserver_num) {}
  ~SparseApplyFtrlPSKernel() override = default;

  void InitKernel(const std::shared_ptr<std::vector<std::shared_ptr<std::vector<size_t>>>> &) override;
  void ReInit(const std::shared_ptr<std::vector<std::shared_ptr<std::vector<size_t>>>> &) override;
  bool Execute(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
               const std::vector<AddressPtr> &outputs) override;

  const std::vector<size_t> &input_sizes() const override;
  const std::vector<size_t> &output_sizes() const override;
  const std::vector<size_t> &workspace_sizes() const override;

 protected:
  void ReInit(const std::vector<AddressPtr> &) override;
};
}  // namespace ps
}  // namespace kernel
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_KERNEL_CPU_SPARSE_APPLY_FTRL_PS_KERNEL_H_
@@ -33,7 +33,7 @@ class SparseApplyAdamCPUKernel : public CPUKernel {
  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
              const std::vector<AddressPtr> &outputs) override;

- private:
+ protected:
  size_t indices_size_{0};
  size_t var_first_dim_size_{0};
  size_t var_outer_dim_size_{1};
@@ -32,7 +32,7 @@ class SparseApplyFtrlCPUKernel : public CPUKernel {
  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
              const std::vector<AddressPtr> &outputs) override;

- private:
+ protected:
  size_t indices_size_{0};
  size_t var_first_dim_size_{0};
  size_t var_outer_dim_size_{1};