@@ -18,6 +18,7 @@
 #include <unordered_map>
 #include <map>
 #include <iostream>
+#include <utility>
 #include <fstream>
 #include "nlohmann/json.hpp"
 #include "session/anf_runtime_algorithm.h"
@@ -583,5 +584,55 @@ void DeduplicateIndexedSlices(const SparseGradient &origin_sparse_grad, SparseGr
   }
   unique_grad->indices_size_ = unique_indices_size;
 }
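+// Reduces a sparse gradient by merging duplicate rows: valid (index, value-offset) pairs are
+// sorted so that equal indices become adjacent, then each group is reduced into one output row.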
+void ReduceSparseGradient(const SparseGradient &origin_sparse_grad, SparseGradient *unique_grad, size_t first_dim,
+                          size_t outer_dim) {
+  MS_EXCEPTION_IF_NULL(origin_sparse_grad.value_);
+  MS_EXCEPTION_IF_NULL(origin_sparse_grad.indices_);
+  MS_EXCEPTION_IF_NULL(unique_grad);
+  MS_EXCEPTION_IF_NULL(unique_grad->value_);
+  MS_EXCEPTION_IF_NULL(unique_grad->indices_);
+  size_t unique_indices_size = 0;
+  std::vector<std::pair<int, size_t>> sorted_indices;
+  sorted_indices.reserve(origin_sparse_grad.indices_size_);
+  for (size_t i = 0; i < origin_sparse_grad.indices_size_; ++i) {
+    int index = origin_sparse_grad.indices_[i];
+    if (index < 0 || IntToSize(index) >= first_dim) {
+      continue;
+    }
+    sorted_indices.emplace_back(std::pair<int, size_t>(index, i * outer_dim));
+  }
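+  // Sort by index so that duplicate indices become adjacent and can be merged in a single pass.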
+  std::sort(
+    sorted_indices.begin(), sorted_indices.end(),
+    [](const std::pair<int, size_t> &left, const std::pair<int, size_t> &right) { return left.first < right.first; });
+  int last_index = 0;
+  size_t indices_size = sorted_indices.size();
+  size_t start_index = 0;
+  size_t end_index = outer_dim;
+  size_t dst_len = indices_size * outer_dim;
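+  // The first occurrence of an index copies its gradient row into the next output slot;
+  // repeated occurrences of the same index are accumulated into that slot.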
+  for (size_t i = 0; i < indices_size; ++i) {
+    int index = sorted_indices[i].first;
+    if (i == 0 || last_index != index) {
+      if (i > 0 && last_index != index) {
+        unique_indices_size++;
+        start_index += outer_dim;
+        end_index += outer_dim;
+      }
+      unique_grad->indices_[unique_indices_size] = index;
+      // memcpy_s takes sizes in bytes, so scale the element counts by sizeof(float).
+      auto ret_code = memcpy_s(unique_grad->value_ + start_index, (dst_len - start_index) * sizeof(float),
+                               origin_sparse_grad.value_ + sorted_indices[i].second, outer_dim * sizeof(float));
+      if (ret_code != EOK) {
+        MS_LOG(EXCEPTION) << "Failed to copy data!";
+      }
+    } else {
+      for (size_t j = start_index, k = sorted_indices[i].second; j < end_index; ++j, ++k) {
+        unique_grad->value_[j] += origin_sparse_grad.value_[k];
+      }
+    }
+    last_index = index;
+  }
+  if (indices_size > 0) {
+    // unique_indices_size indexes the last written row, so the row count is one larger.
+    unique_indices_size++;
+  }
+  unique_grad->indices_size_ = unique_indices_size;
+}
 } // namespace kernel
 } // namespace mindspore
@@ -92,6 +92,8 @@ bool IsSameShape(const std::vector<size_t> &shape_a, const std::vector<size_t> &
 int Sign(float x);
 void DeduplicateIndexedSlices(const SparseGradient &origin_sparse_grad, SparseGradient *unique_grad, size_t first_dim,
                               size_t outer_dim);
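+// Sorts the valid entries of origin_sparse_grad and sums rows with equal indices into unique_grad.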
+void ReduceSparseGradient(const SparseGradient &origin_sparse_grad, SparseGradient *unique_grad, size_t first_dim,
+                          size_t outer_dim);
 } // namespace kernel
 } // namespace mindspore
@@ -37,8 +37,8 @@ void CPUKernel::InitInputOutputSize(const CNodePtr &kernel_node) {
 }
 void CPUKernel::Init(const CNodePtr &kernel_node) {
-  InitInputOutputSize(kernel_node);
   InitKernel(kernel_node);
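+  // InitInputOutputSize may depend on members set up by InitKernel (e.g. sizes used for
+  // workspace buffers), so it is now called afterwards.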
+  InitInputOutputSize(kernel_node);
 }
 void CPUKernelUtils::ExpandDimsTo4(std::vector<size_t> *shape) {
@@ -23,6 +23,13 @@ namespace {
 constexpr size_t kSparseApplyFtrlInputSize = 5;
 } // namespace
+void SparseApplyFtrlCPUKernel::InitInputOutputSize(const CNodePtr &kernel_node) {
+  CPUKernel::InitInputOutputSize(kernel_node);
+  MS_EXCEPTION_IF_NULL(kernel_node);
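+  // Workspace holds the reduced gradient: one float buffer for the merged value rows and one
+  // int buffer for the unique indices, both sized for the worst case of no duplicate indices.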
+  workspace_size_list_.emplace_back(indices_size_ * var_outer_dim_size_ * sizeof(float));
+  workspace_size_list_.emplace_back(indices_size_ * sizeof(int));
+}
 void SparseApplyFtrlCPUKernel::InitKernel(const CNodePtr &kernel_node) {
   MS_EXCEPTION_IF_NULL(kernel_node);
   std::vector<size_t> var_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
@@ -72,7 +79,7 @@ void SparseApplyFtrlCPUKernel::InitKernel(const CNodePtr &kernel_node) {
 }
 bool SparseApplyFtrlCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs,
-                                      const std::vector<kernel::AddressPtr> & /*workspace*/,
+                                      const std::vector<kernel::AddressPtr> &workspace,
                                       const std::vector<kernel::AddressPtr> & /*outputs*/) {
   if (inputs.size() < kSparseApplyFtrlInputSize) {
     MS_LOG(EXCEPTION) << "error input output size!";
@@ -83,14 +90,11 @@ bool SparseApplyFtrlCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inp
   auto linear = reinterpret_cast<float *>(inputs[2]->addr);
   auto grad = reinterpret_cast<float *>(inputs[3]->addr);
   auto indices = reinterpret_cast<int *>(inputs[4]->addr);
-  std::vector<float> new_grad;
-  new_grad.reserve(indices_size_ * var_outer_dim_size_);
-  std::vector<int> new_indices;
-  new_indices.reserve(indices_size_);
-  SparseGradient unique_sparse_grad({new_grad.data(), new_indices.data(), indices_size_});
-  DeduplicateIndexedSlices(SparseGradient({grad, indices, indices_size_}), &unique_sparse_grad, var_first_dim_size_,
-                           var_outer_dim_size_);
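+  // Use the preallocated workspace buffers instead of std::vector storage that was only
+  // reserved (its data() was written to without ever resizing the vectors).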
+  auto new_grad = reinterpret_cast<float *>(workspace[0]->addr);
+  auto new_indices = reinterpret_cast<int *>(workspace[1]->addr);
+  SparseGradient unique_sparse_grad({new_grad, new_indices, indices_size_});
+  ReduceSparseGradient(SparseGradient({grad, indices, indices_size_}), &unique_sparse_grad, var_first_dim_size_,
+                       var_outer_dim_size_);
   for (size_t i = 0; i < unique_sparse_grad.indices_size_; ++i) {
     int index = unique_sparse_grad.indices_[i];
@@ -28,7 +28,7 @@ class SparseApplyFtrlCPUKernel : public CPUKernel {
   ~SparseApplyFtrlCPUKernel() override = default;
   void InitKernel(const CNodePtr &kernel_node) override;
+  void InitInputOutputSize(const CNodePtr &kernel_node) override;
   bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
               const std::vector<AddressPtr> &outputs) override;