
!16899 Fix master ps pc lint

From: @zpac
Reviewed-by: @limingqi107, @cristoval
Signed-off-by: @cristoval
Tag: v1.3.0
Committed by mindspore-ci-bot (Gitee), 4 years ago
Commit: b444c42813
5 changed files with 37 additions and 42 deletions
  1. mindspore/ccsrc/ps/optimizer_info.cc  +16 -15
  2. mindspore/ccsrc/ps/optimizer_info.h  +3 -3
  3. mindspore/ccsrc/ps/optimizer_info_builder.cc  +12 -15
  4. mindspore/ccsrc/ps/ps_context.cc  +3 -5
  5. mindspore/ccsrc/ps/ps_context.h  +3 -4
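
The edits below are mechanical lint cleanups that repeat two patterns: read-only accessors become const-qualified, and implicit signed/unsigned conversions are replaced with explicit helpers (IntToSize, SizeToInt, SizeToLong, LongToSize). The following is a minimal sketch of both patterns; the IntToSize shown here is a hypothetical stand-in, not MindSpore's actual implementation.

```cpp
#include <cstddef>
#include <stdexcept>
#include <vector>

// Hypothetical stand-in for MindSpore's conversion helper; the real one lives
// in the project's utility headers and may differ in error handling.
inline size_t IntToSize(int v) {
  if (v < 0) throw std::runtime_error("negative int cannot be converted to size_t");
  return static_cast<size_t>(v);
}

class Example {
 public:
  // Lint-clean accessor: const-qualified, returns a const reference.
  const std::vector<int> &lens() const { return lens_; }

  // Lint-clean offset math: widen explicitly instead of mixing int and size_t.
  size_t OffsetOf(size_t index) const {
    size_t offset = 0;
    for (size_t i = 0; i < index; ++i) {
      offset += IntToSize(lens_[i]);
    }
    return offset;
  }

 private:
  std::vector<int> lens_{4, 8, 16};
};
```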

+16 -15  mindspore/ccsrc/ps/optimizer_info.cc

@@ -25,11 +25,11 @@ namespace mindspore {
namespace ps {
void OptimizerInfo::AddWorkspace(const AddressPtr &workspace) { workspaces_.push_back(workspace); }

-const std::vector<AddressPtr> &OptimizerInfo::inputs() { return inputs_; }
+const std::vector<AddressPtr> &OptimizerInfo::inputs() const { return inputs_; }

-const std::vector<AddressPtr> &OptimizerInfo::workspaces() { return workspaces_; }
+const std::vector<AddressPtr> &OptimizerInfo::workspaces() const { return workspaces_; }

-const std::vector<AddressPtr> &OptimizerInfo::outputs() { return outputs_; }
+const std::vector<AddressPtr> &OptimizerInfo::outputs() const { return outputs_; }

bool OptimizerInfo::IsSparse() const { return false; }

@@ -58,8 +58,8 @@ void OptimizerInfo::UpdateOptimInputValue(const std::string &optim_type, const s
<< ", ps_send_index:" << ps_send_index;
}
EXC_IF_VEC_IDX_OOB(lens, ps_send_index);
-size_t size = lens[ps_send_index] * sizeof(T);
-size_t offset = std::accumulate(lens.begin(), lens.begin() + ps_send_index, 0, std::plus<int>());
+size_t size = IntToSize(lens[ps_send_index]) * sizeof(T);
+int offset = std::accumulate(lens.begin(), lens.begin() + ps_send_index, 0, std::plus<int>());
AddressPtr optim_input = inputs_[origin_index];
MS_EXCEPTION_IF_NULL(optim_input);

@@ -82,11 +82,11 @@ void DenseOptimInfo::Accumulate(const Values &values, const Lengths &lengths) {
size_t grad_index = this->grad_index();
size_t grad_offset = 0;
for (size_t i = 0; i < grad_index; i++) {
-grad_offset += lengths[i];
+grad_offset += IntToSize(lengths[i]);
}
float *grad_data = const_cast<float *>(values.data()) + grad_offset;
#define google mindspore_private
-CHECK_EQ(size, static_cast<size_t>(lengths[grad_index]));
+CHECK_EQ(size, IntToSize(lengths[grad_index]));
#undef google
for (size_t i = 0; i < size; i++) {
accum_grad_data[i] += grad_data[i];
@@ -120,12 +120,12 @@ void SparseOptimInfo::Accumulate(const Values &values, const Lengths &lengths) {
size_t grad_index = this->grad_index();
size_t grad_offset = 0;
for (size_t i = 0; i < grad_index; i++) {
-grad_offset += lengths[i];
+grad_offset += SizeToInt(lengths[i]);
}
float *incr_grad_data = const_cast<float *>(values.data()) + grad_offset;
MS_EXCEPTION_IF_NULL(incr_grad_data);

-size_t incr_grad_size = lengths[grad_index] * sizeof(float);
+size_t incr_grad_size = SizeToInt(lengths[grad_index]) * sizeof(float);
size_t dst_size = incr_grad_size;
size_t src_size = incr_grad_size;
void *dst_data = accum_grad_data + grads_offset_;
@@ -147,7 +147,7 @@ void SparseOptimInfo::Accumulate(const Values &values, const Lengths &lengths) {
size_t indices_index = this->indices_index();
size_t indice_offset = 0;
for (size_t i = 0; i < indices_index; i++) {
-indice_offset += lengths[i];
+indice_offset += IntToSize(lengths[i]);
}

void *incr_indice_data_temp = const_cast<float *>(values.data()) + indice_offset;
@@ -168,7 +168,7 @@ void SparseOptimInfo::Accumulate(const Values &values, const Lengths &lengths) {
MS_LOG(EXCEPTION) << "memcpy_s error, errorno(" << ret2 << ")";
return;
}
-indices_offset_ += lengths[indices_index];
+indices_offset_ += IntToSize(lengths[indices_index]);
indices()->size += incr_indice_data_size;
}

@@ -206,15 +206,16 @@ void SparseOptimInfo::ComputeMean(const std::vector<std::vector<size_t>> &shapes
size_t original_row_count = input_shapes.front();
if (original_row_count > 0) {
size_t offset = 0;
-std::map<int64_t, int64_t> rank_dims = Util::AllRankLocalShard(original_row_count, rank_id, server_num);
+std::map<int64_t, int64_t> rank_dims =
+Util::AllRankLocalShard(SizeToLong(original_row_count), SizeToLong(rank_id), SizeToLong(server_num));
for (size_t i = 0; i < rank_id; i++) {
if (rank_dims.count(i) == 0) {
MS_LOG(EXCEPTION) << "No local shard number for rank " << i;
}
-offset += rank_dims[i];
+offset += LongToSize(rank_dims[i]);
}
for (size_t i = 0; i < indices_size; i++) {
-indices_data[i] -= offset;
+indices_data[i] -= SizeToInt(offset);
}
}
}
@@ -224,7 +225,7 @@ void SparseOptimInfo::ComputeMean(const std::vector<std::vector<size_t>> &shapes

int64_t reduced_grad_size = unique_sparse_grad.indices_size_ * segment_size * sizeof(float);
MS_EXCEPTION_IF_NULL(unique_sparse_grad.value_);
-int64_t ret = memcpy_s(gradient()->addr, gradient()->size, unique_sparse_grad.value_, reduced_grad_size);
+int ret = memcpy_s(gradient()->addr, gradient()->size, unique_sparse_grad.value_, reduced_grad_size);
if (ret != 0) {
MS_LOG(EXCEPTION) << "memcpy_s error, errorno(" << ret << ")";
return;


+3 -3  mindspore/ccsrc/ps/optimizer_info.h

@@ -40,9 +40,9 @@ class OptimizerInfo {
virtual const AddressPtr &gradient() = 0;
virtual const AddressPtr &indices() = 0;
virtual const size_t indice_size() const;
-const std::vector<AddressPtr> &inputs();
-const std::vector<AddressPtr> &workspaces();
-const std::vector<AddressPtr> &outputs();
+const std::vector<AddressPtr> &inputs() const;
+const std::vector<AddressPtr> &workspaces() const;
+const std::vector<AddressPtr> &outputs() const;

virtual bool IsSparse() const;
virtual size_t grad_index();


+12 -15  mindspore/ccsrc/ps/optimizer_info_builder.cc

@@ -38,8 +38,7 @@ OptimizerInfo *OptimizerInfoBuilder::Build(const std::shared_ptr<PServerKernel>
return optim_info;
}

-void OptimizerInfoBuilder::BuildWorkspaces(OptimizerInfo *info, const std::vector<size_t> &ws_sizes,
-size_t worker_num) {
+void OptimizerInfoBuilder::BuildWorkspaces(OptimizerInfo *info, const std::vector<size_t> &ws_sizes, size_t) {
for (size_t i = 0; i < ws_sizes.size(); i++) {
size_t size = ws_sizes[i];
AddressPtr workspace = std::make_shared<kernel::Address>();
@@ -83,13 +82,13 @@ AddressPtr OptimizerInfoBuilder::GenInputAddrPtr(const std::string &optim_type,
addr_data_size = std::accumulate(shape.begin(), shape.end(), worker_num_, std::multiplies<size_t>());
} else {
EXC_IF_VEC_IDX_OOB(ps_lens, ps_index);
-addr_data_size = ps_lens[ps_index];
+addr_data_size = IntToSize(ps_lens[ps_index]);
}
-addr_data_offset = std::accumulate(ps_lens.begin(), ps_lens.begin() + ps_index, 0, std::plus<int>());
+addr_data_offset = IntToSize(std::accumulate(ps_lens.begin(), ps_lens.begin() + ps_index, 0, std::plus<int>()));

// The size in ps_lens instead of addr_data_size is the size of real data.
T *buffer = new T[addr_data_size];
-addr_ptr->size = ps_lens[ps_index] * sizeof(T);
+addr_ptr->size = IntToSize(ps_lens[ps_index] * sizeof(T));
addr_ptr->addr = buffer;

size_t dst_size = addr_ptr->size;
@@ -108,9 +107,9 @@ AddressPtr OptimizerInfoBuilder::GenInputAddrPtr(const std::string &optim_type,
return addr_ptr;
}

-OptimizerInfo *MomentumOptimInfoBuilder::BuildInputs(const WeightPtr &weight, const Keys &keys, const Values &values,
-const Lengths &lens, const InputsShapePtr &inputs_shape,
-size_t worker_num, const std::shared_ptr<PServerKernel> &, bool) {
+OptimizerInfo *MomentumOptimInfoBuilder::BuildInputs(const WeightPtr &weight, const Keys &, const Values &values,
+const Lengths &lens, const InputsShapePtr &, size_t,
+const std::shared_ptr<PServerKernel> &, bool) {
AddressPtr weight_addr = std::make_shared<kernel::Address>();
MS_EXCEPTION_IF_NULL(weight_addr);
weight_addr->addr = weight->data();
@@ -135,10 +134,9 @@ OptimizerInfo *MomentumOptimInfoBuilder::BuildInputs(const WeightPtr &weight, co
return new MomentumOptimInfo(weight_addr, accumulate, learning_rate, gradient, momentum);
}

-OptimizerInfo *SparseAdamOptimInfoBuilder::BuildInputs(const WeightPtr &weight, const Keys &keys, const Values &values,
-const Lengths &lens, const InputsShapePtr &inputs_shape,
-size_t worker_num, const std::shared_ptr<PServerKernel> &,
-bool sharded) {
+OptimizerInfo *SparseAdamOptimInfoBuilder::BuildInputs(const WeightPtr &weight, const Keys &, const Values &values,
+const Lengths &lens, const InputsShapePtr &inputs_shape, size_t,
+const std::shared_ptr<PServerKernel> &, bool sharded) {
AddressPtr weight_addr = std::make_shared<kernel::Address>();
MS_EXCEPTION_IF_NULL(weight_addr);
weight_addr->addr = weight->data();
@@ -185,9 +183,8 @@ OptimizerInfo *SparseAdamOptimInfoBuilder::BuildInputs(const WeightPtr &weight,
grad, indices, sharded);
}

-OptimizerInfo *SparseFtrlOptimInfoBuilder::BuildInputs(const WeightPtr &weight, const Keys &keys, const Values &values,
-const Lengths &lens, const InputsShapePtr &inputs_shape,
-size_t worker_num,
+OptimizerInfo *SparseFtrlOptimInfoBuilder::BuildInputs(const WeightPtr &weight, const Keys &, const Values &values,
+const Lengths &lens, const InputsShapePtr &inputs_shape, size_t,
const std::shared_ptr<PServerKernel> &pserver_kernel,
bool sharded) {
MS_EXCEPTION_IF_NULL(inputs_shape);
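
The builder signatures above also drop the names of parameters they never use (for example, const Keys &keys becomes const Keys &). Leaving a parameter unnamed keeps the override's signature intact while silencing unused-parameter warnings. A minimal sketch with hypothetical types (the real overrides take MindSpore-specific arguments):

```cpp
#include <cstddef>
#include <iostream>

// Hypothetical interface with a parameter that some implementations ignore.
struct Builder {
  virtual int Build(int value, size_t worker_num) = 0;
  virtual ~Builder() = default;
};

struct SimpleBuilder : Builder {
  // worker_num is not needed here, so the parameter is left unnamed;
  // no unused-parameter warning, and the override still matches.
  int Build(int value, size_t) override { return value * 2; }
};

int main() {
  SimpleBuilder b;
  std::cout << b.Build(21, 8) << std::endl;  // prints 42
  return 0;
}
```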


+3 -5  mindspore/ccsrc/ps/ps_context.cc

@@ -117,13 +117,11 @@ bool PSContext::is_scheduler() const {
return is_sched_;
}

-uint32_t PSContext::initial_worker_num() { return worker_num_; }
+uint32_t PSContext::initial_worker_num() const { return worker_num_; }

-uint32_t PSContext::initial_server_num() { return server_num_; }
+uint32_t PSContext::initial_server_num() const { return server_num_; }

-std::string PSContext::scheduler_host() { return scheduler_host_; }
-
-uint16_t PSContext::scheduler_port() { return scheduler_port_; }
+std::string PSContext::scheduler_host() const { return scheduler_host_; }

void PSContext::SetPSRankId(int rank_id) { rank_id_ = rank_id; }



+3 -4  mindspore/ccsrc/ps/ps_context.h

@@ -64,10 +64,9 @@ class PSContext {
bool is_worker() const;
bool is_server() const;
bool is_scheduler() const;
-uint32_t initial_worker_num();
-uint32_t initial_server_num();
-std::string scheduler_host();
-uint16_t scheduler_port();
+uint32_t initial_worker_num() const;
+uint32_t initial_server_num() const;
+std::string scheduler_host() const;
void SetPSRankId(int rank_id);
int ps_rank_id() const;
void InsertHashTableSize(const std::string &param_name, size_t cache_vocab_size, size_t embedding_size,

