You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

batch_op.cc 14 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303
  1. /**
  2. * Copyright 2019 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "dataset/engine/datasetops/batch_op.h"
  17. #include <utility>
  18. #include "common/utils.h"
  19. #include "dataset/engine/data_buffer.h"
  20. #include "dataset/engine/db_connector.h"
  21. namespace mindspore {
  22. namespace dataset {
  23. BatchOp::Builder::Builder(int32_t batch_size) : builder_drop_(false) {
  24. builder_batch_size_ = batch_size;
  25. std::shared_ptr<ConfigManager> cfg = GlobalContext::config_manager();
  26. builder_num_workers_ = cfg->num_parallel_workers();
  27. builder_op_connector_size_ = cfg->op_connector_size();
  28. }
  29. Status BatchOp::Builder::Build(std::shared_ptr<BatchOp> *ptr) {
  30. RETURN_IF_NOT_OK(SanityCheck());
  31. *ptr = std::make_shared<BatchOp>(builder_batch_size_, builder_drop_, builder_op_connector_size_, builder_num_workers_,
  32. builder_cols_to_map_, builder_batch_size_func_, builder_batch_map_func_);
  33. return Status::OK();
  34. }
  35. Status BatchOp::Builder::SanityCheck() {
  36. std::string err;
  37. err += builder_op_connector_size_ <= 0 ? "connector size <= 0\n" : "";
  38. err += builder_batch_size_ <= 0 ? "batch size <= 0\n" : "";
  39. err += builder_num_workers_ <= 0 ? "batch num_parallel_workers <= 0\n" : "";
  40. return err.empty() ? Status::OK() : Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, common::SafeCStr(err));
  41. }
// BatchOp constructor.
// @param batch_size default number of rows per batch (used when no batch_size_func is supplied)
// @param drop whether to discard a final partial batch
// @param op_queue_size capacity of each worker queue / output connector
// @param num_workers number of parallel worker threads
// @param cols_to_map column names to run through the python batch_map function (may be empty)
// @param batch_size_func optional python callback that returns the size of each batch
// @param batch_map_func optional python callback applied to each batch's mapped columns
BatchOp::BatchOp(int32_t batch_size, bool drop, int32_t op_queue_size, int32_t num_workers,
const std::vector<std::string> &cols_to_map, py::function batch_size_func, py::function batch_map_func)
: ParallelOp(num_workers, op_queue_size),
start_batch_size_(batch_size),
drop_(drop),
input_column_names_(cols_to_map),
batch_size_func_(batch_size_func),
batch_map_func_(batch_map_func) {
// NOTE(review): Init's return Status is discarded here — confirm it cannot fail
// for these arguments, or propagate it from LaunchThreadsAndInitOp instead.
worker_queues_.Init(num_workers, op_queue_size);
}
// Master thread: pulls rows from the child iterator, groups them into batches,
// and round-robins each full batch (plus EOE/EOF/quit control messages) to the
// worker queues. Workers do the actual tensor stacking.
Status BatchOp::operator()() {
  RETURN_IF_NOT_OK(LaunchThreadsAndInitOp());
  TaskManager::FindMe()->Post();
  // cnt counts every item pushed to any worker queue (batches and EOE markers
  // alike); it drives the round-robin worker selection below.
  int64_t epoch_num = 0, batch_num = 0, cnt = 0;
  TensorRow new_row;
  std::unique_ptr<TensorQTable> table = std::make_unique<TensorQTable>();
  child_iterator_ = std::make_unique<ChildIterator>(this, 0, 0);
  RETURN_IF_NOT_OK(child_iterator_->FetchNextTensorRow(&new_row));
  column_name_map_ = child_iterator_->col_name_id_map();
  int32_t cur_batch_size = 0;
  RETURN_IF_NOT_OK(GetBatchSize(&cur_batch_size, CBatchInfo(0, 0, 0)));
  // Outer loop: one iteration per epoch, until EOF is seen.
  while (child_iterator_->eof_handled() == false) {
    // Inner loop: accumulate rows until the current epoch's data runs out (EOE).
    while (new_row.empty() == false) {
      table->emplace_back(new_row);
      // if # of rows is enough to make 1 batch (1 batch is buffer), send it to worker_queue
      if (table->size() == static_cast<size_t>(cur_batch_size)) {
        RETURN_IF_NOT_OK(worker_queues_[cnt++ % num_workers_]->EmplaceBack(
          std::make_pair(std::move(table), CBatchInfo(epoch_num, batch_num++, cnt - epoch_num))));
        table = std::make_unique<TensorQTable>();
        // Batch size may be dynamic (python callback), so re-query it for the next batch.
        RETURN_IF_NOT_OK(GetBatchSize(&cur_batch_size, CBatchInfo(epoch_num, batch_num, cnt - epoch_num)));
      }
      RETURN_IF_NOT_OK(child_iterator_->FetchNextTensorRow(&new_row));
    }
    // Reminder logic, execute only when there is a remainder (table is non empty) and don't drop
    if (drop_ == false && table->empty() == false) {
      RETURN_IF_NOT_OK(worker_queues_[cnt++ % num_workers_]->EmplaceBack(
        std::make_pair(std::move(table), CBatchInfo(epoch_num, batch_num++, cnt - epoch_num))));
    }
    table = std::make_unique<TensorQTable>();  // this drops when drop == true
    // end of the current epoch, batch_num should start from 0 again
    batch_num = 0;
    epoch_num++;
    RETURN_IF_NOT_OK(
      worker_queues_[cnt++ % num_workers_]->EmplaceBack(std::make_pair(nullptr, CBatchInfo(batchCtrl::kEOE))));
    RETURN_IF_NOT_OK(GetBatchSize(&cur_batch_size, CBatchInfo(epoch_num, batch_num, cnt - epoch_num)));
    RETURN_IF_NOT_OK(child_iterator_->FetchNextTensorRow(&new_row));
  }  // end of eof_handled() == false
  RETURN_IF_NOT_OK(
    worker_queues_[cnt++ % num_workers_]->EmplaceBack(std::make_pair(nullptr, CBatchInfo(batchCtrl::kEOF))));
  // EOF received, send quit signal (an empty buffer) to all workers
  for (int32_t ind = 0; ind < num_workers_; ind++) {
    RETURN_IF_NOT_OK(
      worker_queues_[cnt++ % num_workers_]->EmplaceBack(std::make_pair(nullptr, CBatchInfo(batchCtrl::kQuit))));
  }
  return Status::OK();
}
  98. void BatchOp::Print(std::ostream &out, bool show_all) const {
  99. ParallelOp::Print(out, show_all);
  100. out << "\nBatchOp:\n"
  101. << "number of parallel workers: " << num_workers_ << "\nBatch size: " << start_batch_size_
  102. << "\nDrop remainder: " << (drop_ ? "yes" : "no") << "\n\n";
  103. }
// Pop batch_size rows off the front of *source_table and stack them into a
// single batched row (one tensor per column, with a new leading batch
// dimension) appended to *dest_table.
// @param source_table rows to consume; must hold at least batch_size rows
// @param dest_table receives exactly one batched row
// @param batch_size number of rows to combine (>= 1)
// @return error if the table is short or any row's column shapes differ from the first row's
Status BatchOp::BatchRows(const std::unique_ptr<TensorQTable> *source_table,
                          const std::unique_ptr<TensorQTable> *dest_table, size_t batch_size) {
  if ((*source_table)->size() < batch_size || (*source_table)->size() == 0) {
    RETURN_STATUS_UNEXPECTED("[Internal Batch ERROR] Insufficient rows in source_table\n");
  }
  TensorRow row = std::move((*source_table)->front());
  (*source_table)->pop_front();
  if (batch_size == 1) {
    // Single-row batch: just prepend a batch dimension of 1 to every tensor.
    for (std::shared_ptr<Tensor> tensor : row) {
      RETURN_IF_NOT_OK(tensor->ExpandDim(0));
    }
    (*dest_table)->push_back(row);
  } else {  // batch_size > 1
    std::vector<TensorShape> row_shapes;
    TensorRow batched_row;
    for (size_t i = 0; i < row.size(); i++) {  // Handle the first row popped
      // Remember each column's shape; every later row must match it exactly.
      row_shapes.push_back(row[i]->shape());
      // Allocate the destination tensor with shape (batch_size, *column_shape).
      std::shared_ptr<Tensor> ts;
      RETURN_IF_NOT_OK(Tensor::CreateTensor(
        &ts, TensorImpl::kFlexible, row[i]->shape().PrependDim(static_cast<int64_t>(batch_size)), row[i]->type()));
      batched_row.emplace_back(ts);
      RETURN_IF_NOT_OK(batched_row[i]->InsertTensor(std::vector<dsize_t>(1, 0), row[i]));  // {j} = 0
    }
    for (size_t j = 1; j < batch_size; j++) {  // Handle the rest of the rows
      row = std::move((*source_table)->front());
      (*source_table)->pop_front();
      for (size_t i = 0; i < row.size(); i++) {
        if (row[i]->shape() == row_shapes[i]) {  // check the newly popped rows have the same dim as the first
          RETURN_IF_NOT_OK(batched_row[i]->InsertTensor(std::vector<dsize_t>(1, j), row[i]));
        } else {
          RETURN_STATUS_UNEXPECTED("[Batch ERROR] Inconsistent TensorShapes\n");
        }
      }
    }
    (*dest_table)->emplace_back(batched_row);
  }
  return Status::OK();
}
  142. Status BatchOp::WorkerEntry(int32_t workerId) {
  143. TaskManager::FindMe()->Post();
  144. std::pair<std::unique_ptr<TensorQTable>, CBatchInfo> table_pair;
  145. RETURN_IF_NOT_OK(worker_queues_[workerId]->PopFront(&table_pair));
  146. while (table_pair.second.ctrl_ != batchCtrl::kQuit) {
  147. if (table_pair.second.ctrl_ == batchCtrl::kEOE) {
  148. RETURN_IF_NOT_OK(out_connector_->Add(workerId, std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE)));
  149. } else if (table_pair.second.ctrl_ == batchCtrl::kEOF) {
  150. RETURN_IF_NOT_OK(out_connector_->Add(workerId, std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF)));
  151. } else if (table_pair.second.ctrl_ == batchCtrl::kNoCtrl) {
  152. std::unique_ptr<DataBuffer> db = nullptr;
  153. RETURN_IF_NOT_OK(MakeBatchedBuffer(std::move(table_pair), &db));
  154. RETURN_IF_NOT_OK(out_connector_->Add(workerId, std::move(db)));
  155. }
  156. RETURN_IF_NOT_OK(worker_queues_[workerId]->PopFront(&table_pair));
  157. }
  158. return Status::OK();
  159. }
  160. Status BatchOp::MakeBatchedBuffer(std::pair<std::unique_ptr<TensorQTable>, CBatchInfo> table_pair,
  161. std::unique_ptr<DataBuffer> *db) {
  162. RETURN_UNEXPECTED_IF_NULL(table_pair.first);
  163. if (!input_column_names_.empty()) RETURN_IF_NOT_OK(MapColumns(&table_pair)); // pass it through pyfunc
  164. (*db) = std::make_unique<DataBuffer>(table_pair.second.batch_num_, DataBuffer::kDeBFlagNone);
  165. std::unique_ptr<TensorQTable> dest_table = std::make_unique<TensorQTable>();
  166. RETURN_IF_NOT_OK(BatchRows(&table_pair.first, &dest_table, table_pair.first->size()));
  167. (*db)->set_tensor_table(std::move(dest_table));
  168. (*db)->set_column_name_map(column_name_map_);
  169. return Status::OK();
  170. }
  171. Status BatchOp::LaunchThreadsAndInitOp() {
  172. RETURN_UNEXPECTED_IF_NULL(tree_);
  173. RETURN_IF_NOT_OK(worker_queues_.Register(tree_->AllTasks()));
  174. RETURN_IF_NOT_OK(tree_->LaunchWorkers(num_workers_, std::bind(&BatchOp::WorkerEntry, this, std::placeholders::_1)));
  175. return Status::OK();
  176. }
  177. Status BatchOp::EofReceived(int32_t) { return Status::OK(); }
  178. Status BatchOp::EoeReceived(int32_t) {
  179. state_ = OpState::kDeOpIdle;
  180. return Status::OK();
  181. }
  182. Status BatchOp::MapColumns(std::pair<std::unique_ptr<TensorQTable>, CBatchInfo> *table_pair) {
  183. TensorBatchTable input_table;
  184. input_table.reserve(input_column_names_.size());
  185. for (std::string col_name : input_column_names_) {
  186. if (column_name_map_.find(col_name) == column_name_map_.end()) {
  187. RETURN_STATUS_UNEXPECTED("column : '" + col_name + "' does not exist\n");
  188. }
  189. TensorBatch tensor_batch;
  190. tensor_batch.reserve(table_pair->first->size());
  191. size_t col_idx = static_cast<size_t>(column_name_map_[col_name]);
  192. for (size_t row_idx = 0; row_idx < table_pair->first->size(); row_idx++) {
  193. tensor_batch.push_back(std::move(table_pair->first->at(row_idx)[col_idx]));
  194. }
  195. input_table.push_back(std::move(tensor_batch));
  196. }
  197. // Perform batch map
  198. TensorBatchTable output_table;
  199. RETURN_IF_NOT_OK(InvokeBatchMapFunc(&input_table, &output_table, table_pair->second));
  200. // Write back to TensorQTable
  201. for (size_t input_idx = 0; input_idx < input_column_names_.size(); input_idx++) {
  202. size_t col_idx = static_cast<size_t>(column_name_map_[input_column_names_[input_idx]]);
  203. size_t row_id = 0;
  204. for (TensorRow &row : *(table_pair->first)) {
  205. row[col_idx] = std::move(output_table[input_idx][row_id++]);
  206. }
  207. }
  208. return Status::OK();
  209. }
  210. Status BatchOp::GetBatchSize(int32_t *batch_size, CBatchInfo info) {
  211. if (batch_size_func_ != nullptr) {
  212. RETURN_IF_NOT_OK(InvokeBatchSizeFunc(batch_size, info));
  213. } else {
  214. (*batch_size) = start_batch_size_;
  215. }
  216. return Status::OK();
  217. }
// Call the user-provided python batch-size callback for one batch.
// Acquires the GIL for the duration of the call.
// @param[out] batch_size the value returned by the callback; must be > 0
// @param info epoch/batch bookkeeping forwarded to python
// @return kPythonInterpreterFailure if the interpreter is already finalized,
//         kPyFuncException if python raises, or the return value is not a
//         positive integer
Status BatchOp::InvokeBatchSizeFunc(int32_t *batch_size, CBatchInfo info) {
  {
    // Acquire Python GIL
    py::gil_scoped_acquire gil_acquire;
    if (Py_IsInitialized() == 0) {
      return Status(StatusCode::kPythonInterpreterFailure, "Python Interpreter is finalized");
    }
    try {
      py::object size = batch_size_func_(info);
      *batch_size = size.cast<int32_t>();
      if (*batch_size <= 0) {
        return Status(StatusCode::kPyFuncException, "Batch size function should return an integer > 0");
      }
    } catch (const py::error_already_set &e) {
      // The callback itself raised a python exception.
      return Status(StatusCode::kPyFuncException, e.what());
    } catch (const py::cast_error &e) {
      // The callback returned something not castable to int32.
      return Status(StatusCode::kPyFuncException, "Batch size function should return an integer > 0");
    }
  }
  return Status(StatusCode::kOK, "Batch size func call succeed");
}
  239. Status BatchOp::InvokeBatchMapFunc(TensorBatchTable *input, TensorBatchTable *output, CBatchInfo info) {
  240. {
  241. // Acquire Python GIL
  242. py::gil_scoped_acquire gil_acquire;
  243. if (Py_IsInitialized() == 0) {
  244. return Status(StatusCode::kPythonInterpreterFailure, "Python Interpreter is finalized");
  245. }
  246. try {
  247. // Prepare batch map call back parameters
  248. py::tuple input_args(input->size() + 1);
  249. for (size_t i = 0; i < input->size(); i++) {
  250. std::vector<py::array> np_batch;
  251. for (std::shared_ptr<Tensor> t : input->at(i)) {
  252. py::array np_array;
  253. RETURN_IF_NOT_OK(t->GetDataAsNumpy(&np_array));
  254. np_batch.push_back(std::move(np_array));
  255. }
  256. input_args[i] = np_batch;
  257. }
  258. input_args[input->size()] = info;
  259. // Invoke batch map func
  260. py::object ret_py_obj = batch_map_func_(*input_args);
  261. // Parse batch map return value
  262. py::tuple ret_tuple = py::cast<py::tuple>(ret_py_obj);
  263. if (ret_tuple.size() != input_column_names_.size() || !py::isinstance<py::tuple>(ret_tuple)) {
  264. return Status(StatusCode::kPyFuncException, "Batch map function should return an tuple if size(input_columns)");
  265. }
  266. for (size_t i = 0; i < ret_tuple.size(); i++) {
  267. TensorBatch output_batch;
  268. py::list output_list = py::cast<py::list>(ret_tuple[i]);
  269. for (size_t j = 0; j < output_list.size(); j++) {
  270. std::shared_ptr<Tensor> out;
  271. RETURN_IF_NOT_OK(Tensor::CreateTensor(&out, py::cast<py::array>(output_list[j])));
  272. output_batch.push_back(std::move(out));
  273. }
  274. output->push_back(std::move(output_batch));
  275. }
  276. } catch (const py::error_already_set &e) {
  277. return Status(StatusCode::kPyFuncException, e.what());
  278. } catch (const py::cast_error &e) {
  279. return Status(StatusCode::kPyFuncException, "Batch map function should return an tuple of list of numpy array");
  280. }
  281. }
  282. return Status(StatusCode::kOK);
  283. }
  284. } // namespace dataset
  285. } // namespace mindspore