From: @ezphlow
Reviewed-by: @nsyca, @robingrosman
Signed-off-by: @robingrosman
tags/v1.2.0-rc1
@@ -31,6 +31,9 @@ namespace dataset {
 Execute::Execute(std::shared_ptr<TensorOperation> op) : op_(std::move(op)) {}
+
+/// \brief Destructor
+Execute::~Execute() = default;
 #ifdef ENABLE_ANDROID
 std::shared_ptr<tensor::MSTensor> Execute::operator()(std::shared_ptr<tensor::MSTensor> input) {
   // Build the op
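Note on the pattern above: the destructor is declared in the header (see the `class Execute` hunk further down) and defined as `= default` here in the .cc, which pins the definition to one translation unit. With `std::unique_ptr` members this is also what lets the header get by with a forward declaration of the pointee type; with `std::shared_ptr` members, as here, it mainly keeps the special member explicit. A hedged sketch of the idiom, using hypothetical `Widget`/`Impl` types rather than the MindSpore classes:

```cpp
// Hedged sketch of the declare-in-header / default-in-cc idiom, using
// hypothetical Widget/Impl types (not MindSpore classes). Shown as one
// translation unit for brevity.
#include <memory>

// --- what would live in widget.h ---
class Impl;  // forward declaration is enough for the header
class Widget {
 public:
  Widget();
  ~Widget();  // declared only; defining it here would require a complete Impl
 private:
  std::unique_ptr<Impl> impl_;
};

// --- what would live in widget.cc ---
class Impl {
 public:
  int value = 0;
};
Widget::Widget() : impl_(std::make_unique<Impl>()) {}
Widget::~Widget() = default;  // Impl is complete here, so unique_ptr can delete it

int main() {
  Widget w;  // constructs and destroys cleanly
  return 0;
}
```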
@@ -53,37 +53,37 @@ PYBIND_REGISTER(TreeGetters, 1, ([](const py::module *m) {
        [](PythonTreeGetters &self, std::shared_ptr<DatasetNode> d) { THROW_IF_ERROR(self.Init(d)); })
   .def("GetOutputShapes",
        [](PythonTreeGetters &self) {
-         std::vector<TensorShape> shapes;
+         std::vector<TensorShape> shapes = {};
          THROW_IF_ERROR(self.GetOutputShapes(&shapes));
          return shapesToListOfShape(shapes);
        })
   .def("GetOutputTypes",
        [](PythonTreeGetters &self) {
-         std::vector<DataType> types;
+         std::vector<DataType> types = {};
          THROW_IF_ERROR(self.GetOutputTypes(&types));
          return typesToListOfType(types);
        })
   .def("GetNumClasses",
        [](PythonTreeGetters &self) {
-         int64_t num_classes;
+         int64_t num_classes = -1;
          THROW_IF_ERROR(self.GetNumClasses(&num_classes));
          return num_classes;
        })
   .def("GetRepeatCount",
        [](PythonTreeGetters &self) {
-         int64_t repeat_count;
+         int64_t repeat_count = -1;
          THROW_IF_ERROR(self.GetRepeatCount(&repeat_count));
          return repeat_count;
        })
   .def("GetBatchSize",
        [](PythonTreeGetters &self) {
-         int64_t batch_size;
+         int64_t batch_size = -1;
          THROW_IF_ERROR(self.GetBatchSize(&batch_size));
          return batch_size;
        })
   .def("GetColumnNames",
        [](PythonTreeGetters &self) {
-         std::vector<std::string> col_names;
+         std::vector<std::string> col_names = {};
          THROW_IF_ERROR(self.GetColumnNames(&col_names));
          return col_names;
        })
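The change in each binding is the same: the local handed to the getter as an out-parameter now gets an explicit initial value (`{}` for containers, `-1` for the scalar counts), so the lambda never returns an indeterminate value if the getter fails before writing, and static analyzers stop flagging the reads. A small sketch of the out-parameter idiom, with a hypothetical `Status` type and getter standing in for the real API:

```cpp
// Minimal sketch of the out-parameter idiom in the bindings above, with a
// hypothetical Status type and getter (not the real MindSpore API).
#include <cstdint>
#include <iostream>

struct Status {
  bool ok;
  static Status OK() { return {true}; }
  static Status Error() { return {false}; }
};

// Writes into *out only on success, like the GetXxx getters.
Status GetBatchSize(int64_t *const out) {
  if (out == nullptr) return Status::Error();
  *out = 32;  // pretend this was read from the dataset tree
  return Status::OK();
}

int main() {
  int64_t batch_size = -1;  // initialized, so a failed call leaves a defined sentinel
  Status rc = GetBatchSize(&batch_size);
  std::cout << (rc.ok ? batch_size : int64_t{-1}) << "\n";
  return 0;
}
```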
@@ -202,7 +202,7 @@ std::vector<std::shared_ptr<CsvBase>> toCSVBase(py::list csv_bases) {
   return vector;
 }
 
-Status ToJson(const py::handle &padded_sample, nlohmann::json *padded_sample_json,
+Status ToJson(const py::handle &padded_sample, nlohmann::json *const padded_sample_json,
               std::map<std::string, std::string> *sample_bytes) {
   for (const py::handle &key : padded_sample) {
     if (py::isinstance<py::bytes>(padded_sample[key])) {
@@ -73,7 +73,7 @@ std::vector<std::shared_ptr<CsvBase>> toCSVBase(py::list csv_bases);
 std::shared_ptr<TensorOp> toPyFuncOp(py::object func, DataType::Type data_type);
 
-Status ToJson(const py::handle &padded_sample, nlohmann::json *padded_sample_json,
+Status ToJson(const py::handle &padded_sample, nlohmann::json *const padded_sample_json,
               std::map<std::string, std::string> *sample_bytes);
 
 Status toPadInfo(py::dict value, std::map<std::string, std::pair<TensorShape, std::shared_ptr<Tensor>>> *pad_info);
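`ToJson` keeps the same signature for its callers: `nlohmann::json *const` only makes the pointer parameter itself const inside the function body, so it cannot be reseated, while the pointed-to JSON object stays writable. Top-level const on a parameter does not change the function type, which is also why the `override`s later in this patch still match their base declarations. A sketch with a hypothetical function:

```cpp
// Sketch of the `*const` parameter qualifier with a hypothetical function:
// the pointer itself is const inside the body, the pointee is not.
#include <string>

void FillName(std::string *const out) {
  // out = nullptr;        // would not compile: `out` cannot be reseated
  if (out != nullptr) {
    *out = "cifar10";      // fine: the pointed-to string is writable
  }
}

int main() {
  std::string name;
  FillName(&name);         // call sites are unchanged by the qualifier
  return 0;
}
```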
@@ -61,7 +61,7 @@ Status PythonSaveToDisk::Save() {
 PythonSaveToDisk::PythonSaveToDisk(const std::string &datasetPath, int32_t numFiles, const std::string &datasetType)
     : SaveToDisk(datasetPath, numFiles, datasetType) {}
 
-Status PythonTreeGetters::GetRow(TensorRow *r) {
+Status PythonTreeGetters::GetRow(TensorRow *const r) {
   py::gil_scoped_release gil_release;
   return TreeGetters::GetRow(r);
 }
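`py::gil_scoped_release` is an RAII guard: it drops the Python GIL for the duration of the blocking `TreeGetters::GetRow` call so other Python threads can make progress, and reacquires it when `gil_release` goes out of scope. A minimal pybind11 sketch of the same pattern (module and function names are made up):

```cpp
// Minimal pybind11 sketch: release the GIL around a blocking C++ call and
// reacquire it when the guard is destroyed. Names here are for illustration.
#include <pybind11/pybind11.h>
#include <chrono>
#include <thread>

namespace py = pybind11;

double slow_compute() {
  py::gil_scoped_release release;  // other Python threads may run during the wait
  std::this_thread::sleep_for(std::chrono::milliseconds(100));
  return 42.0;
}  // guard destroyed here, GIL reacquired before returning to Python

PYBIND11_MODULE(demo, m) { m.def("slow_compute", &slow_compute); }
```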
@@ -53,16 +53,19 @@ class PythonBuildVocabConsumer : public BuildVocabConsumer {
 class PythonSaveToDisk : public SaveToDisk {
  public:
   PythonSaveToDisk(const std::string &datasetPath, int32_t numFiles, const std::string &datasetType);
+  ~PythonSaveToDisk() = default;
   Status Save() override;
 };
 
 class PythonTreeGetters : public TreeGetters {
  public:
-  Status GetRow(TensorRow *r) override;
+  Status GetRow(TensorRow *const r) override;
+  ~PythonTreeGetters() = default;
 };
 
 class PythonDatasetSizeGetter : public DatasetSizeGetter {
  public:
   Status GetRow(const std::shared_ptr<TreeAdapter> &tree_adapter, TensorRow *r) override;
+  ~PythonDatasetSizeGetter() = default;
 };
 } // namespace mindspore::dataset
 #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_CONSUMERS_PYTHON_TREE_CONSUMER_H_
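The `= default` destructors added to these consumer classes make the special member explicit and document that no extra cleanup is needed. Assuming the base consumers declare a virtual destructor (not shown in this diff), the defaulted derived destructors are implicitly virtual, so deletion through a base pointer stays well-defined. A sketch with hypothetical classes:

```cpp
// Sketch with hypothetical classes (not the real TreeConsumer hierarchy):
// if the base declares a virtual destructor, a defaulted destructor in the
// derived class is implicitly virtual.
#include <memory>

class Consumer {
 public:
  virtual ~Consumer() = default;  // assumption: the real base does something equivalent
  virtual int Run() = 0;
};

class PyConsumer : public Consumer {
 public:
  ~PyConsumer() = default;  // explicit, implicitly virtual, nothing special to clean up
  int Run() override { return 0; }
};

int main() {
  std::unique_ptr<Consumer> c = std::make_unique<PyConsumer>();
  return c->Run();  // destroyed correctly through the base pointer
}
```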
@@ -62,7 +62,7 @@ Status IteratorConsumer::GetNextAsVector(std::vector<TensorPtr> *out) {
   return Status::OK();
 }
 
-Status IteratorConsumer::GetNextAsMap(std::unordered_map<std::string, TensorPtr> *out_map) {
+Status IteratorConsumer::GetNextAsMap(std::unordered_map<std::string, TensorPtr> *const out_map) {
   RETURN_UNEXPECTED_IF_NULL(out_map);
   out_map->clear();
@@ -79,7 +79,7 @@ Status IteratorConsumer::GetNextAsMap(std::unordered_map<std::string, TensorPtr>
   return Status::OK();
 }
 
-Status IteratorConsumer::GetNextAsOrderedPair(std::vector<std::pair<std::string, std::shared_ptr<Tensor>>> *vec) {
+Status IteratorConsumer::GetNextAsOrderedPair(std::vector<std::pair<std::string, std::shared_ptr<Tensor>>> *const vec) {
   CHECK_FAIL_RETURN_UNEXPECTED(vec != nullptr && vec->empty(), "vec is null or non-empty.");
 
   TensorRow curr_row;
@@ -142,7 +142,7 @@ Status ToDevice::Stop() {
   return Status::OK();
 }
 
-Status ToDevice::GetDataInfo(std::vector<DataType> *types, std::vector<TensorShape> *shapes) {
+Status ToDevice::GetDataInfo(std::vector<DataType> *const types, std::vector<TensorShape> *const shapes) {
   // tree_.root() must be DeviceQueueOp
   std::shared_ptr<DatasetOp> root = std::shared_ptr<DatasetOp>(tree_adapter_->GetRoot());
   CHECK_FAIL_RETURN_UNEXPECTED(root != nullptr, "Root is a nullptr.");
@@ -72,12 +72,12 @@ class IteratorConsumer : public TreeConsumer {
   /// Returns the next row in as a map
   /// \param[out] out std::map of string to Tensor
   /// \return Status error code
-  Status GetNextAsMap(std::unordered_map<std::string, TensorPtr> *out);
+  Status GetNextAsMap(std::unordered_map<std::string, TensorPtr> *const out);
 
   /// Returns the next row in as a vector
   /// \param[out] out std::vector of pairs of string to Tensor
   /// \return Status error code
-  Status GetNextAsOrderedPair(std::vector<std::pair<std::string, std::shared_ptr<Tensor>>> *vec);
+  Status GetNextAsOrderedPair(std::vector<std::pair<std::string, std::shared_ptr<Tensor>>> *const vec);
 
  protected:
   /// Method to return the name of the consumer
@@ -161,7 +161,7 @@ class ToDevice : public TreeConsumer {
   /// Get data info from TDT
   /// \return Status error code
-  virtual Status GetDataInfo(std::vector<DataType> *types, std::vector<TensorShape> *shapes);
+  virtual Status GetDataInfo(std::vector<DataType> *const types, std::vector<TensorShape> *const shapes);
 
  protected:
   /// Method to return the name of the consumer
@@ -27,7 +27,7 @@ Status PreBuiltDatasetCache::Build() {
   return Status::OK();
 }
 
-Status PreBuiltDatasetCache::CreateCacheOp(int32_t num_workers, std::shared_ptr<DatasetOp> *ds) {
+Status PreBuiltDatasetCache::CreateCacheOp(int32_t num_workers, std::shared_ptr<DatasetOp> *const ds) {
   CHECK_FAIL_RETURN_UNEXPECTED(cache_client_ != nullptr, "Cache client has not been created yet.");
   std::shared_ptr<CacheOp> cache_op = nullptr;
   RETURN_IF_NOT_OK(CacheOp::Builder().SetNumWorkers(num_workers).SetClient(cache_client_).Build(&cache_op));
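`CacheOp::Builder()` is a fluent builder: each setter returns the builder so calls chain, and `Build()` reports failure through a `Status` and an out-parameter rather than throwing. A sketch of that shape with hypothetical types (not the real CacheOp API):

```cpp
// Sketch of the fluent-builder-with-Status shape used by CacheOp::Builder
// above, with hypothetical Op/OpBuilder/Status types.
#include <memory>
#include <string>

struct Status {
  bool ok = true;
  std::string msg;
  static Status OK() { return {}; }
};

class Op {
 public:
  explicit Op(int workers) : workers_(workers) {}
  int workers_;
};

class OpBuilder {
 public:
  OpBuilder &SetNumWorkers(int n) {
    num_workers_ = n;
    return *this;  // returning *this is what lets the calls chain
  }
  Status Build(std::shared_ptr<Op> *const out) {
    if (num_workers_ <= 0) return {false, "num_workers must be positive"};
    *out = std::make_shared<Op>(num_workers_);
    return Status::OK();
  }

 private:
  int num_workers_ = 0;
};

int main() {
  std::shared_ptr<Op> op;
  Status rc = OpBuilder().SetNumWorkers(4).Build(&op);
  return rc.ok ? 0 : 1;
}
```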
@@ -32,11 +32,13 @@ class PreBuiltDatasetCache : public DatasetCache {
   /// \param cc a pre-built cache client
   explicit PreBuiltDatasetCache(std::shared_ptr<CacheClient> cc) : cache_client_(std::move(cc)) {}
 
+  ~PreBuiltDatasetCache() = default;
+
   /// Method to initialize the DatasetCache by creating an instance of a CacheClient
   /// \return Status Error code
   Status Build() override;
 
-  Status CreateCacheOp(int32_t num_workers, std::shared_ptr<DatasetOp> *ds) override;
+  Status CreateCacheOp(int32_t num_workers, std::shared_ptr<DatasetOp> *const ds) override;
 
   Status ValidateParams() override { return Status::OK(); }
@@ -388,7 +388,7 @@ Status DatasetNode::AcceptAfter(IRNodePass *const p, bool *const modified) {
   return p->VisitAfter(shared_from_this(), modified);
 }
 
-Status DatasetNode::GetShardId(int32_t *shard_id) {
+Status DatasetNode::GetShardId(int32_t *const shard_id) {
   if (!Children().empty()) {
     // Get shard id from the child node
     return Children()[0]->GetShardId(shard_id);
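The body shown here is the default behaviour: a non-leaf `DatasetNode` forwards `GetShardId` to its first child, and source nodes presumably override it to report their own shard id (the leaf branch is outside this hunk). A sketch of the delegation pattern with hypothetical node classes:

```cpp
// Sketch of the delegation pattern with hypothetical node classes: an
// intermediate node forwards the query to its first child, a source node
// answers it directly.
#include <cstdint>
#include <memory>
#include <vector>

struct Status {
  bool ok;
  static Status OK() { return {true}; }
};

class Node {
 public:
  virtual ~Node() = default;
  virtual Status GetShardId(int32_t *const shard_id) {
    if (!children_.empty()) {
      return children_[0]->GetShardId(shard_id);  // ask the subtree below
    }
    return {false};  // a leaf that never overrides this cannot answer
  }
  void AddChild(std::shared_ptr<Node> child) { children_.push_back(std::move(child)); }

 protected:
  std::vector<std::shared_ptr<Node>> children_;
};

class SourceNode : public Node {
 public:
  explicit SourceNode(int32_t shard) : shard_(shard) {}
  Status GetShardId(int32_t *const shard_id) override {
    *shard_id = shard_;
    return Status::OK();
  }

 private:
  int32_t shard_;
};

int main() {
  auto root = std::make_shared<Node>();
  root->AddChild(std::make_shared<SourceNode>(3));
  int32_t shard = -1;
  return root->GetShardId(&shard).ok ? static_cast<int>(shard) : -1;
}
```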
@@ -169,7 +169,7 @@ class DatasetNode : public std::enable_shared_from_this<DatasetNode> {
   /// \brief Pure virtual function for derived class to get the shard id of specific node
   /// \return Status Status::OK() if get shard id successfully
-  virtual Status GetShardId(int32_t *shard_id);
+  virtual Status GetShardId(int32_t *const shard_id);
 
   /// \brief Gets the dataset size
   /// \param[in] size_getter Shared pointer to DatasetSizeGetter
@@ -38,6 +38,9 @@ class Execute {
   /// \brief Constructor
   explicit Execute(std::shared_ptr<TensorOperation> op);
 
+  /// \brief Destructor
+  ~Execute();
+
 #ifdef ENABLE_ANDROID
   /// \brief callable function to execute the TensorOperation in eager mode
   /// \param[inout] input - the tensor to be transformed
@@ -220,7 +220,7 @@ static Status JpegReadScanlines(jpeg_decompress_struct *const cinfo, int max_sca
     }
     if (cinfo->out_color_space == JCS_CMYK && num_lines_read > 0) {
       for (int i = 0; i < crop_w; ++i) {
-        int cmyk_pixel = 4 * i + offset;
+        const int cmyk_pixel = 4 * i + offset;
         const int c = scanline_ptr[cmyk_pixel];
         const int m = scanline_ptr[cmyk_pixel + 1];
         const int y = scanline_ptr[cmyk_pixel + 2];
@@ -99,7 +99,7 @@ static Status JpegReadScanlines(jpeg_decompress_struct *const cinfo, int max_sca
     }
     if (cinfo->out_color_space == JCS_CMYK && num_lines_read > 0) {
      for (int i = 0; i < crop_w; ++i) {
-        int cmyk_pixel = 4 * i + offset;
+        const int cmyk_pixel = 4 * i + offset;
         const int c = scanline_ptr[cmyk_pixel];
         const int m = scanline_ptr[cmyk_pixel + 1];
         const int y = scanline_ptr[cmyk_pixel + 2];
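Both hunks sit in the CMYK branch of the JPEG scanline reader: each pixel occupies four bytes (`c`, `m`, `y`, and presumably a key byte at `cmyk_pixel + 3`, outside the hunk), and the loop converts it to RGB for the output buffer. The exact formula is not shown in this diff; one common variant, assuming Adobe-style inverted CMYK as libjpeg usually delivers it, is sketched below:

```cpp
// Hedged sketch of a CMYK -> RGB pixel conversion like the one this loop
// feeds (the actual formula is outside the hunk). Assumes Adobe-style
// inverted CMYK, i.e. larger value = less ink; without that convention the
// components would first need inverting (255 - x).
#include <cstdint>

inline void CmykToRgb(int c, int m, int y, int k,
                      uint8_t *const r, uint8_t *const g, uint8_t *const b) {
  // All components are in [0, 255]; scale each channel by the key component.
  *r = static_cast<uint8_t>((c * k) / 255);
  *g = static_cast<uint8_t>((m * k) / 255);
  *b = static_cast<uint8_t>((y * k) / 255);
}
```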
@@ -119,7 +119,7 @@ static Status JpegReadScanlines(jpeg_decompress_struct *const cinfo, int max_sca
         buffer[3 * i + 2] = b;
       }
     } else if (num_lines_read > 0) {
-      int copy_status = memcpy_s(buffer, buffer_size, scanline_ptr + offset, stride);
+      auto copy_status = memcpy_s(buffer, buffer_size, scanline_ptr + offset, stride);
       if (copy_status != 0) {
         jpeg_destroy_decompress(cinfo);
         RETURN_STATUS_UNEXPECTED("memcpy failed");