@@ -29,17 +29,21 @@
#include "minddata/dataset/engine/datasetops/source/coco_op.h"
#include "minddata/dataset/engine/datasetops/source/csv_op.h"
#include "minddata/dataset/engine/datasetops/source/image_folder_op.h"
#ifndef ENABLE_ANDROID
#include "minddata/dataset/engine/datasetops/source/manifest_op.h"
#endif
#include "minddata/dataset/engine/datasetops/source/mnist_op.h"
#include "minddata/dataset/engine/datasetops/source/random_data_op.h"
#include "minddata/dataset/engine/datasetops/source/text_file_op.h"
#include "minddata/dataset/engine/datasetops/source/tf_reader_op.h"
#ifndef ENABLE_ANDROID
#include "minddata/dataset/engine/datasetops/source/tf_reader_op.h"
#include "minddata/dataset/engine/datasetops/source/voc_op.h"
#endif
// Dataset operator headers (in alphabetical order)
#include "minddata/dataset/engine/datasetops/batch_op.h"
#ifndef ENABLE_ANDROID
#include "minddata/dataset/engine/datasetops/bucket_batch_by_length_op.h"
#endif
#include "minddata/dataset/engine/datasetops/build_vocab_op.h"
#include "minddata/dataset/engine/datasetops/concat_op.h"
#include "minddata/dataset/engine/datasetops/map_op/map_op.h"
@@ -286,6 +290,7 @@ std::shared_ptr<BatchDataset> Dataset::Batch(int32_t batch_size, bool drop_remai
return ds;
}
#ifndef ENABLE_ANDROID
// Function to create a BucketBatchByLength dataset
std::shared_ptr<BucketBatchByLengthDataset> Dataset::BucketBatchByLength(
const std::vector<std::string> &column_names, const std::vector<int32_t> &bucket_boundaries,
@@ -305,7 +310,6 @@ std::shared_ptr<BucketBatchByLengthDataset> Dataset::BucketBatchByLength(
return ds;
}
#ifndef ENABLE_ANDROID
// Function to create a Vocab from dataset
std::shared_ptr<Vocab> Dataset::BuildVocab(const std::vector<std::string> &columns,
const std::pair<int64_t, int64_t> &freq_range, int64_t top_k,
@@ -1520,6 +1524,7 @@ std::vector<std::shared_ptr<DatasetOp>> TextFileDataset::Build() {
return node_ops;
}
#ifndef ENABLE_ANDROID
// Validator for TFRecordDataset
bool TFRecordDataset::ValidateParams() { return true; }
@@ -1570,7 +1575,6 @@ std::vector<std::shared_ptr<DatasetOp>> TFRecordDataset::Build() {
return node_ops;
}
#ifndef ENABLE_ANDROID
// Constructor for VOCDataset
VOCDataset::VOCDataset(const std::string &dataset_dir, const std::string &task, const std::string &usage,
const std::map<std::string, int32_t> &class_indexing, bool decode,
@@ -1685,6 +1689,7 @@ bool BatchDataset::ValidateParams() {
return true;
}
#ifndef ENABLE_ANDROID
BucketBatchByLengthDataset::BucketBatchByLengthDataset(
const std::vector<std::string> &column_names, const std::vector<int32_t> &bucket_boundaries,
const std::vector<int32_t> &bucket_batch_sizes, TensorRow (*element_length_function)(TensorRow),
@@ -1758,7 +1763,6 @@ bool BucketBatchByLengthDataset::ValidateParams() {
return true;
}
#ifndef ENABLE_ANDROID
BuildVocabDataset::BuildVocabDataset(std::shared_ptr<Vocab> vocab, const std::vector<std::string> &columns,
const std::pair<int64_t, int64_t> &freq_range, int64_t top_k,
const std::vector<std::string> &special_tokens, bool special_first)
@@ -271,8 +271,6 @@ class ExecutionTree {
std::unique_ptr<ProfilingManager> profiling_manager_;  // Profiling manager
bool optimize_;  // Flag to enable optional optimizations
};
inline bool operator==(const ExecutionTree::Iterator &lhs, const ExecutionTree::Iterator &rhs) { return lhs == rhs; }
}  // namespace dataset
}  // namespace mindspore
@@ -62,7 +62,7 @@ class Cifar100Dataset;
class CLUEDataset;
class CocoDataset;
class CSVDataset;
struct CsvBase;
class CsvBase;
class ImageFolderDataset;
#ifndef ENABLE_ANDROID
class ManifestDataset;
@@ -70,14 +70,14 @@ class ManifestDataset;
class MnistDataset;
class RandomDataset;
class TextFileDataset;
class TFRecordDataset;
#ifndef ENABLE_ANDROID
class TFRecordDataset;
class VOCDataset;
#endif
// Dataset Op classes (in alphabetical order)
class BatchDataset;
class BucketBatchByLengthDataset;
#ifndef ENABLE_ANDROID
class BucketBatchByLengthDataset;
class BuildVocabDataset;
#endif
class ConcatDataset;
@@ -325,6 +325,7 @@ std::shared_ptr<TextFileDataset> TextFile(const std::vector<std::string> &datase
ShuffleMode shuffle = ShuffleMode::kGlobal, int32_t num_shards = 1,
int32_t shard_id = 0);
#ifndef ENABLE_ANDROID
/// \brief Function to create a TFRecordDataset
/// \param[in] dataset_files List of files to be read to search for a pattern of files. The list
/// will be sorted in a lexicographical order.
@@ -399,7 +400,6 @@ std::shared_ptr<TFRecordDataset> TFRecord(const std::vector<std::string> &datase
return ds;
}
#ifndef ENABLE_ANDROID
/// \brief Function to create a VOCDataset
/// \notes The generated dataset has multi-columns :
/// - task='Detection', column: [['image', dtype=uint8], ['bbox', dtype=float32], ['label', dtype=uint32],
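
To make the factory declarations in this header concrete, here is a hedged usage sketch based only on the signatures visible in this diff. The include path, namespace, and dataset file path are assumptions, and the call assumes the TFRecord factory's remaining parameters are defaulted:

    // Assumptions: the public header path and the api namespace are guesses; the
    // .tfrecord path is a placeholder; TFRecordDataset is assumed to derive from
    // Dataset, like the other *Dataset classes shown in this diff.
    #include "minddata/dataset/include/datasets.h"
    using namespace mindspore::dataset::api;

    int main() {
      auto ds = TFRecord({"/path/to/train-0001.tfrecord"});    // list of dataset files, per the doc comment above
      auto batched = ds->Batch(32, /*drop_remainder=*/false);  // Batch as declared in the Dataset class in the next hunk
      return 0;
    }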
@@ -481,6 +481,7 @@ class Dataset : public std::enable_shared_from_this<Dataset> {
/// \return Shared pointer to the current BatchDataset
std::shared_ptr<BatchDataset> Batch(int32_t batch_size, bool drop_remainder = false);
#ifndef ENABLE_ANDROID
/// \brief Function to create a BucketBatchByLengthDataset
/// \notes Combines batch_size number of consecutive rows into batches
/// \param[in] column_names Columns passed to element_length_function
@@ -510,7 +511,6 @@ class Dataset : public std::enable_shared_from_this<Dataset> {
const std::map<std::string, std::pair<TensorShape, std::shared_ptr<Tensor>>> &pad_info = {},
bool pad_to_bucket_boundary = false, bool drop_remainder = false);
#ifndef ENABLE_ANDROID
/// \brief Function to create a Vocab from source dataset
/// \notes Build a vocab from a dataset. This would collect all the unique words in a dataset and return a vocab
/// which contains top_k most frequent words (if top_k is specified)
@@ -1150,6 +1150,7 @@ class BatchDataset : public Dataset {
std::map<std::string, std::pair<TensorShape, std::shared_ptr<Tensor>>> pad_map_;
};
#ifndef ENABLE_ANDROID
class BucketBatchByLengthDataset : public Dataset {
public:
/// \brief Constructor
@@ -1180,7 +1181,6 @@ class BucketBatchByLengthDataset : public Dataset {
bool drop_remainder_;
};
#ifndef ENABLE_ANDROID
class BuildVocabDataset : public Dataset {
public:
/// \brief Constructor