From c24870604efeb3450f1e6a370cfd1c0fd487bd53 Mon Sep 17 00:00:00 2001
From: Eric
Date: Thu, 17 Sep 2020 17:14:29 -0400
Subject: [PATCH] Added fix for arm 64 build

---
 mindspore/ccsrc/minddata/dataset/api/datasets.cc    | 12 ++++++++----
 .../ccsrc/minddata/dataset/engine/execution_tree.h  |  2 --
 mindspore/ccsrc/minddata/dataset/include/datasets.h | 12 ++++++------
 3 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/mindspore/ccsrc/minddata/dataset/api/datasets.cc b/mindspore/ccsrc/minddata/dataset/api/datasets.cc
index 648d566935..cb7b5f287a 100644
--- a/mindspore/ccsrc/minddata/dataset/api/datasets.cc
+++ b/mindspore/ccsrc/minddata/dataset/api/datasets.cc
@@ -29,17 +29,21 @@
 #include "minddata/dataset/engine/datasetops/source/coco_op.h"
 #include "minddata/dataset/engine/datasetops/source/csv_op.h"
 #include "minddata/dataset/engine/datasetops/source/image_folder_op.h"
+#ifndef ENABLE_ANDROID
 #include "minddata/dataset/engine/datasetops/source/manifest_op.h"
+#endif
 #include "minddata/dataset/engine/datasetops/source/mnist_op.h"
 #include "minddata/dataset/engine/datasetops/source/random_data_op.h"
 #include "minddata/dataset/engine/datasetops/source/text_file_op.h"
-#include "minddata/dataset/engine/datasetops/source/tf_reader_op.h"
 #ifndef ENABLE_ANDROID
+#include "minddata/dataset/engine/datasetops/source/tf_reader_op.h"
 #include "minddata/dataset/engine/datasetops/source/voc_op.h"
 #endif
 // Dataset operator headers (in alphabetical order)
 #include "minddata/dataset/engine/datasetops/batch_op.h"
+#ifndef ENABLE_ANDROID
 #include "minddata/dataset/engine/datasetops/bucket_batch_by_length_op.h"
+#endif
 #include "minddata/dataset/engine/datasetops/build_vocab_op.h"
 #include "minddata/dataset/engine/datasetops/concat_op.h"
 #include "minddata/dataset/engine/datasetops/map_op/map_op.h"
@@ -286,6 +290,7 @@ std::shared_ptr<BatchDataset> Dataset::Batch(int32_t batch_size, bool drop_remai
   return ds;
 }
 
+#ifndef ENABLE_ANDROID
 // Function to create a BucketBatchByLength dataset
 std::shared_ptr<BucketBatchByLengthDataset> Dataset::BucketBatchByLength(
   const std::vector<std::string> &column_names, const std::vector<int32_t> &bucket_boundaries,
@@ -305,7 +310,6 @@ std::shared_ptr<BucketBatchByLengthDataset> Dataset::BucketBatchByLength(
   return ds;
 }
 
-#ifndef ENABLE_ANDROID
 // Function to create a Vocab from dataset
 std::shared_ptr<BuildVocabDataset> Dataset::BuildVocab(const std::vector<std::string> &columns,
                                                        const std::pair<int64_t, int64_t> &freq_range, int64_t top_k,
@@ -1520,6 +1524,7 @@ std::vector<std::shared_ptr<DatasetOp>> TextFileDataset::Build() {
   return node_ops;
 }
 
+#ifndef ENABLE_ANDROID
 // Validator for TFRecordDataset
 bool TFRecordDataset::ValidateParams() { return true; }
 
@@ -1570,7 +1575,6 @@ std::vector<std::shared_ptr<DatasetOp>> TFRecordDataset::Build() {
   return node_ops;
 }
 
-#ifndef ENABLE_ANDROID
 // Constructor for VOCDataset
 VOCDataset::VOCDataset(const std::string &dataset_dir, const std::string &task, const std::string &usage,
                        const std::map<std::string, int32_t> &class_indexing, bool decode,
@@ -1685,6 +1689,7 @@ bool BatchDataset::ValidateParams() {
   return true;
 }
 
+#ifndef ENABLE_ANDROID
 BucketBatchByLengthDataset::BucketBatchByLengthDataset(
   const std::vector<std::string> &column_names, const std::vector<int32_t> &bucket_boundaries,
   const std::vector<int32_t> &bucket_batch_sizes, TensorRow (*element_length_function)(TensorRow),
@@ -1758,7 +1763,6 @@ bool BucketBatchByLengthDataset::ValidateParams() {
   return true;
 }
 
-#ifndef ENABLE_ANDROID
 BuildVocabDataset::BuildVocabDataset(std::shared_ptr<Vocab> vocab, const std::vector<std::string> &columns,
                                      const std::pair<int64_t, int64_t> &freq_range, int64_t top_k,
                                      const std::vector<std::string> &special_tokens, bool special_first)
diff --git a/mindspore/ccsrc/minddata/dataset/engine/execution_tree.h b/mindspore/ccsrc/minddata/dataset/engine/execution_tree.h
index 0525395c05..ed58b79a84 100644
--- a/mindspore/ccsrc/minddata/dataset/engine/execution_tree.h
+++ b/mindspore/ccsrc/minddata/dataset/engine/execution_tree.h
@@ -271,8 +271,6 @@ class ExecutionTree {
   std::unique_ptr<ProfilingManager> profiling_manager_;  // Profiling manager
   bool optimize_;                                        // Flag to enable optional optimizations
 };
-
-inline bool operator==(const ExecutionTree::Iterator &lhs, const ExecutionTree::Iterator &rhs) { return lhs == rhs; }
 }  // namespace dataset
 }  // namespace mindspore
 
diff --git a/mindspore/ccsrc/minddata/dataset/include/datasets.h b/mindspore/ccsrc/minddata/dataset/include/datasets.h
index e9b6160240..965cec366d 100644
--- a/mindspore/ccsrc/minddata/dataset/include/datasets.h
+++ b/mindspore/ccsrc/minddata/dataset/include/datasets.h
@@ -62,7 +62,7 @@ class Cifar100Dataset;
 class CLUEDataset;
 class CocoDataset;
 class CSVDataset;
-struct CsvBase;
+class CsvBase;
 class ImageFolderDataset;
 #ifndef ENABLE_ANDROID
 class ManifestDataset;
@@ -70,14 +70,14 @@ class ManifestDataset;
 class MnistDataset;
 class RandomDataset;
 class TextFileDataset;
-class TFRecordDataset;
 #ifndef ENABLE_ANDROID
+class TFRecordDataset;
 class VOCDataset;
 #endif
 // Dataset Op classes (in alphabetical order)
 class BatchDataset;
-class BucketBatchByLengthDataset;
 #ifndef ENABLE_ANDROID
+class BucketBatchByLengthDataset;
 class BuildVocabDataset;
 #endif
 class ConcatDataset;
@@ -325,6 +325,7 @@ std::shared_ptr<TextFileDataset> TextFile(const std::vector<std::string> &datase
                                           ShuffleMode shuffle = ShuffleMode::kGlobal, int32_t num_shards = 1,
                                           int32_t shard_id = 0);
 
+#ifndef ENABLE_ANDROID
 /// \brief Function to create a TFRecordDataset
 /// \param[in] dataset_files List of files to be read to search for a pattern of files. The list
 ///     will be sorted in a lexicographical order.
@@ -399,7 +400,6 @@ std::shared_ptr<TFRecordDataset> TFRecord(const std::vector<std::string> &datase
   return ds;
 }
 
-#ifndef ENABLE_ANDROID
 /// \brief Function to create a VOCDataset
 /// \notes The generated dataset has multi-columns :
 ///     - task='Detection', column: [['image', dtype=uint8], ['bbox', dtype=float32], ['label', dtype=uint32],
@@ -481,6 +481,7 @@ class Dataset : public std::enable_shared_from_this<Dataset> {
   /// \return Shared pointer to the current BatchDataset
   std::shared_ptr<BatchDataset> Batch(int32_t batch_size, bool drop_remainder = false);
 
+#ifndef ENABLE_ANDROID
   /// \brief Function to create a BucketBatchByLengthDataset
   /// \notes Combines batch_size number of consecutive rows into batches
   /// \param[in] column_names Columns passed to element_length_function
@@ -510,7 +511,6 @@ class Dataset : public std::enable_shared_from_this<Dataset> {
     const std::map<std::string, std::pair<TensorShape, std::shared_ptr<Tensor>>> &pad_info = {},
     bool pad_to_bucket_boundary = false, bool drop_remainder = false);
 
-#ifndef ENABLE_ANDROID
   /// \brief Function to create a Vocab from source dataset
   /// \notes Build a vocab from a dataset. This would collect all the unique words in a dataset and return a vocab
   ///     which contains top_k most frequent words (if top_k is specified)
@@ -1150,6 +1150,7 @@ class BatchDataset : public Dataset {
   std::map<std::string, std::pair<TensorShape, std::shared_ptr<Tensor>>> pad_map_;
 };
 
+#ifndef ENABLE_ANDROID
 class BucketBatchByLengthDataset : public Dataset {
  public:
   /// \brief Constructor
@@ -1180,7 +1181,6 @@ class BucketBatchByLengthDataset : public Dataset {
   bool drop_remainder_;
 };
 
-#ifndef ENABLE_ANDROID
 class BuildVocabDataset : public Dataset {
  public:
   /// \brief Constructor
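
Note on the pattern this patch applies: everything the Android/ARM64 lite build cannot support (the TFRecord, VOC, Manifest and BucketBatchByLength pieces) is fenced with #ifndef ENABLE_ANDROID so it drops out when that macro is defined. The standalone C++ sketch below illustrates the same guard under that assumption; MakeTextDataset and MakeTFRecordDataset are hypothetical stand-ins for this example only, not MindSpore APIs.

    // build_guard_demo.cc -- minimal sketch of the conditional-compilation guard.
    // MakeTextDataset/MakeTFRecordDataset are hypothetical helpers, not MindSpore APIs.
    #include <iostream>
    #include <memory>
    #include <string>
    #include <vector>

    using Rows = std::vector<std::string>;

    // Present in every build, like the TextFile/Mnist sources in the patch.
    std::shared_ptr<Rows> MakeTextDataset() { return std::make_shared<Rows>(Rows{"line1", "line2"}); }

    #ifndef ENABLE_ANDROID
    // Compiled only for the full build, mirroring how TFRecordDataset, VOCDataset and
    // BucketBatchByLengthDataset are fenced off for the Android/ARM64 lite build.
    std::shared_ptr<Rows> MakeTFRecordDataset() { return std::make_shared<Rows>(Rows{"record1"}); }
    #endif

    int main() {
      std::cout << "text rows: " << MakeTextDataset()->size() << std::endl;
    #ifndef ENABLE_ANDROID
      std::cout << "tfrecord rows: " << MakeTFRecordDataset()->size() << std::endl;
    #endif
      return 0;
    }

Compiling with g++ build_guard_demo.cc keeps both code paths; g++ -DENABLE_ANDROID build_guard_demo.cc removes the guarded one, which is effectively what the lite build does to the MindSpore sources touched above.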