Browse Source

!18173 [MD] Apply r1.2 codecheck fixes to master

Merge pull request !18173 from harshvardhangupta/codecheck_fix_master
tags/v1.3.0
i-robot Gitee 4 years ago
parent
commit
791d6ae0a2
5 changed files with 12 additions and 16 deletions
  1. +3
    -1
      mindspore/ccsrc/minddata/dataset/api/python/pybind_conversion.cc
  2. +8
    -8
      mindspore/ccsrc/minddata/dataset/engine/datasetops/source/album_op.cc
  3. +1
    -1
      mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/transfer_node.cc
  4. +0
    -3
      mindspore/dataset/engine/datasets.py
  5. +0
    -3
      mindspore/dataset/engine/iterators.py

+ 3
- 1
mindspore/ccsrc/minddata/dataset/api/python/pybind_conversion.cc View File

@@ -248,10 +248,12 @@ Status ToJson(const py::handle &padded_sample, nlohmann::json *const padded_samp
}

Status toPadInfo(py::dict value, std::map<std::string, std::pair<TensorShape, std::shared_ptr<Tensor>>> *pad_info) {
constexpr size_t kExpectedTupleSize = 2;
for (auto p : value) {
if (!p.second.is_none()) {
auto tp = py::reinterpret_borrow<py::tuple>(p.second);
CHECK_FAIL_RETURN_UNEXPECTED(tp.size() == 2, "tuple in pad_info must be (list,int) or (list,float)");
CHECK_FAIL_RETURN_UNEXPECTED(tp.size() == kExpectedTupleSize,
"tuple in pad_info must be (list,int) or (list,float)");
TensorShape shape = tp[0].is_none() ? TensorShape::CreateUnknownRankShape() : TensorShape(tp[0]);
std::shared_ptr<Tensor> pad_val = nullptr;
if (py::isinstance<py::str>(tp[1])) {


+ 8
- 8
mindspore/ccsrc/minddata/dataset/engine/datasetops/source/album_op.cc View File

@@ -205,9 +205,9 @@ Status AlbumOp::LoadIntArrayTensor(const nlohmann::json &json_obj, uint32_t col_
std::vector<int32_t> data;

// Iterate over the integer list and add those values to the output shape tensor
auto items = json_obj.items();
using it_type = decltype(items.begin());
(void)std::transform(items.begin(), items.end(), std::back_inserter(data), [](it_type j) { return j.value(); });
auto items1 = json_obj.items();
using it_type = decltype(items1.begin());
(void)std::transform(items1.begin(), items1.end(), std::back_inserter(data), [](it_type j) { return j.value(); });

RETURN_IF_NOT_OK(Tensor::CreateFromVector(data, &label));
} else {
@@ -234,9 +234,9 @@ Status AlbumOp::LoadFloatArrayTensor(const nlohmann::json &json_obj, uint32_t co
std::vector<float> data;

// Iterate over the float list and add those values to the output tensor
auto items = json_obj.items();
using it_type = decltype(items.begin());
(void)std::transform(items.begin(), items.end(), std::back_inserter(data), [](it_type j) { return j.value(); });
auto items1 = json_obj.items();
using it_type = decltype(items1.begin());
(void)std::transform(items1.begin(), items1.end(), std::back_inserter(data), [](it_type j) { return j.value(); });

RETURN_IF_NOT_OK(Tensor::CreateFromVector(data, &float_array));
} else {
@@ -282,8 +282,8 @@ Status AlbumOp::LoadFloatTensor(const nlohmann::json &json_obj, uint32_t col_num
MS_LOG(INFO) << "double found: " << json_obj << ".";
RETURN_IF_NOT_OK(Tensor::CreateScalar<double>(data, &float_tensor));
} else if (data_schema_->column(col_num).type() == DataType::DE_FLOAT32) {
float data = json_obj;
RETURN_IF_NOT_OK(Tensor::CreateScalar<float>(data, &float_tensor));
float data1 = json_obj;
RETURN_IF_NOT_OK(Tensor::CreateScalar<float>(data1, &float_tensor));
MS_LOG(INFO) << "float found: " << json_obj << ".";
}
row->push_back(std::move(float_tensor));


+ 1
- 1
mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/transfer_node.cc View File

@@ -33,7 +33,7 @@ namespace dataset {
// Constructor for TransferNode
TransferNode::TransferNode(std::shared_ptr<DatasetNode> child, std::string queue_name, std::string device_type,
int32_t device_id, bool send_epoch_end, int32_t total_batch, bool create_data_info_queue)
: prefetch_size_(16),
: prefetch_size_(GlobalContext::config_manager()->prefetch_size()),
queue_name_(std::move(queue_name)),
device_type_(std::move(device_type)),
send_epoch_end_(send_epoch_end),


+ 0
- 3
mindspore/dataset/engine/datasets.py View File

@@ -204,7 +204,6 @@ class Dataset:
self.num_parallel_workers = num_parallel_workers
self.cache = cache

# todo check the following:
self._device_iter = 0
self._input_indexs = ()
self.saved_output_types = None
@@ -2922,7 +2921,6 @@ class _ToDevice:
self._to_device.Init(ir_tree)
self._runtime_context.AssignConsumer(self._to_device)

# todo remove next when ContextManager is done
ITERATORS_LIST.append(weakref.ref(self))
_unset_iterator_cleanup()

@@ -4043,7 +4041,6 @@ class TFRecordDataset(SourceDataset):
shuffle=Shuffle.GLOBAL, num_shards=None, shard_id=None, shard_equal_rows=False, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
# todo push down to c++
self.dataset_files = self._find_files(dataset_files)
self.dataset_files.sort()



+ 0
- 3
mindspore/dataset/engine/iterators.py View File

@@ -86,10 +86,8 @@ class Iterator:
self._transform_tensor = lambda t: Tensor.from_numpy(t.as_array())
self.__index = 0

# todo remove next when ContextManager is done
ITERATORS_LIST.append(weakref.ref(self))
_unset_iterator_cleanup()
#######

def __iter__(self):
return self
@@ -184,7 +182,6 @@ class TupleIterator(Iterator):
if columns is not None:
if not isinstance(columns, list):
columns = [columns]
# todo: move next to IR
dataset = dataset.project(columns)
super().__init__(dataset, num_epochs, output_numpy, do_copy)



Loading…
Cancel
Save