From: @luoyang42
Reviewed-by: @liucunwei, @heleiwang
Signed-off-by: @liucunwei
pull/14976/MERGE
@@ -191,7 +191,7 @@ Execute::Execute(const std::vector<std::reference_wrapper<TensorTransform>> ops,
 }
 // Execute function for the example vector case: auto decode(new vision::Decode());
-Execute::Execute(std::vector<TensorTransform *> ops, MapTargetDevice deviceType, uint32_t device_id) {
+Execute::Execute(const std::vector<TensorTransform *> &ops, MapTargetDevice deviceType, uint32_t device_id) {
   // Initialize the transforms_ and other context
   for (auto &op : ops) {
     std::shared_ptr<TensorTransform> smart_ptr_op(op);
@@ -79,7 +79,7 @@ size_t DETensor::DataSize() const {
   }
 #endif
   EXCEPTION_IF_NULL(tensor_impl_);
-  return tensor_impl_->SizeInBytes();
+  return static_cast<uint32_t>(tensor_impl_->SizeInBytes());
 }
 const std::vector<int64_t> &DETensor::Shape() const { return shape_; }
@@ -77,14 +77,6 @@ Status TransferNode::Build(std::vector<std::shared_ptr<DatasetOp>> *const node_o
   // https://gitee.com/mindspore/mindspore/issues/I39J9A
   // Link _c_expression.so and _c_dataengine.so simultaneously will cause heap overflow because MindData uses MSContext.
   // We should find a new way to get device_type here.
-  // if (device_type_.empty()) {
-  //   auto context = MsContext::GetInstance();
-  //   if (context == nullptr) {
-  //     device_type_ = kCPUDevice;
-  //   } else {
-  //     device_type_ = context->get_param<std::string>(MS_CTX_DEVICE_TARGET);
-  //   }
-  // }
   if (device_type_.empty()) {
     device_type_ = kCPUDevice;
   }
@@ -48,7 +48,7 @@ class Execute {
                    MapTargetDevice deviceType = MapTargetDevice::kCpu, uint32_t device_id = 0);
   explicit Execute(const std::vector<std::reference_wrapper<TensorTransform>> ops,
                    MapTargetDevice deviceType = MapTargetDevice::kCpu, uint32_t device_id = 0);
-  explicit Execute(std::vector<TensorTransform *> ops, MapTargetDevice deviceType = MapTargetDevice::kCpu,
+  explicit Execute(const std::vector<TensorTransform *> &ops, MapTargetDevice deviceType = MapTargetDevice::kCpu,
                    uint32_t device_id = 0);
   /// \brief Destructor
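For reference, a minimal sketch of how the updated raw-pointer overload would be invoked, following the "example vector case" comment in the first hunk. The header paths, the vision::Decode/vision::Resize arguments, and the Execute::operator()(const MSTensor &, MSTensor *) call are assumptions based on the public MindData API, not part of this change:

// Sketch only (not part of this diff). Header paths follow the MindSpore Lite
// "include/dataset" layout and are an assumption here.
#include <vector>

#include "include/api/status.h"
#include "include/api/types.h"
#include "include/dataset/execute.h"
#include "include/dataset/transforms.h"
#include "include/dataset/vision.h"

int main() {
  using mindspore::dataset::Execute;
  using mindspore::dataset::TensorTransform;
  namespace vision = mindspore::dataset::vision;

  // Raw pointers, matching the "auto decode(new vision::Decode());" comment above.
  // Execute wraps each pointer in a std::shared_ptr (see the first hunk), so it
  // takes ownership of the transforms.
  TensorTransform *decode = new vision::Decode();
  TensorTransform *resize = new vision::Resize({224, 224});
  std::vector<TensorTransform *> ops = {decode, resize};

  // With this change the vector is passed by const reference; deviceType and
  // device_id keep their defaults (kCpu, 0).
  Execute transform(ops);

  // Apply the transform pipeline in place; the empty MSTensor is a placeholder
  // for real encoded image bytes.
  mindspore::MSTensor image;
  mindspore::Status rc = transform(image, &image);
  return rc.IsOk() ? 0 : 1;
}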
@@ -2004,7 +2004,6 @@ class BatchDataset(Dataset):
     def __del__(self):
         if hasattr(self, 'process_pool') and self.process_pool is not None:
-            logger.info("Batch process pool is being terminated.")
             self.process_pool.close()
@@ -2398,7 +2397,6 @@ class MapDataset(Dataset):
     def __del__(self):
         if hasattr(self, 'process_pool') and self.process_pool is not None:
-            logger.info("Map process pool is being terminated.")
             self.process_pool.close()
             self.process_pool.join()
@@ -3247,7 +3245,6 @@ class SamplerFn:
     def _subprocess_handle(eof, signum, frame):
-        logger.info("The subprocess receives a termination signal.")
         eof.set()