Merge pull request !2900 from islam_amin/object_ops_cc_float
tags/v0.6.0-beta
@@ -60,7 +60,7 @@
 #include "dataset/kernels/image/random_crop_decode_resize_op.h"
 #include "dataset/kernels/image/random_crop_op.h"
 #include "dataset/kernels/image/random_crop_with_bbox_op.h"
-#include "dataset/kernels/image/random_horizontal_flip_bbox_op.h"
+#include "dataset/kernels/image/random_horizontal_flip_with_bbox_op.h"
 #include "dataset/kernels/image/random_horizontal_flip_op.h"
 #include "dataset/kernels/image/random_resize_op.h"
 #include "dataset/kernels/image/random_resize_with_bbox_op.h"

@@ -15,7 +15,7 @@ add_library(kernels-image OBJECT
         random_crop_op.cc
         random_crop_with_bbox_op.cc
         random_horizontal_flip_op.cc
-        random_horizontal_flip_bbox_op.cc
+        random_horizontal_flip_with_bbox_op.cc
         bounding_box_augment_op.cc
         random_resize_op.cc
         random_rotation_op.cc

@@ -43,28 +43,29 @@ Status BoundingBoxAugmentOp::Compute(const TensorRow &input, TensorRow *output)
   std::shared_ptr<Tensor> crop_out;
   std::shared_ptr<Tensor> res_out;
   std::shared_ptr<CVTensor> input_restore = CVTensor::AsCVTensor(input[0]);
   for (uint32_t i = 0; i < num_to_aug; i++) {
-    uint32_t min_x = 0;
-    uint32_t min_y = 0;
-    uint32_t b_w = 0;
-    uint32_t b_h = 0;
+    float min_x = 0;
+    float min_y = 0;
+    float b_w = 0;
+    float b_h = 0;
     // get the required items
-    input[1]->GetItemAt<uint32_t>(&min_x, {selected_boxes[i], 0});
-    input[1]->GetItemAt<uint32_t>(&min_y, {selected_boxes[i], 1});
-    input[1]->GetItemAt<uint32_t>(&b_w, {selected_boxes[i], 2});
-    input[1]->GetItemAt<uint32_t>(&b_h, {selected_boxes[i], 3});
-    Crop(input_restore, &crop_out, min_x, min_y, b_w, b_h);
+    RETURN_IF_NOT_OK(input[1]->GetItemAt<float>(&min_x, {selected_boxes[i], 0}));
+    RETURN_IF_NOT_OK(input[1]->GetItemAt<float>(&min_y, {selected_boxes[i], 1}));
+    RETURN_IF_NOT_OK(input[1]->GetItemAt<float>(&b_w, {selected_boxes[i], 2}));
+    RETURN_IF_NOT_OK(input[1]->GetItemAt<float>(&b_h, {selected_boxes[i], 3}));
+    RETURN_IF_NOT_OK(Crop(input_restore, &crop_out, static_cast<int>(min_x), static_cast<int>(min_y),
+                          static_cast<int>(b_w), static_cast<int>(b_h)));
     // transform the cropped bbox region
-    transform_->Compute(crop_out, &res_out);
+    RETURN_IF_NOT_OK(transform_->Compute(crop_out, &res_out));
     // place the transformed region back in the restored input
     std::shared_ptr<CVTensor> res_img = CVTensor::AsCVTensor(res_out);
     // check if transformed crop is out of bounds of the box
     if (res_img->mat().cols > b_w || res_img->mat().rows > b_h || res_img->mat().cols < b_w ||
         res_img->mat().rows < b_h) {
       // if so, resize to fit in the box
-      std::shared_ptr<TensorOp> resize_op = std::make_shared<ResizeOp>(b_h, b_w);
-      resize_op->Compute(std::static_pointer_cast<Tensor>(res_img), &res_out);
+      std::shared_ptr<TensorOp> resize_op =
+          std::make_shared<ResizeOp>(static_cast<int32_t>(b_h), static_cast<int32_t>(b_w));
+      RETURN_IF_NOT_OK(resize_op->Compute(std::static_pointer_cast<Tensor>(res_img), &res_out));
       res_img = CVTensor::AsCVTensor(res_out);
     }
     res_img->mat().copyTo(input_restore->mat()(cv::Rect(min_x, min_y, res_img->mat().cols, res_img->mat().rows)));
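
For readers less familiar with the C++ kernel, the per-box flow above is: crop the box region, run the wrapped transform on the crop, resize the result back to the box size if the transform changed its dimensions, then paste it over the original pixels. A minimal NumPy sketch of that flow follows; the helper name and its arguments are hypothetical, and float coordinates are truncated to int as in the kernel.

```python
import numpy as np
import cv2  # used only for the resize fallback, mirroring ResizeOp


def bbox_augment_sketch(image, boxes, transform, selected):
    """Apply `transform` to the region of each selected box (illustrative sketch).

    `boxes` rows are assumed to start with [min_x, min_y, width, height] floats,
    matching the layout read via GetItemAt<float> in the kernel above.
    """
    out = image.copy()
    for i in selected:
        x, y, w, h = (int(v) for v in boxes[i, :4])  # static_cast<int> of the float coords
        crop = out[y:y + h, x:x + w]                 # Crop(...)
        res = transform(crop)                        # transform_->Compute(...)
        if res.shape[0] != h or res.shape[1] != w:   # transformed crop no longer fits the box
            res = cv2.resize(res, (w, h))            # ResizeOp(b_h, b_w) equivalent
        out[y:y + h, x:x + w] = res                  # copyTo(cv::Rect(min_x, min_y, ...))
    return out
```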
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 #include <utility>
-#include "dataset/kernels/image/random_horizontal_flip_bbox_op.h"
+#include "dataset/kernels/image/random_horizontal_flip_with_bbox_op.h"
 #include "dataset/kernels/image/image_utils.h"
 #include "dataset/util/status.h"
 #include "dataset/core/cv_tensor.h"

@@ -31,21 +31,19 @@ Status RandomHorizontalFlipWithBBoxOp::Compute(const TensorRow &input, TensorRow *output)
     // To test bounding boxes algorithm, create random bboxes from image dims
     size_t num_of_boxes = input[1]->shape()[0];      // set to give number of bboxes
     float img_center = (input[0]->shape()[1] / 2.);  // get the center of the image
     for (int i = 0; i < num_of_boxes; i++) {
-      uint32_t b_w = 0;  // bounding box width
-      uint32_t min_x = 0;
+      float b_w = 0;  // bounding box width
+      float min_x = 0;
       // get the required items
-      input[1]->GetItemAt<uint32_t>(&min_x, {i, 0});
-      input[1]->GetItemAt<uint32_t>(&b_w, {i, 2});
+      RETURN_IF_NOT_OK(input[1]->GetItemAt<float>(&min_x, {i, 0}));
+      RETURN_IF_NOT_OK(input[1]->GetItemAt<float>(&b_w, {i, 2}));
       // do the flip
-      float diff = img_center - min_x;          // get distance from min_x to center
-      uint32_t refl_min_x = diff + img_center;  // get reflection of min_x
-      uint32_t new_min_x = refl_min_x - b_w;    // subtract from the reflected min_x to get the new one
-      input[1]->SetItemAt<uint32_t>({i, 0}, new_min_x);
+      float diff = img_center - min_x;       // get distance from min_x to center
+      float refl_min_x = diff + img_center;  // get reflection of min_x
+      float new_min_x = refl_min_x - b_w;    // subtract from the reflected min_x to get the new one
+      RETURN_IF_NOT_OK(input[1]->SetItemAt<float>({i, 0}, new_min_x));
     }
-    (*output).push_back(nullptr);
-    (*output).push_back(nullptr);
+    (*output).resize(2);
     // move input to output pointer of bounding boxes
     (*output)[1] = std::move(input[1]);
     // perform HorizontalFlip on the image
@@ -55,6 +53,5 @@ Status RandomHorizontalFlipWithBBoxOp::Compute(const TensorRow &input, TensorRow *output)
   *output = input;
   return Status::OK();
 }
 }  // namespace dataset
 }  // namespace mindspore
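
The flip arithmetic above only changes the x-coordinate: the distance from min_x to the image centre is mirrored to the other side, and the box width is subtracted so the stored value stays the box's left edge. A small worked sketch of the same formula (plain Python, illustrative values only):

```python
def flip_min_x(min_x, b_w, img_width):
    """Mirror a box's left edge across the vertical centre line.

    Equivalent to the kernel's refl_min_x = (img_center - min_x) + img_center;
    new_min_x = refl_min_x - b_w, i.e. new_min_x = img_width - min_x - b_w.
    """
    img_center = img_width / 2.0
    refl_min_x = (img_center - min_x) + img_center  # reflection of the left edge
    return refl_min_x - b_w                         # shift back by the box width


# e.g. a 30-wide box starting at x=10 in a 100-wide image moves to x=60
assert flip_min_x(10, 30, 100) == 60.0
```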
@@ -0,0 +1,274 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing the bounding box augment op in DE
"""
from util import visualize_with_bounding_boxes, InvalidBBoxType, check_bad_bbox, \
    config_get_set_seed, config_get_set_num_parallel_workers, save_and_check_md5
import numpy as np
import mindspore.log as logger
import mindspore.dataset as ds
import mindspore.dataset.transforms.vision.c_transforms as c_vision

GENERATE_GOLDEN = False

# updated VOC dataset with correct annotations
DATA_DIR = "../data/dataset/testVOC2012_2"
DATA_DIR_2 = ["../data/dataset/testCOCO/train/",
              "../data/dataset/testCOCO/annotations/train.json"]  # DATA_DIR, ANNOTATION_DIR

def test_bounding_box_augment_with_rotation_op(plot_vis=False):
    """
    Test BoundingBoxAugment op (passing rotation op as transform)
    Prints images side by side with and without Aug applied + bboxes to compare and test
    """
    logger.info("test_bounding_box_augment_with_rotation_op")

    original_seed = config_get_set_seed(0)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    dataVoc1 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)

    # Ratio is set to 1 to apply rotation on all bounding boxes.
    test_op = c_vision.BoundingBoxAugment(c_vision.RandomRotation(90), 1)

    # map to apply ops
    dataVoc2 = dataVoc2.map(input_columns=["image", "annotation"],
                            output_columns=["image", "annotation"],
                            columns_order=["image", "annotation"],
                            operations=[test_op])

    filename = "bounding_box_augment_rotation_c_result.npz"
    save_and_check_md5(dataVoc2, filename, generate_golden=GENERATE_GOLDEN)

    unaugSamp, augSamp = [], []
    for unAug, Aug in zip(dataVoc1.create_dict_iterator(), dataVoc2.create_dict_iterator()):
        unaugSamp.append(unAug)
        augSamp.append(Aug)

    if plot_vis:
        visualize_with_bounding_boxes(unaugSamp, augSamp)

    # Restore config setting
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)

def test_bounding_box_augment_with_crop_op(plot_vis=False):
    """
    Test BoundingBoxAugment op (passing crop op as transform)
    Prints images side by side with and without Aug applied + bboxes to compare and test
    """
    logger.info("test_bounding_box_augment_with_crop_op")

    original_seed = config_get_set_seed(0)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    dataVoc1 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)

    # Ratio is set to 0.5 to apply the crop to half of the bounding boxes.
    test_op = c_vision.BoundingBoxAugment(c_vision.RandomCrop(50), 0.5)

    # map to apply ops
    dataVoc2 = dataVoc2.map(input_columns=["image", "annotation"],
                            output_columns=["image", "annotation"],
                            columns_order=["image", "annotation"],
                            operations=[test_op])

    filename = "bounding_box_augment_crop_c_result.npz"
    save_and_check_md5(dataVoc2, filename, generate_golden=GENERATE_GOLDEN)

    unaugSamp, augSamp = [], []
    for unAug, Aug in zip(dataVoc1.create_dict_iterator(), dataVoc2.create_dict_iterator()):
        unaugSamp.append(unAug)
        augSamp.append(Aug)

    if plot_vis:
        visualize_with_bounding_boxes(unaugSamp, augSamp)

    # Restore config setting
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)

def test_bounding_box_augment_valid_ratio_c(plot_vis=False):
    """
    Test BoundingBoxAugment op (testing with valid ratio, less than 1).
    Prints images side by side with and without Aug applied + bboxes to compare and test
    """
    logger.info("test_bounding_box_augment_valid_ratio_c")

    original_seed = config_get_set_seed(1)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    dataVoc1 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)

    test_op = c_vision.BoundingBoxAugment(c_vision.RandomHorizontalFlip(1), 0.9)

    # map to apply ops
    dataVoc2 = dataVoc2.map(input_columns=["image", "annotation"],
                            output_columns=["image", "annotation"],
                            columns_order=["image", "annotation"],
                            operations=[test_op])  # Add column for "annotation"

    filename = "bounding_box_augment_valid_ratio_c_result.npz"
    save_and_check_md5(dataVoc2, filename, generate_golden=GENERATE_GOLDEN)

    unaugSamp, augSamp = [], []
    for unAug, Aug in zip(dataVoc1.create_dict_iterator(), dataVoc2.create_dict_iterator()):
        unaugSamp.append(unAug)
        augSamp.append(Aug)

    if plot_vis:
        visualize_with_bounding_boxes(unaugSamp, augSamp)

    # Restore config setting
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)

def test_bounding_box_augment_op_coco_c(plot_vis=False):
    """
    Prints images and bboxes side by side with and without BoundingBoxAugment Op applied,
    Testing with COCO dataset
    """
    logger.info("test_bounding_box_augment_op_coco_c")

    dataCoco1 = ds.CocoDataset(DATA_DIR_2[0], annotation_file=DATA_DIR_2[1], task="Detection",
                               decode=True, shuffle=False)
    dataCoco2 = ds.CocoDataset(DATA_DIR_2[0], annotation_file=DATA_DIR_2[1], task="Detection",
                               decode=True, shuffle=False)

    test_op = c_vision.BoundingBoxAugment(c_vision.RandomHorizontalFlip(1), 1)

    dataCoco2 = dataCoco2.map(input_columns=["image", "bbox"],
                              output_columns=["image", "bbox"],
                              columns_order=["image", "bbox"],
                              operations=[test_op])

    unaugSamp, augSamp = [], []
    for unAug, Aug in zip(dataCoco1.create_dict_iterator(), dataCoco2.create_dict_iterator()):
        unaugSamp.append(unAug)
        augSamp.append(Aug)

    if plot_vis:
        visualize_with_bounding_boxes(unaugSamp, augSamp, "bbox")

def test_bounding_box_augment_valid_edge_c(plot_vis=False):
    """
    Test BoundingBoxAugment op (testing with valid edge case, box covering full image).
    Prints images side by side with and without Aug applied + bboxes to compare and test
    """
    logger.info("test_bounding_box_augment_valid_edge_c")

    original_seed = config_get_set_seed(1)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    dataVoc1 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)

    test_op = c_vision.BoundingBoxAugment(c_vision.RandomHorizontalFlip(1), 1)

    # map to apply ops
    # Add column for "annotation"
    dataVoc1 = dataVoc1.map(input_columns=["image", "annotation"],
                            output_columns=["image", "annotation"],
                            columns_order=["image", "annotation"],
                            operations=lambda img, bbox:
                            (img, np.array([[0, 0, img.shape[1], img.shape[0], 0, 0, 0]]).astype(np.float32)))
    dataVoc2 = dataVoc2.map(input_columns=["image", "annotation"],
                            output_columns=["image", "annotation"],
                            columns_order=["image", "annotation"],
                            operations=lambda img, bbox:
                            (img, np.array([[0, 0, img.shape[1], img.shape[0], 0, 0, 0]]).astype(np.float32)))
    dataVoc2 = dataVoc2.map(input_columns=["image", "annotation"],
                            output_columns=["image", "annotation"],
                            columns_order=["image", "annotation"],
                            operations=[test_op])

    filename = "bounding_box_augment_valid_edge_c_result.npz"
    save_and_check_md5(dataVoc2, filename, generate_golden=GENERATE_GOLDEN)

    unaugSamp, augSamp = [], []
    for unAug, Aug in zip(dataVoc1.create_dict_iterator(), dataVoc2.create_dict_iterator()):
        unaugSamp.append(unAug)
        augSamp.append(Aug)

    if plot_vis:
        visualize_with_bounding_boxes(unaugSamp, augSamp)

    # Restore config setting
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)

def test_bounding_box_augment_invalid_ratio_c():
    """
    Test BoundingBoxAugment op with invalid input ratio
    """
    logger.info("test_bounding_box_augment_invalid_ratio_c")

    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)

    try:
        # ratio range is from 0 - 1
        test_op = c_vision.BoundingBoxAugment(c_vision.RandomHorizontalFlip(1), 1.5)
        # map to apply ops
        dataVoc2 = dataVoc2.map(input_columns=["image", "annotation"],
                                output_columns=["image", "annotation"],
                                columns_order=["image", "annotation"],
                                operations=[test_op])  # Add column for "annotation"
    except ValueError as error:
        logger.info("Got an exception in DE: {}".format(str(error)))
        assert "Input is not" in str(error)

def test_bounding_box_augment_invalid_bounds_c():
    """
    Test BoundingBoxAugment op with invalid bboxes.
    """
    logger.info("test_bounding_box_augment_invalid_bounds_c")

    test_op = c_vision.BoundingBoxAugment(c_vision.RandomHorizontalFlip(1), 1)

    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)
    check_bad_bbox(dataVoc2, test_op, InvalidBBoxType.WidthOverflow, "bounding boxes is out of bounds of the image")
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)
    check_bad_bbox(dataVoc2, test_op, InvalidBBoxType.HeightOverflow, "bounding boxes is out of bounds of the image")
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)
    check_bad_bbox(dataVoc2, test_op, InvalidBBoxType.NegativeXY, "min_x")
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)
    check_bad_bbox(dataVoc2, test_op, InvalidBBoxType.WrongShape, "4 features")

if __name__ == "__main__":
    # set to false to not show plots
    test_bounding_box_augment_with_rotation_op(plot_vis=False)
    test_bounding_box_augment_with_crop_op(plot_vis=False)
    test_bounding_box_augment_op_coco_c(plot_vis=False)
    test_bounding_box_augment_valid_ratio_c(plot_vis=False)
    test_bounding_box_augment_valid_edge_c(plot_vis=False)
    test_bounding_box_augment_invalid_ratio_c()
    test_bounding_box_augment_invalid_bounds_c()

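Both test files build their edge-case annotation with the same inline lambda. For readability, here is the same construction as a named helper; the helper name is hypothetical, and the trailing zero columns are assumed to be the extra VOC annotation fields (e.g. label/difficult/truncate) that the bbox ops pass through untouched.

```python
import numpy as np


def full_image_box(img):
    """Build a single annotation row covering the whole image (illustrative sketch).

    Assumed layout, as in the lambdas above: [min_x, min_y, width, height]
    followed by three extra fields that the bbox ops do not modify.
    """
    return np.array([[0, 0, img.shape[1], img.shape[0], 0, 0, 0]], dtype=np.float32)
```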
@@ -0,0 +1,221 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing the random horizontal flip with bounding boxes op in DE
"""
import numpy as np
import mindspore.log as logger
import mindspore.dataset as ds
import mindspore.dataset.transforms.vision.c_transforms as c_vision
from util import visualize_with_bounding_boxes, InvalidBBoxType, check_bad_bbox, \
    config_get_set_seed, config_get_set_num_parallel_workers, save_and_check_md5

GENERATE_GOLDEN = False

# updated VOC dataset with correct annotations
DATA_DIR = "../data/dataset/testVOC2012_2"
DATA_DIR_2 = ["../data/dataset/testCOCO/train/",
              "../data/dataset/testCOCO/annotations/train.json"]  # DATA_DIR, ANNOTATION_DIR

def test_random_horizontal_flip_with_bbox_op_c(plot_vis=False):
    """
    Prints images and bboxes side by side with and without RandomHorizontalFlipWithBBox Op applied
    """
    logger.info("test_random_horizontal_flip_with_bbox_op_c")

    # Load dataset
    dataVoc1 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train",
                             decode=True, shuffle=False)
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train",
                             decode=True, shuffle=False)

    test_op = c_vision.RandomHorizontalFlipWithBBox(1)

    dataVoc2 = dataVoc2.map(input_columns=["image", "annotation"],
                            output_columns=["image", "annotation"],
                            columns_order=["image", "annotation"],
                            operations=[test_op])

    unaugSamp, augSamp = [], []
    for unAug, Aug in zip(dataVoc1.create_dict_iterator(), dataVoc2.create_dict_iterator()):
        unaugSamp.append(unAug)
        augSamp.append(Aug)

    if plot_vis:
        visualize_with_bounding_boxes(unaugSamp, augSamp)

def test_random_horizontal_flip_with_bbox_op_coco_c(plot_vis=False):
    """
    Prints images and bboxes side by side with and without RandomHorizontalFlipWithBBox Op applied,
    Testing with COCO dataset
    """
    logger.info("test_random_horizontal_flip_with_bbox_op_coco_c")

    dataCoco1 = ds.CocoDataset(DATA_DIR_2[0], annotation_file=DATA_DIR_2[1], task="Detection",
                               decode=True, shuffle=False)
    dataCoco2 = ds.CocoDataset(DATA_DIR_2[0], annotation_file=DATA_DIR_2[1], task="Detection",
                               decode=True, shuffle=False)

    test_op = c_vision.RandomHorizontalFlipWithBBox(1)

    dataCoco2 = dataCoco2.map(input_columns=["image", "bbox"],
                              output_columns=["image", "bbox"],
                              columns_order=["image", "bbox"],
                              operations=[test_op])

    unaugSamp, augSamp = [], []
    for unAug, Aug in zip(dataCoco1.create_dict_iterator(), dataCoco2.create_dict_iterator()):
        unaugSamp.append(unAug)
        augSamp.append(Aug)

    if plot_vis:
        visualize_with_bounding_boxes(unaugSamp, augSamp, "bbox")

def test_random_horizontal_flip_with_bbox_valid_rand_c(plot_vis=False):
    """
    Uses a valid non-default input, expect to pass
    Prints images side by side with and without Aug applied + bboxes to
    compare and test
    """
    logger.info("test_random_horizontal_bbox_valid_rand_c")

    original_seed = config_get_set_seed(1)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # Load dataset
    dataVoc1 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train",
                             decode=True, shuffle=False)
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train",
                             decode=True, shuffle=False)

    test_op = c_vision.RandomHorizontalFlipWithBBox(0.6)

    # map to apply ops
    dataVoc2 = dataVoc2.map(input_columns=["image", "annotation"],
                            output_columns=["image", "annotation"],
                            columns_order=["image", "annotation"],
                            operations=[test_op])

    filename = "random_horizontal_flip_with_bbox_01_c_result.npz"
    save_and_check_md5(dataVoc2, filename, generate_golden=GENERATE_GOLDEN)

    unaugSamp, augSamp = [], []
    for unAug, Aug in zip(dataVoc1.create_dict_iterator(), dataVoc2.create_dict_iterator()):
        unaugSamp.append(unAug)
        augSamp.append(Aug)

    if plot_vis:
        visualize_with_bounding_boxes(unaugSamp, augSamp)

    # Restore config setting
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)

def test_random_horizontal_flip_with_bbox_valid_edge_c(plot_vis=False):
    """
    Test RandomHorizontalFlipWithBBox op (testing with valid edge case, box covering full image).
    Prints images side by side with and without Aug applied + bboxes to compare and test
    """
    logger.info("test_horizontal_flip_with_bbox_valid_edge_c")

    dataVoc1 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)

    test_op = c_vision.RandomHorizontalFlipWithBBox(1)

    # map to apply ops
    # Add column for "annotation"
    dataVoc1 = dataVoc1.map(input_columns=["image", "annotation"],
                            output_columns=["image", "annotation"],
                            columns_order=["image", "annotation"],
                            operations=lambda img, bbox:
                            (img, np.array([[0, 0, img.shape[1], img.shape[0], 0, 0, 0]]).astype(np.float32)))
    dataVoc2 = dataVoc2.map(input_columns=["image", "annotation"],
                            output_columns=["image", "annotation"],
                            columns_order=["image", "annotation"],
                            operations=lambda img, bbox:
                            (img, np.array([[0, 0, img.shape[1], img.shape[0], 0, 0, 0]]).astype(np.float32)))
    dataVoc2 = dataVoc2.map(input_columns=["image", "annotation"],
                            output_columns=["image", "annotation"],
                            columns_order=["image", "annotation"],
                            operations=[test_op])

    unaugSamp, augSamp = [], []
    for unAug, Aug in zip(dataVoc1.create_dict_iterator(), dataVoc2.create_dict_iterator()):
        unaugSamp.append(unAug)
        augSamp.append(Aug)

    if plot_vis:
        visualize_with_bounding_boxes(unaugSamp, augSamp)

def test_random_horizontal_flip_with_bbox_invalid_prob_c():
    """
    Test RandomHorizontalFlipWithBBox op with invalid input probability
    """
    logger.info("test_random_horizontal_bbox_invalid_prob_c")

    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)

    try:
        # Note: Valid range of prob should be [0.0, 1.0]
        test_op = c_vision.RandomHorizontalFlipWithBBox(1.5)
        # map to apply ops
        dataVoc2 = dataVoc2.map(input_columns=["image", "annotation"],
                                output_columns=["image", "annotation"],
                                columns_order=["image", "annotation"],
                                operations=[test_op])  # Add column for "annotation"
    except ValueError as error:
        logger.info("Got an exception in DE: {}".format(str(error)))
        assert "Input is not" in str(error)

def test_random_horizontal_flip_with_bbox_invalid_bounds_c():
    """
    Test RandomHorizontalFlipWithBBox op with invalid bounding boxes
    """
    logger.info("test_random_horizontal_bbox_invalid_bounds_c")

    test_op = c_vision.RandomHorizontalFlipWithBBox(1)

    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)
    check_bad_bbox(dataVoc2, test_op, InvalidBBoxType.WidthOverflow, "bounding boxes is out of bounds of the image")
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)
    check_bad_bbox(dataVoc2, test_op, InvalidBBoxType.HeightOverflow, "bounding boxes is out of bounds of the image")
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)
    check_bad_bbox(dataVoc2, test_op, InvalidBBoxType.NegativeXY, "min_x")
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)
    check_bad_bbox(dataVoc2, test_op, InvalidBBoxType.WrongShape, "4 features")

if __name__ == "__main__":
    # set to false to not show plots
    test_random_horizontal_flip_with_bbox_op_c(plot_vis=False)
    test_random_horizontal_flip_with_bbox_op_coco_c(plot_vis=False)
    test_random_horizontal_flip_with_bbox_valid_rand_c(plot_vis=False)
    test_random_horizontal_flip_with_bbox_valid_edge_c(plot_vis=False)
    test_random_horizontal_flip_with_bbox_invalid_prob_c()
    test_random_horizontal_flip_with_bbox_invalid_bounds_c()
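
As a usage note, the two new ops compose like any other c_transforms. A hedged sketch follows, chaining RandomHorizontalFlipWithBBox with BoundingBoxAugment on the same VOC columns used in the tests above; the dataset path and parameter values are illustrative only, not part of this change.

```python
import mindspore.dataset as ds
import mindspore.dataset.transforms.vision.c_transforms as c_vision

# Illustrative only: reuses the test dataset path and column layout from above.
data = ds.VOCDataset("../data/dataset/testVOC2012_2", task="Detection",
                     mode="train", decode=True, shuffle=False)
flip_op = c_vision.RandomHorizontalFlipWithBBox(0.5)                    # flip image + boxes half the time
aug_op = c_vision.BoundingBoxAugment(c_vision.RandomRotation(30), 0.3)  # rotate ~30% of the boxes
data = data.map(input_columns=["image", "annotation"],
                output_columns=["image", "annotation"],
                columns_order=["image", "annotation"],
                operations=[flip_op, aug_op])
for sample in data.create_dict_iterator():
    pass  # consume the pipeline; images and annotations come back augmented
```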