"""
Testing the bounding box augment op in DE
"""
import numpy as np

import mindspore.log as logger
import mindspore.dataset as ds
import mindspore.dataset.transforms.vision.c_transforms as c_vision

from util import visualize_with_bounding_boxes, InvalidBBoxType, check_bad_bbox, \
    config_get_set_seed, config_get_set_num_parallel_workers, save_and_check_md5

GENERATE_GOLDEN = False

DATA_DIR = "../data/dataset/testVOC2012_2"
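
# The helpers imported from util are shared fixtures for the bounding box op tests:
# check_bad_bbox builds a pipeline around a deliberately malformed bounding box and
# asserts that the expected error is raised, visualize_with_bounding_boxes plots the
# original and augmented samples side by side, and save_and_check_md5 compares the
# pipeline output against a golden .npz file (descriptions inferred from how the
# helpers are used in this file; see util.py for the exact signatures).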


def fix_annotate(bboxes):
    """
    Fix annotations to format followed by mindspore.
    """
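    # NOTE: sketch of the usual field reordering, assuming the raw VOC annotation rows
    # arrive as [label, x_min, y_min, w, h, truncate, difficult] and must be rearranged
    # into the [x_min, y_min, w, h, label, truncate, difficult] layout expected by the
    # c_transforms bounding box ops; adjust if the annotation layout differs.
    for bbox in bboxes:
        tmp = bbox[0]
        bbox[0] = bbox[1]
        bbox[1] = bbox[2]
        bbox[2] = bbox[3]
        bbox[3] = bbox[4]
        bbox[4] = tmp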
    return bboxes


def test_bounding_box_augment_with_rotation_op(plot_vis=False):
    """
    Test BoundingBoxAugment op (passing rotation op as transform)
    Prints images side by side with and without Aug applied + bboxes to compare and test
    """
    logger.info("test_bounding_box_augment_with_rotation_op")

    original_seed = config_get_set_seed(0)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    dataVoc1 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)

    # Ratio is set to 1 to apply rotation on all bounding boxes.
    test_op = c_vision.BoundingBoxAugment(c_vision.RandomRotation(90), 1)

    # maps to fix annotations to minddata standard
    dataVoc1 = dataVoc1.map(input_columns=["annotation"],
                            output_columns=["annotation"],
                            operations=fix_annotate)
    dataVoc2 = dataVoc2.map(input_columns=["annotation"],
                            output_columns=["annotation"],
                            operations=fix_annotate)
    # map to apply ops
    dataVoc2 = dataVoc2.map(input_columns=["image", "annotation"],
                            output_columns=["image", "annotation"],
                            columns_order=["image", "annotation"],
                            operations=[test_op])

    filename = "bounding_box_augment_rotation_c_result.npz"
    save_and_check_md5(dataVoc2, filename, generate_golden=GENERATE_GOLDEN)
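    # save_and_check_md5 compares the pipeline output against the golden file named
    # above; setting GENERATE_GOLDEN = True regenerates that file instead of checking
    # it (inferred from the helper's generate_golden flag as used in this suite).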

    unaugSamp, augSamp = [], []

    for unAug, Aug in zip(dataVoc1.create_dict_iterator(), dataVoc2.create_dict_iterator()):
        unaugSamp.append(unAug)
        augSamp.append(Aug)

    if plot_vis:
        visualize_with_bounding_boxes(unaugSamp, augSamp)

    # Restore config setting
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)


def test_bounding_box_augment_with_crop_op(plot_vis=False):
    """
    Test BoundingBoxAugment op (passing crop op as transform)
    Prints images side by side with and without Aug applied + bboxes to compare and test
    """
    logger.info("test_bounding_box_augment_with_crop_op")

    original_seed = config_get_set_seed(1)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    dataVoc1 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)

    # Ratio is set to 1 to apply the crop on all bounding boxes.
    test_op = c_vision.BoundingBoxAugment(c_vision.RandomCrop(90), 1)
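    # BoundingBoxAugment applies the supplied transform to the image region inside each
    # selected bounding box rather than to the whole image (per the op's documented
    # behaviour), so the random crop here is taken within every annotated box.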

    # maps to fix annotations to minddata standard
    dataVoc1 = dataVoc1.map(input_columns=["annotation"],
                            output_columns=["annotation"],
                            operations=fix_annotate)
    dataVoc2 = dataVoc2.map(input_columns=["annotation"],
                            output_columns=["annotation"],
                            operations=fix_annotate)
    # map to apply ops
    dataVoc2 = dataVoc2.map(input_columns=["image", "annotation"],
                            output_columns=["image", "annotation"],
                            columns_order=["image", "annotation"],
                            operations=[test_op])

    filename = "bounding_box_augment_crop_c_result.npz"
    save_and_check_md5(dataVoc2, filename, generate_golden=GENERATE_GOLDEN)

    unaugSamp, augSamp = [], []

    for unAug, Aug in zip(dataVoc1.create_dict_iterator(), dataVoc2.create_dict_iterator()):
        unaugSamp.append(unAug)
        augSamp.append(Aug)

    if plot_vis:
        visualize_with_bounding_boxes(unaugSamp, augSamp)

    # Restore config setting
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)


def test_bounding_box_augment_valid_ratio_c(plot_vis=False):
    """
    Test BoundingBoxAugment op (testing with valid ratio, less than 1)
    Prints images side by side with and without Aug applied + bboxes to compare and test
    """
    logger.info("test_bounding_box_augment_valid_ratio_c")

    original_seed = config_get_set_seed(1)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    dataVoc1 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)

    test_op = c_vision.BoundingBoxAugment(c_vision.RandomHorizontalFlip(1), 0.9)
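    # A ratio of 0.9 applies the flip to roughly 90% of the bounding boxes in each image
    # rather than to all of them; the valid range for the ratio argument is 0 to 1.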

    # maps to fix annotations to minddata standard
    dataVoc1 = dataVoc1.map(input_columns=["annotation"],
                            output_columns=["annotation"],
                            operations=fix_annotate)
    dataVoc2 = dataVoc2.map(input_columns=["annotation"],
                            output_columns=["annotation"],
                            operations=fix_annotate)
    # map to apply ops
    dataVoc2 = dataVoc2.map(input_columns=["image", "annotation"],
                            output_columns=["image", "annotation"],
                            columns_order=["image", "annotation"],
                            operations=[test_op])

    filename = "bounding_box_augment_valid_ratio_c_result.npz"
    save_and_check_md5(dataVoc2, filename, generate_golden=GENERATE_GOLDEN)

    unaugSamp, augSamp = [], []

    for unAug, Aug in zip(dataVoc1.create_dict_iterator(), dataVoc2.create_dict_iterator()):
        unaugSamp.append(unAug)
        augSamp.append(Aug)

    if plot_vis:
        visualize_with_bounding_boxes(unaugSamp, augSamp)

    # Restore config setting
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)


def test_bounding_box_augment_valid_edge_c(plot_vis=False):
    """
    Test BoundingBoxAugment op (testing with valid edge case, box covering full image).
    Prints images side by side with and without Aug applied + bboxes to compare and test
    """
    logger.info("test_bounding_box_augment_valid_edge_c")

    original_seed = config_get_set_seed(1)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    dataVoc1 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)

    test_op = c_vision.BoundingBoxAugment(c_vision.RandomHorizontalFlip(1), 1)

    # maps to fix annotations to minddata standard
    dataVoc1 = dataVoc1.map(input_columns=["annotation"],
                            output_columns=["annotation"],
                            operations=fix_annotate)
    dataVoc2 = dataVoc2.map(input_columns=["annotation"],
                            output_columns=["annotation"],
                            operations=fix_annotate)
    # Replace the annotation with a single box that covers the whole image (edge case)
    dataVoc1 = dataVoc1.map(input_columns=["image", "annotation"],
                            output_columns=["image", "annotation"],
                            columns_order=["image", "annotation"],
                            operations=lambda img, bbox:
                            (img, np.array([[0, 0, img.shape[1], img.shape[0], 0, 0, 0]]).astype(np.uint32)))
    dataVoc2 = dataVoc2.map(input_columns=["image", "annotation"],
                            output_columns=["image", "annotation"],
                            columns_order=["image", "annotation"],
                            operations=lambda img, bbox:
                            (img, np.array([[0, 0, img.shape[1], img.shape[0], 0, 0, 0]]).astype(np.uint32)))
    # map to apply ops
    dataVoc2 = dataVoc2.map(input_columns=["image", "annotation"],
                            output_columns=["image", "annotation"],
                            columns_order=["image", "annotation"],
                            operations=[test_op])

    filename = "bounding_box_augment_valid_edge_c_result.npz"
    save_and_check_md5(dataVoc2, filename, generate_golden=GENERATE_GOLDEN)

    unaugSamp, augSamp = [], []

    for unAug, Aug in zip(dataVoc1.create_dict_iterator(), dataVoc2.create_dict_iterator()):
        unaugSamp.append(unAug)
        augSamp.append(Aug)

    if plot_vis:
        visualize_with_bounding_boxes(unaugSamp, augSamp)

    # Restore config setting
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)


def test_bounding_box_augment_invalid_ratio_c():
    """
    Test BoundingBoxAugment op with invalid input ratio
    """
    logger.info("test_bounding_box_augment_invalid_ratio_c")

    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)

    try:
        # ratio range is from 0 - 1
        test_op = c_vision.BoundingBoxAugment(c_vision.RandomHorizontalFlip(1), 1.5)
        # maps to fix annotations to minddata standard
        dataVoc2 = dataVoc2.map(input_columns=["annotation"],
                                output_columns=["annotation"],
                                operations=fix_annotate)
        # map to apply ops
        dataVoc2 = dataVoc2.map(input_columns=["image", "annotation"],
                                output_columns=["image", "annotation"],
                                columns_order=["image", "annotation"],
                                operations=[test_op])
    except ValueError as error:
        logger.info("Got an exception in DE: {}".format(str(error)))
        assert "Input is not" in str(error)


def test_bounding_box_augment_invalid_bounds_c():
    """
    Test BoundingBoxAugment op with invalid bounding boxes
    """
    logger.info("test_bounding_box_augment_invalid_bounds_c")

    test_op = c_vision.BoundingBoxAugment(c_vision.RandomHorizontalFlip(1), 1)
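
    # Each InvalidBBoxType case below feeds one deliberately malformed box through
    # test_op and expects the quoted error text: width/height overflow, negative
    # coordinates, and a box row with fewer than the required number of fields.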
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)
    check_bad_bbox(dataVoc2, test_op, InvalidBBoxType.WidthOverflow, "bounding boxes is out of bounds of the image")
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)
    check_bad_bbox(dataVoc2, test_op, InvalidBBoxType.HeightOverflow, "bounding boxes is out of bounds of the image")
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)
    check_bad_bbox(dataVoc2, test_op, InvalidBBoxType.NegativeXY, "min_x")
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)
    check_bad_bbox(dataVoc2, test_op, InvalidBBoxType.WrongShape, "4 features")


if __name__ == "__main__":
    # Set plot_vis to True to display the comparison plots
    test_bounding_box_augment_with_rotation_op(plot_vis=False)
    test_bounding_box_augment_with_crop_op(plot_vis=False)
    test_bounding_box_augment_valid_ratio_c(plot_vis=False)
    test_bounding_box_augment_valid_edge_c(plot_vis=False)
    test_bounding_box_augment_invalid_ratio_c()
    test_bounding_box_augment_invalid_bounds_c()