
Simplify example node

Tag: tags/v0.3.6-rc0
Author: haixuanTao (1 year ago)
Commit: 8e30506bd3
7 changed files with 114 additions and 103 deletions:

  1. examples/python-dataflow/dataflow.yml (+16, -6)
  2. examples/python-dataflow/dataflow_yolo.yml (+0, -36)
  3. node-hub/opencv-plot/README.md (+0, -3)
  4. node-hub/opencv-plot/main.py (+32, -32)
  5. node-hub/opencv-video-capture/README.md (+0, -1)
  6. node-hub/opencv-video-capture/main.py (+44, -17)
  7. node-hub/ultralytics-yolo/main.py (+22, -8)

examples/python-dataflow/dataflow.yml (+16, -6)

@@ -4,20 +4,30 @@ nodes:
     path: opencv-video-capture
     inputs:
       tick: dora/timer/millis/16
-      stop: plot/end
     outputs:
       - image
-      - text
     env:
       CAPTURE_PATH: 0
       IMAGE_WIDTH: 640
       IMAGE_HEIGHT: 480
 
+  - id: object-detection
+    build: pip install ../../node-hub/ultralytics-yolo
+    path: ultralytics-yolo
+    inputs:
+      image:
+        source: camera/image
+        queue_size: 1
+    outputs:
+      - bbox
+    env:
+      MODEL: yolov8n.pt
+
   - id: plot
     build: pip install ../../node-hub/opencv-plot
     path: opencv-plot
     inputs:
-      image: camera/image
-      text: camera/text
-    outputs:
-      - end
+      image:
+        source: camera/image
+        queue_size: 1
+      bbox: object-detection/bbox

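With this change the single dataflow.yml wires camera -> object-detection -> plot, with each image input bounded by queue_size: 1. For orientation, here is a minimal sketch of a node on the receiving end of that wiring; it uses only the dora calls that appear in the diffs below (Node, the event loop, event["type"]/["id"]/["value"]) and is not the actual opencv-plot implementation.

from dora import Node


def main():
    node = Node("plot")  # name matches the node id declared in dataflow.yml

    for event in node:
        if event["type"] == "INPUT":
            if event["id"] == "image":
                arrow_image = event["value"][0]
                width = arrow_image["width"].as_py()
                height = arrow_image["height"].as_py()
                print(f"image: {width}x{height}")
            elif event["id"] == "bbox":
                arrow_bbox = event["value"][0]
                names = arrow_bbox["names"].values.to_numpy(zero_copy_only=False)
                print(f"detections: {len(names)}")
        elif event["type"] == "ERROR":
            raise Exception(event["error"])


if __name__ == "__main__":
    main()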
examples/python-dataflow/dataflow_yolo.yml (+0, -36)

@@ -1,36 +0,0 @@
-nodes:
-  - id: camera
-    build: pip install ../../node-hub/opencv-video-capture
-    path: opencv-video-capture
-    inputs:
-      tick: dora/timer/millis/16
-      stop: plot/end
-    outputs:
-      - image
-    env:
-      CAPTURE_PATH: 0
-      IMAGE_WIDTH: 640
-      IMAGE_HEIGHT: 480
-
-  - id: object-detection
-    build: pip install ../../node-hub/ultralytics-yolo
-    path: ultralytics-yolo
-    inputs:
-      image:
-        source: camera/image
-        queue_size: 1
-    outputs:
-      - bbox
-    env:
-      MODEL: yolov8n.pt
-
-  - id: plot
-    build: pip install ../../node-hub/opencv-plot
-    path: opencv-plot
-    inputs:
-      image:
-        source: camera/image
-        queue_size: 1
-      bbox: object-detection/bbox
-    outputs:
-      - end

node-hub/opencv-plot/README.md (+0, -3)

@@ -13,9 +13,6 @@ This node is used to plot a text and a list of bbox on a base image (ideal for o
 # bbox: Arrow array of bbox
 # text: Arrow array of size 1 containing the text to be plotted
 
-outputs:
-  - end
-
 env:
   PLOT_WIDTH: 640 # optional, default is image input width
   PLOT_HEIGHT: 480 # optional, default is image input height


node-hub/opencv-plot/main.py (+32, -32)

@@ -7,7 +7,7 @@ import pyarrow as pa

from dora import Node

RUNNER_CI = True if os.getenv("CI", False) == "true" else False
RUNNER_CI = True if os.getenv("CI") == "true" else False


class Plot:
@@ -71,14 +71,33 @@ def plot_frame(plot):


def main():

# Handle dynamic nodes, ask for the name of the node in the dataflow, and the same values as the ENV variables.
parser = argparse.ArgumentParser(
description="OpenCV Plotter: This node is used to plot text and bounding boxes on an image.")
description="OpenCV Plotter: This node is used to plot text and bounding boxes on an image."
)

parser.add_argument("--name", type=str, required=False, help="The name of the node in the dataflow.",
default="opencv-plot")
parser.add_argument("--plot-width", type=int, required=False, help="The width of the plot.", default=None)
parser.add_argument("--plot-height", type=int, required=False, help="The height of the plot.", default=None)
parser.add_argument(
"--name",
type=str,
required=False,
help="The name of the node in the dataflow.",
default="opencv-plot",
)
parser.add_argument(
"--plot-width",
type=int,
required=False,
help="The width of the plot.",
default=None,
)
parser.add_argument(
"--plot-height",
type=int,
required=False,
help="The height of the plot.",
default=None,
)

args = parser.parse_args()

@@ -93,7 +112,9 @@ def main():
if isinstance(plot_height, str) and plot_height.isnumeric():
plot_height = int(plot_height)

node = Node(args.name) # provide the name to connect to the dataflow if dynamic node
node = Node(
args.name
) # provide the name to connect to the dataflow if dynamic node
plot = Plot()

plot.width = plot_width
@@ -113,18 +134,17 @@ def main():
"width": np.uint32(arrow_image["width"].as_py()),
"height": np.uint32(arrow_image["height"].as_py()),
"channels": np.uint8(arrow_image["channels"].as_py()),
"data": arrow_image["data"].values.to_numpy().astype(np.uint8)
"data": arrow_image["data"].values.to_numpy().astype(np.uint8),
}

plot.frame = np.reshape(image["data"], (image["height"], image["width"], image["channels"]))
plot.frame = np.reshape(
image["data"], (image["height"], image["width"], image["channels"])
)

plot_frame(plot)
if not RUNNER_CI:
if cv2.waitKey(1) & 0xFF == ord("q"):
break
else:
break # break the loop for CI

elif event_id == "bbox":
arrow_bbox = event["value"][0]
plot.bboxes = {
@@ -132,31 +152,11 @@ def main():
"conf": arrow_bbox["conf"].values.to_numpy(),
"names": arrow_bbox["names"].values.to_numpy(zero_copy_only=False),
}

plot_frame(plot)
if not RUNNER_CI:
if cv2.waitKey(1) & 0xFF == ord("q"):
break
else:
break # break the loop for CI
elif event_id == "text":
plot.text = event["value"][0].as_py()

plot_frame(plot)
if not RUNNER_CI:
if cv2.waitKey(1) & 0xFF == ord("q"):
break
else:
break # break the loop for CI

elif event_type == "ERROR":
raise Exception(event["error"])

node.send_output(
"end",
pa.array([0], type=pa.uint8())
)


if __name__ == "__main__":
main()

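Both opencv-plot (above) and ultralytics-yolo (below) unpack the incoming image struct the same way. As a sketch only, that shared pattern boils down to a helper like the following; the helper name is hypothetical and not part of the commit.

import numpy as np


def arrow_image_to_frame(arrow_image):
    # Hypothetical helper, not in the commit: convert the image struct sent by
    # opencv-video-capture into an HxWxC uint8 frame, as done inline above.
    width = np.uint32(arrow_image["width"].as_py())
    height = np.uint32(arrow_image["height"].as_py())
    channels = np.uint8(arrow_image["channels"].as_py())
    data = arrow_image["data"].values.to_numpy().astype(np.uint8)
    return np.reshape(data, (height, width, channels))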
node-hub/opencv-video-capture/README.md (+0, -1)

@@ -10,7 +10,6 @@ This node is used to capture video from a camera using OpenCV.
     path: opencv-video-capture
     inputs:
       tick: dora/timer/millis/16 # try to capture at 60fps
-      # stop: some stop signal from another node
     outputs:
       - image: # the captured image



node-hub/opencv-video-capture/main.py (+44, -17)

@@ -7,20 +7,45 @@ import pyarrow as pa

from dora import Node

import time

RUNNER_CI = True if os.getenv("CI") == "true" else False


def main():
# Handle dynamic nodes, ask for the name of the node in the dataflow, and the same values as the ENV variables.
parser = argparse.ArgumentParser(
description="OpenCV Video Capture: This node is used to capture video from a camera.")

parser.add_argument("--name", type=str, required=False, help="The name of the node in the dataflow.",
default="opencv-video-capture")
parser.add_argument("--path", type=int, required=False,
help="The path of the device to capture (e.g. /dev/video1, or an index like 0, 1...", default=0)
parser.add_argument("--image-width", type=int, required=False,
help="The width of the image output. Default is the camera width.", default=None)
parser.add_argument("--image-height", type=int, required=False,
help="The height of the camera. Default is the camera height.", default=None)
description="OpenCV Video Capture: This node is used to capture video from a camera."
)

parser.add_argument(
"--name",
type=str,
required=False,
help="The name of the node in the dataflow.",
default="opencv-video-capture",
)
parser.add_argument(
"--path",
type=int,
required=False,
help="The path of the device to capture (e.g. /dev/video1, or an index like 0, 1...",
default=0,
)
parser.add_argument(
"--image-width",
type=int,
required=False,
help="The width of the image output. Default is the camera width.",
default=None,
)
parser.add_argument(
"--image-height",
type=int,
required=False,
help="The height of the camera. Default is the camera height.",
default=None,
)

args = parser.parse_args()

@@ -42,10 +67,16 @@ def main():

video_capture = cv2.VideoCapture(video_capture_path)
node = Node(args.name)
start_time = time.time()

pa.array([]) # initialize pyarrow array

for event in node:

# Run this example in the CI for 20 seconds only.
if RUNNER_CI and time.time() - start_time > 20:
break

event_type = event["type"]

if event_type == "INPUT":
@@ -58,7 +89,7 @@ def main():
frame = np.zeros((480, 640, 3), dtype=np.uint8)
cv2.putText(
frame,
f'Error: no frame for camera at path {video_capture_path}.',
f"Error: no frame for camera at path {video_capture_path}.",
(int(30), int(30)),
cv2.FONT_HERSHEY_SIMPLEX,
0.50,
@@ -75,14 +106,10 @@ def main():
"width": np.uint32(frame.shape[1]),
"height": np.uint32(frame.shape[0]),
"channels": np.uint8(frame.shape[2]),
"data": frame.ravel()
"data": frame.ravel(),
}

node.send_output(
"image",
pa.array([image]),
event["metadata"]
)
node.send_output("image", pa.array([image]), event["metadata"])

if event_id == "stop":
video_capture.release()


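On the sending side, the capture node above packs each frame into a struct with width, height, channels and flattened data before calling node.send_output. Restated outside the diff as a sketch, assuming a BGR uint8 frame from OpenCV; the helper name is hypothetical.

import numpy as np
import pyarrow as pa


def send_frame(node, frame, metadata):
    # Hypothetical helper, not in the commit: pack an HxWxC uint8 OpenCV frame
    # into the struct that ultralytics-yolo and opencv-plot consume.
    image = {
        "width": np.uint32(frame.shape[1]),
        "height": np.uint32(frame.shape[0]),
        "channels": np.uint8(frame.shape[2]),
        "data": frame.ravel(),
    }
    node.send_output("image", pa.array([image]), metadata)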
node-hub/ultralytics-yolo/main.py (+22, -8)

@@ -7,15 +7,27 @@ import pyarrow as pa
from dora import Node
from ultralytics import YOLO


def main():
# Handle dynamic nodes, ask for the name of the node in the dataflow, and the same values as the ENV variables.
parser = argparse.ArgumentParser(
description="UltraLytics YOLO: This node is used to perform object detection using the UltraLytics YOLO model.")

parser.add_argument("--name", type=str, required=False, help="The name of the node in the dataflow.",
default="ultralytics-yolo")
parser.add_argument("--model", type=str, required=False,
help="The name of the model file (e.g. yolov8n.pt).", default="yolov8n.pt")
description="UltraLytics YOLO: This node is used to perform object detection using the UltraLytics YOLO model."
)

parser.add_argument(
"--name",
type=str,
required=False,
help="The name of the node in the dataflow.",
default="ultralytics-yolo",
)
parser.add_argument(
"--model",
type=str,
required=False,
help="The name of the model file (e.g. yolov8n.pt).",
default="yolov8n.pt",
)

args = parser.parse_args()

@@ -38,10 +50,12 @@ def main():
"width": np.uint32(arrow_image["width"].as_py()),
"height": np.uint32(arrow_image["height"].as_py()),
"channels": np.uint8(arrow_image["channels"].as_py()),
"data": arrow_image["data"].values.to_numpy().astype(np.uint8)
"data": arrow_image["data"].values.to_numpy().astype(np.uint8),
}

frame = image["data"].reshape((image["height"], image["width"], image["channels"]))
frame = image["data"].reshape(
(image["height"], image["width"], image["channels"])
)

frame = frame[:, :, ::-1] # OpenCV image (BGR to RGB)
results = model(frame, verbose=False) # includes NMS


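The hunk ends right after results = model(frame, verbose=False), so the packing of the detections is not shown in this commit view. Judging from the fields opencv-plot reads (bbox, conf, names), it plausibly looks something like the sketch below; the helper name and exact layout are assumptions, and the real node may differ.

import pyarrow as pa


def send_detections(node, model, results, metadata):
    # Sketch only: the commit does not show this step. The field names bbox,
    # conf and names come from the opencv-plot diff above; the Ultralytics
    # accessors (results[0].boxes.xyxy/.conf/.cls, model.names) are standard,
    # but the real node's packing may differ.
    boxes = results[0].boxes
    bbox = {
        "bbox": boxes.xyxy.cpu().numpy().ravel(),
        "conf": boxes.conf.cpu().numpy(),
        "names": [model.names[int(c)] for c in boxes.cls],
    }
    node.send_output("bbox", pa.array([bbox]), metadata)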