From b0df85c6ec09d513793c1ce8e4c423b618aaab48 Mon Sep 17 00:00:00 2001
From: haixuanTao
Date: Thu, 27 Jun 2024 19:32:50 +0200
Subject: [PATCH] Simplify examples to use node hub

---
 Cargo.toml                                    |   4 +-
 examples/python-dataflow/dataflow.yml         |  38 ++---
 examples/python-dataflow/dataflow_dynamic.yml |  23 ++-
 examples/python-dataflow/example.py           |   5 -
 examples/python-dataflow/object_detection.py  |  40 -----
 examples/python-dataflow/plot.py              |  96 ------------
 examples/python-dataflow/plot_dynamic.py      | 148 ++++++++----------
 examples/python-dataflow/requirements.txt     |  47 ------
 examples/python-dataflow/run.rs               |  15 --
 examples/python-dataflow/utils.py             |  82 ----------
 examples/python-dataflow/webcam.py            |  52 ------
 11 files changed, 93 insertions(+), 457 deletions(-)
 delete mode 100644 examples/python-dataflow/example.py
 delete mode 100755 examples/python-dataflow/object_detection.py
 delete mode 100755 examples/python-dataflow/plot.py
 mode change 100755 => 100644 examples/python-dataflow/plot_dynamic.py
 delete mode 100644 examples/python-dataflow/requirements.txt
 delete mode 100644 examples/python-dataflow/utils.py
 delete mode 100755 examples/python-dataflow/webcam.py

diff --git a/Cargo.toml b/Cargo.toml
index 798b7a2a..fa52a3b6 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -30,8 +30,8 @@ members = [
     "libraries/shared-memory-server",
     "libraries/extensions/download",
     "libraries/extensions/telemetry/*",
-    "tool_nodes/dora-record",
-    "tool_nodes/dora-rerun",
+    "nodes_hub/dora-record",
+    "nodes_hub/dora-rerun",
     "libraries/extensions/ros2-bridge",
     "libraries/extensions/ros2-bridge/msg-gen",
     "libraries/extensions/ros2-bridge/python",
diff --git a/examples/python-dataflow/dataflow.yml b/examples/python-dataflow/dataflow.yml
index 612ce410..2b608ce7 100644
--- a/examples/python-dataflow/dataflow.yml
+++ b/examples/python-dataflow/dataflow.yml
@@ -1,25 +1,25 @@
 nodes:
   - id: webcam
-    custom:
-      source: ./webcam.py
-      inputs:
-        tick:
-          source: dora/timer/millis/50
-          queue_size: 1000
-      outputs:
-        - image
+    build: pip install -r ../../nodes_hub/opencv-video-capture/requirements.txt
+    path: ../../nodes_hub/opencv-video-capture/video_capture.py
+    inputs:
+      tick: dora/timer/millis/50
+    outputs:
+      - image
+    env:
+      DURATION: 100
 
   - id: object_detection
-    custom:
-      source: ./object_detection.py
-      inputs:
-        image: webcam/image
-      outputs:
-        - bbox
+    build: pip install -r ../../nodes_hub/ultralytics-yolo/requirements.txt
+    path: ../../nodes_hub/ultralytics-yolo/yolo.py
+    inputs:
+      image: webcam/image
+    outputs:
+      - bbox
 
   - id: plot
-    custom:
-      source: ./plot.py
-      inputs:
-        image: webcam/image
-        bbox: object_detection/bbox
+    build: pip install -r ../../nodes_hub/opencv-plot/requirements.txt
+    path: ../../nodes_hub/opencv-plot/plot.py
+    inputs:
+      image: webcam/image
+      bbox: object_detection/bbox
diff --git a/examples/python-dataflow/dataflow_dynamic.yml b/examples/python-dataflow/dataflow_dynamic.yml
index 677f8a7f..3c9ae1cb 100644
--- a/examples/python-dataflow/dataflow_dynamic.yml
+++ b/examples/python-dataflow/dataflow_dynamic.yml
@@ -1,16 +1,15 @@
 nodes:
   - id: webcam
-    custom:
-      source: ./webcam.py
-      inputs:
-        tick:
-          source: dora/timer/millis/50
-          queue_size: 1000
-      outputs:
-        - image
+    build: pip install -r ../../nodes_hub/opencv-video-capture/requirements.txt
+    path: ../../nodes_hub/opencv-video-capture/video_capture.py
+    inputs:
+      tick:
+        source: dora/timer/millis/50
+        queue_size: 1000
+    outputs:
+      - image
 
   - id: plot
-    custom:
-      source: dynamic
-      inputs:
-        image: webcam/image
+    path: dynamic
+    inputs:
+      image: webcam/image
diff --git a/examples/python-dataflow/example.py b/examples/python-dataflow/example.py
deleted file mode 100644
index c9221a3a..00000000
--- a/examples/python-dataflow/example.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from dora import Node
-
-node = Node("plot")
-
-event = node.next()
diff --git a/examples/python-dataflow/object_detection.py b/examples/python-dataflow/object_detection.py
deleted file mode 100755
index 70a0e712..00000000
--- a/examples/python-dataflow/object_detection.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-
-import cv2
-import numpy as np
-from ultralytics import YOLO
-
-from dora import Node
-import pyarrow as pa
-
-model = YOLO("yolov8n.pt")
-
-node = Node()
-
-for event in node:
-    event_type = event["type"]
-    if event_type == "INPUT":
-        event_id = event["id"]
-        if event_id == "image":
-            print("[object detection] received image input")
-            frame = event["value"].to_numpy()
-            frame = cv2.imdecode(frame, -1)
-            frame = frame[:, :, ::-1]  # OpenCV image (BGR to RGB)
-            results = model(frame)  # includes NMS
-            # Process results
-            boxes = np.array(results[0].boxes.xyxy.cpu())
-            conf = np.array(results[0].boxes.conf.cpu())
-            label = np.array(results[0].boxes.cls.cpu())
-            # concatenate them together
-            arrays = np.concatenate((boxes, conf[:, None], label[:, None]), axis=1)
-
-            node.send_output("bbox", pa.array(arrays.ravel()), event["metadata"])
-        else:
-            print("[object detection] ignoring unexpected input:", event_id)
-    elif event_type == "STOP":
-        print("[object detection] received stop")
-    elif event_type == "ERROR":
-        print("[object detection] error: ", event["error"])
-    else:
-        print("[object detection] received unexpected event:", event_type)
diff --git a/examples/python-dataflow/plot.py b/examples/python-dataflow/plot.py
deleted file mode 100755
index 035fc41d..00000000
--- a/examples/python-dataflow/plot.py
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-
-import os
-from dora import Node
-from dora import DoraStatus
-
-import cv2
-import numpy as np
-from utils import LABELS
-
-CI = os.environ.get("CI")
-
-font = cv2.FONT_HERSHEY_SIMPLEX
-
-
-class Plotter:
-    """
-    Plot image and bounding box
-    """
-
-    def __init__(self):
-        self.image = []
-        self.bboxs = []
-
-    def on_input(
-        self,
-        dora_input,
-    ) -> DoraStatus:
-        """
-        Put image and bounding box on cv2 window.
-
-        Args:
-            dora_input["id"] (str): Id of the dora_input declared in the yaml configuration
-            dora_input["value"] (arrow array): message of the dora_input
-        """
-        if dora_input["id"] == "image":
-            frame = dora_input["value"].to_numpy()
-            frame = cv2.imdecode(frame, -1)
-            self.image = frame
-
-        elif dora_input["id"] == "bbox" and len(self.image) != 0:
-            bboxs = dora_input["value"].to_numpy()
-            self.bboxs = np.reshape(bboxs, (-1, 6))
-            for bbox in self.bboxs:
-                [
-                    min_x,
-                    min_y,
-                    max_x,
-                    max_y,
-                    confidence,
-                    label,
-                ] = bbox
-                cv2.rectangle(
-                    self.image,
-                    (int(min_x), int(min_y)),
-                    (int(max_x), int(max_y)),
-                    (0, 255, 0),
-                    2,
-                )
-
-                cv2.putText(
-                    self.image,
-                    LABELS[int(label)] + f", {confidence:0.2f}",
-                    (int(max_x), int(max_y)),
-                    font,
-                    0.75,
-                    (0, 255, 0),
-                    2,
-                    1,
-                )
-
-        if CI != "true":
-            cv2.imshow("frame", self.image)
-            if cv2.waitKey(1) & 0xFF == ord("q"):
-                return DoraStatus.STOP
-
-        return DoraStatus.CONTINUE
-
-
-plotter = Plotter()
-node = Node()
-
-for event in node:
-    event_type = event["type"]
-    if event_type == "INPUT":
-        status = plotter.on_input(event)
-        if status == DoraStatus.CONTINUE:
-            pass
-        elif status == DoraStatus.STOP:
-            print("plotter returned stop status")
-            break
-    elif event_type == "STOP":
-        print("received stop")
-    else:
-        print("received unexpected event:", event_type)
diff --git a/examples/python-dataflow/plot_dynamic.py b/examples/python-dataflow/plot_dynamic.py
old mode 100755
new mode 100644
index b3eda8b7..31c76ee2
--- a/examples/python-dataflow/plot_dynamic.py
+++ b/examples/python-dataflow/plot_dynamic.py
@@ -1,97 +1,72 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-
 import os
-from dora import Node
-from dora import DoraStatus
+from dataclasses import dataclass, field
 
 import cv2
 import numpy as np
-from utils import LABELS
-
-CI = os.environ.get("CI")
-
-font = cv2.FONT_HERSHEY_SIMPLEX
-
-
-class Plotter:
-    """
-    Plot image and bounding box
-    """
-
-    def __init__(self):
-        self.image = []
-        self.bboxs = []
-
-    def on_input(
-        self,
-        dora_input,
-    ) -> DoraStatus:
-        """
-        Put image and bounding box on cv2 window.
-
-        Args:
-            dora_input["id"] (str): Id of the dora_input declared in the yaml configuration
-            dora_input["value"] (arrow array): message of the dora_input
-        """
-        if dora_input["id"] == "image":
-            frame = dora_input["value"].to_numpy()
-            frame = cv2.imdecode(frame, -1)
-            self.image = frame
-
-        elif dora_input["id"] == "bbox" and len(self.image) != 0:
-            bboxs = dora_input["value"].to_numpy()
-            self.bboxs = np.reshape(bboxs, (-1, 6))
-            for bbox in self.bboxs:
-                [
-                    min_x,
-                    min_y,
-                    max_x,
-                    max_y,
-                    confidence,
-                    label,
-                ] = bbox
-                cv2.rectangle(
-                    self.image,
-                    (int(min_x), int(min_y)),
-                    (int(max_x), int(max_y)),
-                    (0, 255, 0),
-                    2,
-                )
-
-                cv2.putText(
-                    self.image,
-                    LABELS[int(label)] + f", {confidence:0.2f}",
-                    (int(max_x), int(max_y)),
-                    font,
-                    0.75,
-                    (0, 255, 0),
-                    2,
-                    1,
-                )
-
-        if CI != "true":
-            cv2.imshow("frame", self.image)
-            if cv2.waitKey(1) & 0xFF == ord("q"):
-                return DoraStatus.STOP
+from dora import Node
 
-        return DoraStatus.CONTINUE
+CI = os.environ.get("CI")
+IMAGE_WIDTH = int(os.getenv("IMAGE_WIDTH", "640"))
+IMAGE_HEIGHT = int(os.getenv("IMAGE_HEIGHT", "480"))
 
-plotter = Plotter()
+FONT = cv2.FONT_HERSHEY_SIMPLEX
 
-node = Node("plot")
-for event in node:
-    event_type = event["type"]
-    if event_type == "INPUT":
-        status = plotter.on_input(event)
-        if status == DoraStatus.CONTINUE:
-            pass
-        elif status == DoraStatus.STOP:
-            print("plotter returned stop status")
-            break
-    elif event_type == "STOP":
-        print("received stop")
-    else:
-        print("received unexpected event:", event_type)
+
+@dataclass
+class Plotter:
+    # default_factory avoids dataclasses' mutable-default error for arrays
+    frame: np.ndarray = field(default_factory=lambda: np.array([]))
+    bboxes: np.ndarray = field(default_factory=lambda: np.array([]))
+
+
+if __name__ == "__main__":
+    plotter = Plotter()
+    node = Node("plot")
+
+    for event in node:
+        event_type = event["type"]
+        if event_type == "INPUT":
+            if event["id"] == "image":
+                frame = (
+                    event["value"].to_numpy().reshape((IMAGE_HEIGHT, IMAGE_WIDTH, 3))
+                )
+                plotter.frame = frame
+
+            elif event["id"] == "bbox" and len(plotter.frame) != 0:
+                bboxs = event["value"].to_numpy()
+                plotter.bboxes = np.reshape(bboxs, (-1, 6))
+                for bbox in plotter.bboxes:
+                    [
+                        min_x,
+                        min_y,
+                        max_x,
+                        max_y,
+                        confidence,
+                        label,
+                    ] = bbox
+                    cv2.rectangle(
+                        plotter.frame,
+                        (int(min_x), int(min_y)),
+                        (int(max_x), int(max_y)),
+                        (0, 255, 0),
+                        2,
+                    )
+
+                    cv2.putText(
+                        plotter.frame,
+                        # utils.py (and its LABELS list) is deleted in this
+                        # patch, so draw the raw class id instead
+                        f"{int(label)}, {confidence:0.2f}",
+                        (int(max_x), int(max_y)),
+                        FONT,
+                        0.75,
+                        (0, 255, 0),
+                        2,
+                        1,
+                    )
+
+            if CI != "true":
+                cv2.imshow("frame", plotter.frame)
+                if cv2.waitKey(1) & 0xFF == ord("q"):
+                    break
diff --git a/examples/python-dataflow/requirements.txt b/examples/python-dataflow/requirements.txt
deleted file mode 100644
index 9c1fc915..00000000
--- a/examples/python-dataflow/requirements.txt
+++ /dev/null
@@ -1,47 +0,0 @@
-# YOLOv5 requirements
-# Usage: pip install -r requirements.txt
-
-# Base ----------------------------------------
-ultralytics
-gitpython
-ipython # interactive notebook
-matplotlib>=3.2.2
-numpy<2.0.0 # See: https://github.com/opencv/opencv-python/issues/997
-opencv-python>=4.1.1
-Pillow>=7.1.2
-psutil # system resources
-PyYAML>=5.3.1
-requests>=2.23.0
-scipy>=1.4.1
-thop>=0.1.1 # FLOPs computation
-torch # see https://pytorch.org/get-started/locally (recommended)
-torchvision
-tqdm>=4.64.0
-
-# Logging -------------------------------------
-tensorboard>=2.4.1
-# wandb
-# clearml
-
-# Plotting ------------------------------------
-pandas>=1.1.4
-seaborn>=0.11.0
-
-# Export --------------------------------------
-# coremltools>=5.2 # CoreML export
-# onnx>=1.9.0 # ONNX export
-# onnx-simplifier>=0.4.1 # ONNX simplifier
-# nvidia-pyindex # TensorRT export
-# nvidia-tensorrt # TensorRT export
-# scikit-learn==0.19.2 # CoreML quantization
-# tensorflow>=2.4.1 # TFLite export (or tensorflow-cpu, tensorflow-aarch64)
-# tensorflowjs>=3.9.0 # TF.js export
-# openvino-dev # OpenVINO export
-
-# Extras --------------------------------------
-# albumentations>=1.0.3
-# pycocotools>=2.0 # COCO mAP
-# roboflow
-
-opencv-python>=4.1.1
-maturin
diff --git a/examples/python-dataflow/run.rs b/examples/python-dataflow/run.rs
index 65ae5831..20fdc399 100644
--- a/examples/python-dataflow/run.rs
+++ b/examples/python-dataflow/run.rs
@@ -50,21 +50,6 @@ async fn main() -> eyre::Result<()> {
         );
     }
 
-    run(
-        get_python_path().context("Could not get pip binary")?,
-        &["-m", "pip", "install", "--upgrade", "pip"],
-        None,
-    )
-    .await
-    .context("failed to install pip")?;
-    run(
-        get_pip_path().context("Could not get pip binary")?,
-        &["install", "-r", "requirements.txt"],
-        None,
-    )
-    .await
-    .context("pip install failed")?;
-
     run(
         "maturin",
         &["develop"],
diff --git a/examples/python-dataflow/utils.py b/examples/python-dataflow/utils.py
deleted file mode 100644
index dabc915e..00000000
--- a/examples/python-dataflow/utils.py
+++ /dev/null
@@ -1,82 +0,0 @@
-LABELS = [
-    "ABC",
-    "bicycle",
-    "car",
-    "motorcycle",
-    "airplane",
-    "bus",
-    "train",
-    "truck",
-    "boat",
-    "traffic light",
-    "fire hydrant",
-    "stop sign",
-    "parking meter",
-    "bench",
-    "bird",
-    "cat",
-    "dog",
-    "horse",
-    "sheep",
-    "cow",
-    "elephant",
-    "bear",
-    "zebra",
-    "giraffe",
-    "backpack",
-    "umbrella",
-    "handbag",
-    "tie",
-    "suitcase",
-    "frisbee",
-    "skis",
-    "snowboard",
-    "sports ball",
-    "kite",
-    "baseball bat",
-    "baseball glove",
-    "skateboard",
-    "surfboard",
-    "tennis racket",
-    "bottle",
-    "wine glass",
-    "cup",
-    "fork",
-    "knife",
-    "spoon",
-    "bowl",
-    "banana",
-    "apple",
-    "sandwich",
-    "orange",
-    "broccoli",
-    "carrot",
-    "hot dog",
-    "pizza",
-    "donut",
-    "cake",
-    "chair",
-    "couch",
-    "potted plant",
-    "bed",
-    "dining table",
-    "toilet",
-    "tv",
-    "laptop",
-    "mouse",
-    "remote",
-    "keyboard",
-    "cell phone",
-    "microwave",
-    "oven",
-    "toaster",
-    "sink",
-    "refrigerator",
-    "book",
-    "clock",
-    "vase",
-    "scissors",
-    "teddy bear",
-    "hair drier",
-    "toothbrush",
-]
diff --git a/examples/python-dataflow/webcam.py b/examples/python-dataflow/webcam.py
deleted file mode 100755
index 00b47f27..00000000
--- a/examples/python-dataflow/webcam.py
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-
-import os
-import time
-import numpy as np
-import cv2
-
-from dora import Node
-
-node = Node()
-
-CAMERA_INDEX = int(os.getenv("CAMERA_INDEX", 0))
-CAMERA_WIDTH = 640
-CAMERA_HEIGHT = 480
-video_capture = cv2.VideoCapture(CAMERA_INDEX)
-font = cv2.FONT_HERSHEY_SIMPLEX
-
-start = time.time()
-
-# Run for 20 seconds
-while time.time() - start < 10:
-    # Wait next dora_input
-    event = node.next()
-    event_type = event["type"]
-    if event_type == "INPUT":
-        ret, frame = video_capture.read()
-        if not ret:
-            frame = np.zeros((CAMERA_HEIGHT, CAMERA_WIDTH, 3), dtype=np.uint8)
-            cv2.putText(
-                frame,
-                "No Webcam was found at index %d" % (CAMERA_INDEX),
-                (int(30), int(30)),
-                font,
-                0.75,
-                (255, 255, 255),
-                2,
-                1,
-            )
-        node.send_output(
-            "image",
-            cv2.imencode(".jpg", frame)[1].tobytes(),
-            event["metadata"],
-        )
-    elif event_type == "STOP":
-        print("received stop")
-        break
-    else:
-        print("received unexpected event:", event_type)
-        break
-
-video_capture.release()