From a6b3bbdf3c79e4347a0702bc1ee910c87a14d638 Mon Sep 17 00:00:00 2001 From: Philipp Oppermann Date: Tue, 21 Feb 2023 15:08:46 +0100 Subject: [PATCH] Add python-operator-dataflow example --- examples/python-operator-dataflow/.gitignore | 1 + examples/python-operator-dataflow/README.md | 33 +++++++ .../python-operator-dataflow/dataflow.yml | 27 ++++++ .../dataflow_without_webcam.yml | 27 ++++++ .../python-operator-dataflow/no_webcam.py | 33 +++++++ .../object_detection.py | 42 +++++++++ examples/python-operator-dataflow/plot.py | 86 +++++++++++++++++++ .../python-operator-dataflow/requirements.txt | 45 ++++++++++ examples/python-operator-dataflow/run.rs | 46 ++++++++++ examples/python-operator-dataflow/run.sh | 15 ++++ examples/python-operator-dataflow/utils.py | 82 ++++++++++++++++++ examples/python-operator-dataflow/webcam.py | 31 +++++++ 12 files changed, 468 insertions(+) create mode 100644 examples/python-operator-dataflow/.gitignore create mode 100644 examples/python-operator-dataflow/README.md create mode 100644 examples/python-operator-dataflow/dataflow.yml create mode 100644 examples/python-operator-dataflow/dataflow_without_webcam.yml create mode 100755 examples/python-operator-dataflow/no_webcam.py create mode 100755 examples/python-operator-dataflow/object_detection.py create mode 100755 examples/python-operator-dataflow/plot.py create mode 100644 examples/python-operator-dataflow/requirements.txt create mode 100644 examples/python-operator-dataflow/run.rs create mode 100644 examples/python-operator-dataflow/run.sh create mode 100644 examples/python-operator-dataflow/utils.py create mode 100755 examples/python-operator-dataflow/webcam.py diff --git a/examples/python-operator-dataflow/.gitignore b/examples/python-operator-dataflow/.gitignore new file mode 100644 index 00000000..eede66d8 --- /dev/null +++ b/examples/python-operator-dataflow/.gitignore @@ -0,0 +1 @@ +*.pt \ No newline at end of file diff --git a/examples/python-operator-dataflow/README.md 
b/examples/python-operator-dataflow/README.md new file mode 100644 index 00000000..815a6f53 --- /dev/null +++ b/examples/python-operator-dataflow/README.md @@ -0,0 +1,33 @@ +# Python Dataflow Example + +This example shows how to create and connect dora operators and custom nodes in Python. + +## Overview + +The [`dataflow.yml`](./dataflow.yml) defines a simple dataflow graph with the following three nodes: + +- a webcam node, which connects to your webcam and feeds the dataflow with webcam frames as JPEG-compressed bytearrays. +- an object detection node, which applies YOLOv5 to the webcam image. The model is imported from PyTorch Hub. The output is the bounding box of each detected object, the confidence and the class. You can find more info here: https://pytorch.org/hub/ultralytics_yolov5/ +- a window plotting node, which retrieves the webcam image and the YOLOv5 bounding boxes and joins the two together. + +## Getting started + +```bash +cargo run --example python-operator-dataflow +``` + +## Installation + +To install the dependencies, run the `run.sh` script.
+ +```bash +run.sh +``` + +## Run the dataflow as a standalone + +- Start the `dora-coordinator`, passing the paths to the dataflow file and the `dora-runtime` as arguments: + +``` +../../target/release/dora-coordinator --run-dataflow dataflow.yml ../../target/release/dora-runtime +``` diff --git a/examples/python-operator-dataflow/dataflow.yml b/examples/python-operator-dataflow/dataflow.yml new file mode 100644 index 00000000..86ac1422 --- /dev/null +++ b/examples/python-operator-dataflow/dataflow.yml @@ -0,0 +1,27 @@ +communication: + zenoh: + prefix: /example-python-dataflow + +nodes: + - id: webcam + custom: + source: webcam.py + inputs: + tick: dora/timer/millis/100 + outputs: + - image + + - id: object_detection + operator: + python: object_detection.py + inputs: + image: webcam/image + outputs: + - bbox + + - id: plot + operator: + python: plot.py + inputs: + image: webcam/image + bbox: object_detection/bbox diff --git a/examples/python-operator-dataflow/dataflow_without_webcam.yml b/examples/python-operator-dataflow/dataflow_without_webcam.yml new file mode 100644 index 00000000..6b9a00af --- /dev/null +++ b/examples/python-operator-dataflow/dataflow_without_webcam.yml @@ -0,0 +1,27 @@ +communication: + zenoh: + prefix: /example-python-no-webcam-dataflow + +nodes: + - id: no_webcam + custom: + source: ./no_webcam.py + inputs: + tick: dora/timer/millis/100 + outputs: + - image + + - id: object_detection + operator: + python: object_detection.py + inputs: + image: no_webcam/image + outputs: + - bbox + + - id: plot + operator: + python: plot.py + inputs: + image: no_webcam/image + bbox: object_detection/bbox diff --git a/examples/python-operator-dataflow/no_webcam.py b/examples/python-operator-dataflow/no_webcam.py new file mode 100755 index 00000000..3c322c24 --- /dev/null +++ b/examples/python-operator-dataflow/no_webcam.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import time +import urllib.request + +import cv2 +numpy 
as np +from dora import Node + +print("Hello from no_webcam.py") + + +req = urllib.request.urlopen("https://ultralytics.com/images/zidane.jpg") + +arr = np.asarray(bytearray(req.read()), dtype=np.uint8) +node = Node() + +start = time.time() + +while time.time() - start < 20: + # Wait for the next dora input + event = node.next() + match event["type"]: + case "INPUT": + print("received input", event["id"]) + node.send_output("image", arr.tobytes()) + case "STOP": + print("received stop") + case other: + print("received unexpected event:", other) + + time.sleep(1) diff --git a/examples/python-operator-dataflow/object_detection.py b/examples/python-operator-dataflow/object_detection.py new file mode 100755 index 00000000..098ec4d1 --- /dev/null +++ b/examples/python-operator-dataflow/object_detection.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +from enum import Enum +from typing import Callable + +import cv2 +import numpy as np +import torch + + +class DoraStatus(Enum): + CONTINUE = 0 + STOP = 1 + + +class Operator: + """ + Inferring objects from images + """ + + def __init__(self): + self.model = torch.hub.load("ultralytics/yolov5", "yolov5n") + + def on_input( + self, + dora_input: dict, + send_output: Callable[[str, bytes], None], + ) -> DoraStatus: + """Handle image + Args: + dora_input (dict): Dict containing the "id", "data", and "metadata" + send_output (Callable[[str, bytes]]): Function enabling sending output back to dora. 
+ """ + + frame = np.frombuffer(dora_input["data"], dtype="uint8") + frame = cv2.imdecode(frame, -1) + frame = frame[:, :, ::-1] # OpenCV image (BGR to RGB) + results = self.model(frame) # includes NMS + arrays = np.array(results.xyxy[0].cpu()).tobytes() + send_output("bbox", arrays, dora_input["metadata"]) + return DoraStatus.CONTINUE diff --git a/examples/python-operator-dataflow/plot.py b/examples/python-operator-dataflow/plot.py new file mode 100755 index 00000000..57a2a293 --- /dev/null +++ b/examples/python-operator-dataflow/plot.py @@ -0,0 +1,86 @@ +import os +from enum import Enum +from typing import Callable + +import cv2 +import numpy as np + +from utils import LABELS + +CI = os.environ.get("CI") + +font = cv2.FONT_HERSHEY_SIMPLEX + + +class DoraStatus(Enum): + CONTINUE = 0 + STOP = 1 + + +class Operator: + """ + Plot image and bounding box + """ + + def __init__(self): + self.image = [] + self.bboxs = [] + + def on_input( + self, + dora_input: dict, + send_output: Callable[[str, bytes], None], + ) -> DoraStatus: + """ + Put image and bounding box on cv2 window. + + Args: + dora_input["id"] (str): Id of the dora_input declared in the yaml configuration + dora_input["data"] (bytes): Bytes message of the dora_input + send_output (Callable[[str, bytes]]): Function enabling sending output back to dora. 
+ """ + if dora_input["id"] == "image": + frame = np.frombuffer(dora_input["data"], dtype="uint8") + frame = cv2.imdecode(frame, -1) + self.image = frame + + elif dora_input["id"] == "bbox" and len(self.image) != 0: + bboxs = np.frombuffer(dora_input["data"], dtype="float32") + self.bboxs = np.reshape(bboxs, (-1, 6)) + for bbox in self.bboxs: + [ + min_x, + min_y, + max_x, + max_y, + confidence, + label, + ] = bbox + cv2.rectangle( + self.image, + (int(min_x), int(min_y)), + (int(max_x), int(max_y)), + (0, 255, 0), + 2, + ) + + cv2.putText( + self.image, + LABELS[int(label)] + f", {confidence:0.2f}", + (int(max_x), int(max_y)), + font, + 0.75, + (0, 255, 0), + 2, + 1, + ) + + if CI != "true": + cv2.imshow("frame", self.image) + if cv2.waitKey(1) & 0xFF == ord("q"): + return DoraStatus.STOP + + return DoraStatus.CONTINUE + + def __del__(self): + cv2.destroyAllWindows() diff --git a/examples/python-operator-dataflow/requirements.txt b/examples/python-operator-dataflow/requirements.txt new file mode 100644 index 00000000..55f71178 --- /dev/null +++ b/examples/python-operator-dataflow/requirements.txt @@ -0,0 +1,45 @@ +# YOLOv5 requirements +# Usage: pip install -r requirements.txt + +# Base ---------------------------------------- +matplotlib>=3.2.2 +numpy>=1.18.5 +opencv-python>=4.1.1 +Pillow>=7.1.2 +PyYAML>=5.3.1 +requests>=2.23.0 +scipy>=1.4.1 +torch>=1.7.0 +torchvision>=0.8.1 +tqdm>=4.64.0 +protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 + +# Logging ------------------------------------- +tensorboard>=2.4.1 +# wandb +# clearml + +# Plotting ------------------------------------ +pandas>=1.1.4 +seaborn>=0.11.0 + +# Export -------------------------------------- +# coremltools>=5.2 # CoreML export +# onnx>=1.9.0 # ONNX export +# onnx-simplifier>=0.4.1 # ONNX simplifier +# nvidia-pyindex # TensorRT export +# nvidia-tensorrt # TensorRT export +# scikit-learn==0.19.2 # CoreML quantization +# tensorflow>=2.4.1 # TFLite export (or tensorflow-cpu, 
tensorflow-aarch64) +# tensorflowjs>=3.9.0 # TF.js export +# openvino-dev # OpenVINO export + +# Extras -------------------------------------- +ipython # interactive notebook +psutil # system utilization +thop>=0.1.1 # FLOPs computation +# albumentations>=1.0.3 +# pycocotools>=2.0 # COCO mAP +# roboflow + +opencv-python>=4.1.1 diff --git a/examples/python-operator-dataflow/run.rs b/examples/python-operator-dataflow/run.rs new file mode 100644 index 00000000..ac32ff00 --- /dev/null +++ b/examples/python-operator-dataflow/run.rs @@ -0,0 +1,46 @@ +use eyre::{bail, Context}; +use std::{env, path::Path}; + +#[tokio::main] +async fn main() -> eyre::Result<()> { + set_up_tracing().wrap_err("failed to set up tracing subscriber")?; + + let root = Path::new(env!("CARGO_MANIFEST_DIR")); + std::env::set_current_dir(root.join(file!()).parent().unwrap()) + .wrap_err("failed to set working dir")?; + + build_package("dora-daemon").await?; + + run(root).await?; + + Ok(()) +} + +async fn build_package(package: &str) -> eyre::Result<()> { + let cargo = std::env::var("CARGO").unwrap(); + let mut cmd = tokio::process::Command::new(&cargo); + cmd.arg("build"); + cmd.arg("--package").arg(package); + if !cmd.status().await?.success() { + bail!("failed to build {package}"); + }; + Ok(()) +} + +async fn run(_root: &Path) -> eyre::Result<()> { + let mut run = tokio::process::Command::new("sh"); + run.arg("./run.sh"); + if !run.status().await?.success() { + bail!("failed to run python example."); + }; + Ok(()) +} + +fn set_up_tracing() -> eyre::Result<()> { + use tracing_subscriber::prelude::__tracing_subscriber_SubscriberExt; + + let stdout_log = tracing_subscriber::fmt::layer().pretty(); + let subscriber = tracing_subscriber::Registry::default().with(stdout_log); + tracing::subscriber::set_global_default(subscriber) + .context("failed to set tracing global subscriber") +} diff --git a/examples/python-operator-dataflow/run.sh b/examples/python-operator-dataflow/run.sh new file mode 100644 
index 00000000..4a8ac435 --- /dev/null +++ b/examples/python-operator-dataflow/run.sh @@ -0,0 +1,15 @@ +set -e + +python3 -m venv .env +. $(pwd)/.env/bin/activate +# Dev dependencies +pip install maturin +cd ../../apis/python/node +maturin develop +cd ../../../examples/python-operator-dataflow + +# Dependencies +pip install --upgrade pip +pip install -r requirements.txt + +cargo run -p dora-daemon -- --run-dataflow dataflow_without_webcam.yml diff --git a/examples/python-operator-dataflow/utils.py b/examples/python-operator-dataflow/utils.py new file mode 100644 index 00000000..dabc915e --- /dev/null +++ b/examples/python-operator-dataflow/utils.py @@ -0,0 +1,82 @@ +LABELS = [ + "ABC", + "bicycle", + "car", + "motorcycle", + "airplane", + "bus", + "train", + "truck", + "boat", + "traffic light", + "fire hydrant", + "stop sign", + "parking meter", + "bench", + "bird", + "cat", + "dog", + "horse", + "sheep", + "cow", + "elephant", + "bear", + "zebra", + "giraffe", + "backpack", + "umbrella", + "handbag", + "tie", + "suitcase", + "frisbee", + "skis", + "snowboard", + "sports ball", + "kite", + "baseball bat", + "baseball glove", + "skateboard", + "surfboard", + "tennis racket", + "bottle", + "wine glass", + "cup", + "fork", + "knife", + "spoon", + "bowl", + "banana", + "apple", + "sandwich", + "orange", + "broccoli", + "carrot", + "hot dog", + "pizza", + "donut", + "cake", + "chair", + "couch", + "potted plant", + "bed", + "dining table", + "toilet", + "tv", + "laptop", + "mouse", + "remote", + "keyboard", + "cell phone", + "microwave", + "oven", + "toaster", + "sink", + "refrigerator", + "book", + "clock", + "vase", + "scissors", + "teddy bear", + "hair drier", + "toothbrush", +] diff --git a/examples/python-operator-dataflow/webcam.py b/examples/python-operator-dataflow/webcam.py new file mode 100755 index 00000000..cbcaedfc --- /dev/null +++ b/examples/python-operator-dataflow/webcam.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import time 
+ +import cv2 +from dora import Node + +node = Node() + +video_capture = cv2.VideoCapture(0) + +start = time.time() + +# Run for 10 seconds +while time.time() - start < 10: + # Wait for the next dora input + event = node.next() + match event["type"]: + case "INPUT": + ret, frame = video_capture.read() + if ret: + node.send_output("image", cv2.imencode(".jpg", frame)[1].tobytes()) + case "STOP": + print("received stop") + break + case other: + print("received unexpected event:", other) + break + +video_capture.release()