Nodes hub to store and reuse commonly used nodes (tags/v0.3.6-rc0)
| @@ -264,6 +264,22 @@ jobs: | |||
| - uses: actions/checkout@v3 | |||
| - uses: r7kamura/rust-problem-matchers@v1.1.0 | |||
| - run: cargo --version --verbose | |||
| - name: Free Disk Space (Ubuntu) | |||
| uses: jlumbroso/free-disk-space@main | |||
| if: runner.os == 'Linux' | |||
| with: | |||
| # this might remove tools that are actually needed, | |||
| # if set to "true" but frees about 6 GB | |||
| tool-cache: false | |||
| # all of these default to true, but feel free to set to | |||
| # "false" if necessary for your workflow | |||
| android: true | |||
| dotnet: true | |||
| haskell: true | |||
| large-packages: false | |||
| docker-images: true | |||
| swap-storage: false | |||
| - uses: Swatinem/rust-cache@v2 | |||
| with: | |||
| cache-provider: buildjet | |||
| @@ -318,14 +334,15 @@ jobs: | |||
| cd test_python_project | |||
| dora up | |||
| dora list | |||
| dora build dataflow.yml | |||
| dora start dataflow.yml --name ci-python-test --detach | |||
| sleep 10 | |||
| dora stop --name ci-python-test --grace-duration 5s | |||
| pip install "numpy<2.0.0" opencv-python | |||
| dora build ../examples/python-dataflow/dataflow_dynamic.yml | |||
| dora start ../examples/python-dataflow/dataflow_dynamic.yml --name ci-python-dynamic --detach | |||
| python ../examples/python-dataflow/plot_dynamic.py | |||
| opencv-plot --name plot | |||
| sleep 5 | |||
| dora stop --name ci-python-test --grace-duration 5s | |||
| dora stop --name ci-python-dynamic --grace-duration 5s | |||
| dora destroy | |||
| - name: "Test CLI (C)" | |||
| @@ -346,7 +363,7 @@ jobs: | |||
| sleep 10 | |||
| dora stop --name ci-c-test --grace-duration 5s | |||
| dora destroy | |||
| - name: "Test CLI (C++)" | |||
| timeout-minutes: 30 | |||
# fail-fast by using bash shell explicitly
| @@ -30,8 +30,8 @@ members = [ | |||
| "libraries/shared-memory-server", | |||
| "libraries/extensions/download", | |||
| "libraries/extensions/telemetry/*", | |||
| "tool_nodes/dora-record", | |||
| "tool_nodes/dora-rerun", | |||
| "node-hub/dora-record", | |||
| "node-hub/dora-rerun", | |||
| "libraries/extensions/ros2-bridge", | |||
| "libraries/extensions/ros2-bridge/msg-gen", | |||
| "libraries/extensions/ros2-bridge/python", | |||
| @@ -1,20 +1,19 @@ | |||
| use std::{ | |||
| collections::HashMap, | |||
| collections::{BTreeMap, HashMap}, | |||
| sync::{Arc, Mutex}, | |||
| }; | |||
| use arrow::pyarrow::ToPyArrow; | |||
| use dora_node_api::{ | |||
| merged::{MergeExternalSend, MergedEvent}, | |||
| DoraNode, Event, EventStream, Metadata, MetadataParameters, | |||
| DoraNode, Event, EventStream, Metadata, MetadataParameters, Parameter, | |||
| }; | |||
| use eyre::{Context, Result}; | |||
| use futures::{Stream, StreamExt}; | |||
| use futures_concurrency::stream::Merge as _; | |||
| use pyo3::{ | |||
| prelude::*, | |||
| pybacked::PyBackedStr, | |||
| types::{IntoPyDict, PyDict}, | |||
| types::{IntoPyDict, PyBool, PyDict, PyInt, PyString}, | |||
| }; | |||
| /// Dora Event | |||
| @@ -94,7 +93,7 @@ impl PyEvent { | |||
| if let Some(value) = self.value(py)? { | |||
| pydict.insert("value", value); | |||
| } | |||
| if let Some(metadata) = Self::metadata(event, py) { | |||
| if let Some(metadata) = Self::metadata(event, py)? { | |||
| pydict.insert("metadata", metadata); | |||
| } | |||
| if let Some(error) = Self::error(event) { | |||
| @@ -143,10 +142,14 @@ impl PyEvent { | |||
| } | |||
| } | |||
| fn metadata(event: &Event, py: Python<'_>) -> Option<PyObject> { | |||
| fn metadata(event: &Event, py: Python<'_>) -> Result<Option<PyObject>> { | |||
| match event { | |||
| Event::Input { metadata, .. } => Some(metadata_to_pydict(metadata, py).to_object(py)), | |||
| _ => None, | |||
| Event::Input { metadata, .. } => Ok(Some( | |||
| metadata_to_pydict(metadata, py) | |||
| .context("Issue deserializing metadata")? | |||
| .to_object(py), | |||
| )), | |||
| _ => Ok(None), | |||
| } | |||
| } | |||
| @@ -159,44 +162,45 @@ impl PyEvent { | |||
| } | |||
| pub fn pydict_to_metadata(dict: Option<Bound<'_, PyDict>>) -> Result<MetadataParameters> { | |||
| let mut default_metadata = MetadataParameters::default(); | |||
| if let Some(metadata) = dict { | |||
| for (key, value) in metadata.iter() { | |||
| match key | |||
| .extract::<PyBackedStr>() | |||
| .context("Parsing metadata keys")? | |||
| .as_ref() | |||
| { | |||
| "watermark" => { | |||
| default_metadata.watermark = | |||
| value.extract().context("parsing watermark failed")?; | |||
| } | |||
| "deadline" => { | |||
| default_metadata.deadline = | |||
| value.extract().context("parsing deadline failed")?; | |||
| } | |||
| "open_telemetry_context" => { | |||
| let otel_context: PyBackedStr = value | |||
| .extract() | |||
| .context("parsing open telemetry context failed")?; | |||
| default_metadata.open_telemetry_context = otel_context.to_string(); | |||
| } | |||
| _ => (), | |||
| } | |||
| let mut parameters = BTreeMap::default(); | |||
| if let Some(pymetadata) = dict { | |||
| for (key, value) in pymetadata.iter() { | |||
| let key = key.extract::<String>().context("Parsing metadata keys")?; | |||
| if value.is_exact_instance_of::<PyBool>() { | |||
| parameters.insert(key, Parameter::Bool(value.extract()?)) | |||
| } else if value.is_instance_of::<PyInt>() { | |||
| parameters.insert(key, Parameter::Integer(value.extract::<i64>()?)) | |||
| } else if value.is_instance_of::<PyString>() { | |||
| parameters.insert(key, Parameter::String(value.extract()?)) | |||
| } else { | |||
| println!("could not convert type {value}"); | |||
| parameters.insert(key, Parameter::String(value.str()?.to_string())) | |||
| }; | |||
| } | |||
| } | |||
| Ok(default_metadata) | |||
| Ok(parameters) | |||
| } | |||
| pub fn metadata_to_pydict<'a>(metadata: &'a Metadata, py: Python<'a>) -> pyo3::Bound<'a, PyDict> { | |||
| pub fn metadata_to_pydict<'a>( | |||
| metadata: &'a Metadata, | |||
| py: Python<'a>, | |||
| ) -> Result<pyo3::Bound<'a, PyDict>> { | |||
| let dict = PyDict::new_bound(py); | |||
| dict.set_item( | |||
| "open_telemetry_context", | |||
| &metadata.parameters.open_telemetry_context, | |||
| ) | |||
| .wrap_err("could not make metadata a python dictionary item") | |||
| .unwrap(); | |||
| dict | |||
| for (k, v) in metadata.parameters.iter() { | |||
| match v { | |||
| Parameter::Bool(bool) => dict | |||
| .set_item(k, bool) | |||
| .context(format!("Could not insert metadata into python dictionary"))?, | |||
| Parameter::Integer(int) => dict | |||
| .set_item(k, int) | |||
| .context(format!("Could not insert metadata into python dictionary"))?, | |||
| Parameter::String(s) => dict | |||
| .set_item(k, s) | |||
| .context(format!("Could not insert metadata into python dictionary"))?, | |||
| } | |||
| } | |||
| Ok(dict) | |||
| } | |||
| #[cfg(test)] | |||
| @@ -16,7 +16,7 @@ | |||
| pub use arrow; | |||
| pub use dora_arrow_convert::*; | |||
| pub use dora_core; | |||
| pub use dora_core::message::{uhlc, Metadata, MetadataParameters}; | |||
| pub use dora_core::message::{uhlc, Metadata, MetadataParameters, Parameter}; | |||
| pub use event_stream::{merged, Event, EventStream, MappedInputData, RawData}; | |||
| pub use flume::Receiver; | |||
| pub use node::{arrow_utils, DataSample, DoraNode, ZERO_COPY_THRESHOLD}; | |||
| @@ -250,11 +250,7 @@ impl DoraNode { | |||
| if !self.node_config.outputs.contains(&output_id) { | |||
| eyre::bail!("unknown output"); | |||
| } | |||
| let metadata = Metadata::from_parameters( | |||
| self.clock.new_timestamp(), | |||
| type_info, | |||
| parameters.into_owned(), | |||
| ); | |||
| let metadata = Metadata::from_parameters(self.clock.new_timestamp(), type_info, parameters); | |||
| let (data, shmem) = match sample { | |||
| Some(sample) => sample.finalize(), | |||
| @@ -8,7 +8,7 @@ use dora_core::daemon_messages::{ | |||
| }; | |||
| use dora_core::descriptor::runtime_node_inputs; | |||
| use dora_core::message::uhlc::{self, HLC}; | |||
| use dora_core::message::{ArrowTypeInfo, Metadata, MetadataParameters}; | |||
| use dora_core::message::{ArrowTypeInfo, Metadata}; | |||
| use dora_core::topics::LOCALHOST; | |||
| use dora_core::topics::{ | |||
| DataflowDaemonResult, DataflowResult, NodeError, NodeErrorCause, NodeExitStatus, | |||
| @@ -23,6 +23,7 @@ use dora_core::{ | |||
| descriptor::{CoreNodeKind, Descriptor, ResolvedNode}, | |||
| }; | |||
| use dora_node_api::Parameter; | |||
| use eyre::{bail, eyre, Context, ContextCompat, Result}; | |||
| use futures::{future, stream, FutureExt, TryFutureExt}; | |||
| use futures_concurrency::stream::Merge; | |||
| @@ -1543,17 +1544,19 @@ impl RunningDataflow { | |||
| let span = tracing::span!(tracing::Level::TRACE, "tick"); | |||
| let _ = span.enter(); | |||
| let mut parameters = BTreeMap::new(); | |||
| parameters.insert( | |||
| "open_telemetry_context".to_string(), | |||
| #[cfg(feature = "telemetry")] | |||
| Parameter::String(serialize_context(&span.context())), | |||
| #[cfg(not(feature = "telemetry"))] | |||
| Parameter::String("".into()), | |||
| ); | |||
| let metadata = dora_core::message::Metadata::from_parameters( | |||
| hlc.new_timestamp(), | |||
| ArrowTypeInfo::empty(), | |||
| MetadataParameters { | |||
| watermark: 0, | |||
| deadline: 0, | |||
| #[cfg(feature = "telemetry")] | |||
| open_telemetry_context: serialize_context(&span.context()), | |||
| #[cfg(not(feature = "telemetry"))] | |||
| open_telemetry_context: "".into(), | |||
| }, | |||
| parameters, | |||
| ); | |||
| let event = Timestamped { | |||
| @@ -6,7 +6,7 @@ use dora_core::{ | |||
| descriptor::{source_is_url, Descriptor, PythonSource}, | |||
| }; | |||
| use dora_download::download_file; | |||
| use dora_node_api::{merged::MergedEvent, Event}; | |||
| use dora_node_api::{merged::MergedEvent, Event, Parameter}; | |||
| use dora_operator_api_python::PyEvent; | |||
| use dora_operator_api_types::DoraStatus; | |||
| use eyre::{bail, eyre, Context, Result}; | |||
| @@ -201,11 +201,15 @@ pub fn run( | |||
| use tracing_opentelemetry::OpenTelemetrySpanExt; | |||
| span.record("input_id", input_id.as_str()); | |||
| let cx = deserialize_context(&metadata.parameters.open_telemetry_context); | |||
| let otel = metadata.open_telemetry_context(); | |||
| let cx = deserialize_context(&otel); | |||
| span.set_parent(cx); | |||
| let cx = span.context(); | |||
| let string_cx = serialize_context(&cx); | |||
| metadata.parameters.open_telemetry_context = string_cx; | |||
| metadata.parameters.insert( | |||
| "open_telemetry_context".to_string(), | |||
| Parameter::String(string_cx), | |||
| ); | |||
| } | |||
| let py_event = PyEvent { | |||
| @@ -317,17 +321,22 @@ mod callback_impl { | |||
| metadata: Option<Bound<'_, PyDict>>, | |||
| py: Python, | |||
| ) -> Result<()> { | |||
| let parameters = pydict_to_metadata(metadata) | |||
| .wrap_err("failed to parse metadata")? | |||
| .into_owned(); | |||
| let parameters = pydict_to_metadata(metadata).wrap_err("failed to parse metadata")?; | |||
| let span = span!( | |||
| tracing::Level::TRACE, | |||
| "send_output", | |||
| output_id = field::Empty | |||
| ); | |||
| span.record("output_id", output); | |||
| let otel = if let Some(dora_node_api::Parameter::String(otel)) = | |||
| parameters.get("open_telemetry_context") | |||
| { | |||
| otel.to_string() | |||
| } else { | |||
| "".to_string() | |||
| }; | |||
| let cx = deserialize_context(¶meters.open_telemetry_context); | |||
| let cx = deserialize_context(&otel); | |||
| span.set_parent(cx); | |||
| let _ = span.enter(); | |||
| @@ -8,7 +8,7 @@ use dora_core::{ | |||
| use dora_download::download_file; | |||
| use dora_node_api::{ | |||
| arrow_utils::{copy_array_into_sample, required_data_size}, | |||
| Event, MetadataParameters, | |||
| Event, Parameter, | |||
| }; | |||
| use dora_operator_api_types::{ | |||
| safer_ffi::closure::ArcDynFn1, DoraDropOperator, DoraInitOperator, DoraInitResult, DoraOnEvent, | |||
| @@ -17,6 +17,7 @@ use dora_operator_api_types::{ | |||
| use eyre::{bail, eyre, Context, Result}; | |||
| use libloading::Symbol; | |||
| use std::{ | |||
| collections::BTreeMap, | |||
| ffi::c_void, | |||
| panic::{catch_unwind, AssertUnwindSafe}, | |||
| path::Path, | |||
| @@ -119,10 +120,11 @@ impl<'lib> SharedLibraryOperator<'lib> { | |||
| open_telemetry_context, | |||
| }, | |||
| } = output; | |||
| let parameters = MetadataParameters { | |||
| open_telemetry_context: open_telemetry_context.into(), | |||
| ..Default::default() | |||
| }; | |||
| let mut parameters = BTreeMap::new(); | |||
| parameters.insert( | |||
| "open_telemetry_context".to_string(), | |||
| Parameter::String(open_telemetry_context.to_string()), | |||
| ); | |||
| let arrow_array = match unsafe { arrow::ffi::from_ffi(data_array, &schema) } { | |||
| Ok(a) => a, | |||
| @@ -173,11 +175,15 @@ impl<'lib> SharedLibraryOperator<'lib> { | |||
| use tracing_opentelemetry::OpenTelemetrySpanExt; | |||
| span.record("input_id", input_id.as_str()); | |||
| let cx = deserialize_context(&metadata.parameters.open_telemetry_context); | |||
| let otel = metadata.open_telemetry_context(); | |||
| let cx = deserialize_context(&otel); | |||
| span.set_parent(cx); | |||
| let cx = span.context(); | |||
| let string_cx = serialize_context(&cx); | |||
| metadata.parameters.open_telemetry_context = string_cx; | |||
| metadata.parameters.insert( | |||
| "open_telemetry_context".to_string(), | |||
| Parameter::String(string_cx), | |||
| ); | |||
| } | |||
| let mut operator_event = match event { | |||
| @@ -193,16 +199,13 @@ impl<'lib> SharedLibraryOperator<'lib> { | |||
| data, | |||
| } => { | |||
| let (data_array, schema) = arrow::ffi::to_ffi(&data.to_data())?; | |||
| let otel = metadata.open_telemetry_context(); | |||
| let operator_input = dora_operator_api_types::Input { | |||
| id: String::from(input_id).into(), | |||
| data_array: Some(data_array), | |||
| schema, | |||
| metadata: Metadata { | |||
| open_telemetry_context: metadata | |||
| .parameters | |||
| .open_telemetry_context | |||
| .into(), | |||
| open_telemetry_context: otel.into(), | |||
| }, | |||
| }; | |||
| dora_operator_api_types::RawEvent { | |||
| @@ -1,25 +1,41 @@ | |||
# Python Dataflow Example
This example shows how to create and connect dora operators and custom nodes in Python.
This example shows how to create and connect dora nodes in Python.
## Overview
The [`dataflow.yml`](./dataflow.yml) defines a simple dataflow graph with the following three nodes:
- a webcam node that connects to your webcam and feeds the dataflow with webcam frames as JPEG-compressed bytearrays.
- an object detection node that applies YOLOv5 to the webcam image. The model is imported from PyTorch Hub. The output is the bounding box, confidence, and class of each detected object. More info: https://pytorch.org/hub/ultralytics_yolov5/
- a window plotting node that retrieves the webcam image and the YOLOv5 bounding boxes and joins the two together.
- a window plotting node that retrieves the webcam image and plots it.
The same dataflow is implemented for a `dynamic-node` in [`dataflow_dynamic.yml`](./dataflow_dynamic.yml). It contains
the same nodes as the previous dataflow, but the plot node is a dynamic node. See the next section for more
information on how to start such a dataflow.
| ## Getting started | |||
| After installing Rust, `dora-cli` and `Python >3.11`, you will need to **activate** (or create and **activate**) a | |||
| [Python virtual environment](https://docs.python.org/3/library/venv.html). | |||
| Then, you will need to install the dependencies: | |||
| ```bash | |||
| cargo run --example python-dataflow | |||
| cd examples/python-dataflow | |||
| dora build ./dataflow.yml (or dora build ./dataflow_dynamic.yml) | |||
| ``` | |||
## Run the dataflow as a standalone
- Start the `dora-daemon`:
```
../../target/release/dora-daemon --run-dataflow dataflow.yml
```
It will install the required dependencies for the Python nodes.
Then you can run the dataflow:
```bash
dora up
dora start ./dataflow.yml (or dora start ./dataflow_dynamic.yml)
```
**Note**: if you're running the dynamic dataflow, you will need to manually start the opencv-plot node:
```bash
# activate your virtual environment in another terminal
opencv-plot --name plot
```
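Under the hood, a dynamic node simply attaches to the running dataflow by the name declared in the YAML. A minimal sketch (assuming the dataflow above is already started and `dora-rs` is installed in the active environment):

```python
from dora import Node

# "plot" must match the id declared for the dynamic node in dataflow_dynamic.yml
node = Node("plot")

# blocks until the dataflow delivers the next event (e.g. a camera image)
event = node.next()
```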
| @@ -1,25 +1,33 @@ | |||
| nodes: | |||
| - id: webcam | |||
| custom: | |||
| source: ./webcam.py | |||
| inputs: | |||
| tick: | |||
| source: dora/timer/millis/50 | |||
| queue_size: 1000 | |||
| outputs: | |||
| - image | |||
| - id: camera | |||
| build: pip install ../../node-hub/opencv-video-capture | |||
| path: opencv-video-capture | |||
| inputs: | |||
| tick: dora/timer/millis/20 | |||
| outputs: | |||
| - image | |||
| env: | |||
| CAPTURE_PATH: 0 | |||
| IMAGE_WIDTH: 640 | |||
| IMAGE_HEIGHT: 480 | |||
| - id: object_detection | |||
| custom: | |||
| source: ./object_detection.py | |||
| inputs: | |||
| image: webcam/image | |||
| outputs: | |||
| - bbox | |||
| - id: object-detection | |||
| build: pip install ../../node-hub/ultralytics-yolo | |||
| path: ultralytics-yolo | |||
| inputs: | |||
| image: | |||
| source: camera/image | |||
| queue_size: 1 | |||
| outputs: | |||
| - bbox | |||
| env: | |||
| MODEL: yolov8n.pt | |||
| - id: plot | |||
| custom: | |||
| source: ./plot.py | |||
| inputs: | |||
| image: webcam/image | |||
| bbox: object_detection/bbox | |||
| build: pip install ../../node-hub/opencv-plot | |||
| path: opencv-plot | |||
| inputs: | |||
| image: | |||
| source: camera/image | |||
| queue_size: 1 | |||
| bbox: object-detection/bbox | |||
| @@ -1,16 +1,18 @@ | |||
| nodes: | |||
| - id: webcam | |||
| custom: | |||
| source: ./webcam.py | |||
| inputs: | |||
| tick: | |||
| source: dora/timer/millis/50 | |||
| queue_size: 1000 | |||
| outputs: | |||
| - image | |||
| - id: camera | |||
| build: pip install ../../node-hub/opencv-video-capture | |||
| path: opencv-video-capture | |||
| inputs: | |||
| tick: dora/timer/millis/16 | |||
| outputs: | |||
| - image | |||
| env: | |||
| CAPTURE_PATH: 0 | |||
| IMAGE_WIDTH: 640 | |||
| IMAGE_HEIGHT: 480 | |||
| - id: plot | |||
| custom: | |||
| source: dynamic | |||
| inputs: | |||
| image: webcam/image | |||
| build: pip install ../../node-hub/opencv-plot | |||
| path: dynamic | |||
| inputs: | |||
| image: camera/image | |||
| @@ -1,5 +0,0 @@ | |||
| from dora import Node | |||
| node = Node("plot") | |||
| event = node.next() | |||
| @@ -1,40 +0,0 @@ | |||
| #!/usr/bin/env python3 | |||
| # -*- coding: utf-8 -*- | |||
| import cv2 | |||
| import numpy as np | |||
| from ultralytics import YOLO | |||
| from dora import Node | |||
| import pyarrow as pa | |||
| model = YOLO("yolov8n.pt") | |||
| node = Node() | |||
| for event in node: | |||
| event_type = event["type"] | |||
| if event_type == "INPUT": | |||
| event_id = event["id"] | |||
| if event_id == "image": | |||
| print("[object detection] received image input") | |||
| frame = event["value"].to_numpy() | |||
| frame = cv2.imdecode(frame, -1) | |||
| frame = frame[:, :, ::-1] # OpenCV image (BGR to RGB) | |||
| results = model(frame) # includes NMS | |||
| # Process results | |||
| boxes = np.array(results[0].boxes.xyxy.cpu()) | |||
| conf = np.array(results[0].boxes.conf.cpu()) | |||
| label = np.array(results[0].boxes.cls.cpu()) | |||
| # concatenate them together | |||
| arrays = np.concatenate((boxes, conf[:, None], label[:, None]), axis=1) | |||
| node.send_output("bbox", pa.array(arrays.ravel()), event["metadata"]) | |||
| else: | |||
| print("[object detection] ignoring unexpected input:", event_id) | |||
| elif event_type == "STOP": | |||
| print("[object detection] received stop") | |||
| elif event_type == "ERROR": | |||
| print("[object detection] error: ", event["error"]) | |||
| else: | |||
| print("[object detection] received unexpected event:", event_type) | |||
| @@ -1,96 +0,0 @@ | |||
| #!/usr/bin/env python3 | |||
| # -*- coding: utf-8 -*- | |||
| import os | |||
| from dora import Node | |||
| from dora import DoraStatus | |||
| import cv2 | |||
| import numpy as np | |||
| from utils import LABELS | |||
| CI = os.environ.get("CI") | |||
| font = cv2.FONT_HERSHEY_SIMPLEX | |||
| class Plotter: | |||
| """ | |||
| Plot image and bounding box | |||
| """ | |||
| def __init__(self): | |||
| self.image = [] | |||
| self.bboxs = [] | |||
| def on_input( | |||
| self, | |||
| dora_input, | |||
| ) -> DoraStatus: | |||
| """ | |||
| Put image and bounding box on cv2 window. | |||
| Args: | |||
| dora_input["id"] (str): Id of the dora_input declared in the yaml configuration | |||
| dora_input["value"] (arrow array): message of the dora_input | |||
| """ | |||
| if dora_input["id"] == "image": | |||
| frame = dora_input["value"].to_numpy() | |||
| frame = cv2.imdecode(frame, -1) | |||
| self.image = frame | |||
| elif dora_input["id"] == "bbox" and len(self.image) != 0: | |||
| bboxs = dora_input["value"].to_numpy() | |||
| self.bboxs = np.reshape(bboxs, (-1, 6)) | |||
| for bbox in self.bboxs: | |||
| [ | |||
| min_x, | |||
| min_y, | |||
| max_x, | |||
| max_y, | |||
| confidence, | |||
| label, | |||
| ] = bbox | |||
| cv2.rectangle( | |||
| self.image, | |||
| (int(min_x), int(min_y)), | |||
| (int(max_x), int(max_y)), | |||
| (0, 255, 0), | |||
| 2, | |||
| ) | |||
| cv2.putText( | |||
| self.image, | |||
| LABELS[int(label)] + f", {confidence:0.2f}", | |||
| (int(max_x), int(max_y)), | |||
| font, | |||
| 0.75, | |||
| (0, 255, 0), | |||
| 2, | |||
| 1, | |||
| ) | |||
| if CI != "true": | |||
| cv2.imshow("frame", self.image) | |||
| if cv2.waitKey(1) & 0xFF == ord("q"): | |||
| return DoraStatus.STOP | |||
| return DoraStatus.CONTINUE | |||
| plotter = Plotter() | |||
| node = Node() | |||
| for event in node: | |||
| event_type = event["type"] | |||
| if event_type == "INPUT": | |||
| status = plotter.on_input(event) | |||
| if status == DoraStatus.CONTINUE: | |||
| pass | |||
| elif status == DoraStatus.STOP: | |||
| print("plotter returned stop status") | |||
| break | |||
| elif event_type == "STOP": | |||
| print("received stop") | |||
| else: | |||
| print("received unexpected event:", event_type) | |||
| @@ -1,97 +0,0 @@ | |||
| #!/usr/bin/env python3 | |||
| # -*- coding: utf-8 -*- | |||
| import os | |||
| from dora import Node | |||
| from dora import DoraStatus | |||
| import cv2 | |||
| import numpy as np | |||
| from utils import LABELS | |||
| CI = os.environ.get("CI") | |||
| font = cv2.FONT_HERSHEY_SIMPLEX | |||
| class Plotter: | |||
| """ | |||
| Plot image and bounding box | |||
| """ | |||
| def __init__(self): | |||
| self.image = [] | |||
| self.bboxs = [] | |||
| def on_input( | |||
| self, | |||
| dora_input, | |||
| ) -> DoraStatus: | |||
| """ | |||
| Put image and bounding box on cv2 window. | |||
| Args: | |||
| dora_input["id"] (str): Id of the dora_input declared in the yaml configuration | |||
| dora_input["value"] (arrow array): message of the dora_input | |||
| """ | |||
| if dora_input["id"] == "image": | |||
| frame = dora_input["value"].to_numpy() | |||
| frame = cv2.imdecode(frame, -1) | |||
| self.image = frame | |||
| elif dora_input["id"] == "bbox" and len(self.image) != 0: | |||
| bboxs = dora_input["value"].to_numpy() | |||
| self.bboxs = np.reshape(bboxs, (-1, 6)) | |||
| for bbox in self.bboxs: | |||
| [ | |||
| min_x, | |||
| min_y, | |||
| max_x, | |||
| max_y, | |||
| confidence, | |||
| label, | |||
| ] = bbox | |||
| cv2.rectangle( | |||
| self.image, | |||
| (int(min_x), int(min_y)), | |||
| (int(max_x), int(max_y)), | |||
| (0, 255, 0), | |||
| 2, | |||
| ) | |||
| cv2.putText( | |||
| self.image, | |||
| LABELS[int(label)] + f", {confidence:0.2f}", | |||
| (int(max_x), int(max_y)), | |||
| font, | |||
| 0.75, | |||
| (0, 255, 0), | |||
| 2, | |||
| 1, | |||
| ) | |||
| if CI != "true": | |||
| cv2.imshow("frame", self.image) | |||
| if cv2.waitKey(1) & 0xFF == ord("q"): | |||
| return DoraStatus.STOP | |||
| return DoraStatus.CONTINUE | |||
| plotter = Plotter() | |||
| node = Node("plot") | |||
| for event in node: | |||
| event_type = event["type"] | |||
| if event_type == "INPUT": | |||
| status = plotter.on_input(event) | |||
| if status == DoraStatus.CONTINUE: | |||
| pass | |||
| elif status == DoraStatus.STOP: | |||
| print("plotter returned stop status") | |||
| break | |||
| elif event_type == "STOP": | |||
| print("received stop") | |||
| else: | |||
| print("received unexpected event:", event_type) | |||
| @@ -1,47 +0,0 @@ | |||
| # YOLOv5 requirements | |||
| # Usage: pip install -r requirements.txt | |||
| # Base ---------------------------------------- | |||
| ultralytics | |||
| gitpython | |||
| ipython # interactive notebook | |||
| matplotlib>=3.2.2 | |||
| numpy<2.0.0 # See: https://github.com/opencv/opencv-python/issues/997 | |||
| opencv-python>=4.1.1 | |||
| Pillow>=7.1.2 | |||
| psutil # system resources | |||
| PyYAML>=5.3.1 | |||
| requests>=2.23.0 | |||
| scipy>=1.4.1 | |||
| thop>=0.1.1 # FLOPs computation | |||
| torch # see https://pytorch.org/get-started/locally (recommended) | |||
| torchvision | |||
| tqdm>=4.64.0 | |||
| # Logging ------------------------------------- | |||
| tensorboard>=2.4.1 | |||
| # wandb | |||
| # clearml | |||
| # Plotting ------------------------------------ | |||
| pandas>=1.1.4 | |||
| seaborn>=0.11.0 | |||
| # Export -------------------------------------- | |||
| # coremltools>=5.2 # CoreML export | |||
| # onnx>=1.9.0 # ONNX export | |||
| # onnx-simplifier>=0.4.1 # ONNX simplifier | |||
| # nvidia-pyindex # TensorRT export | |||
| # nvidia-tensorrt # TensorRT export | |||
| # scikit-learn==0.19.2 # CoreML quantization | |||
| # tensorflow>=2.4.1 # TFLite export (or tensorflow-cpu, tensorflow-aarch64) | |||
| # tensorflowjs>=3.9.0 # TF.js export | |||
| # openvino-dev # OpenVINO export | |||
| # Extras -------------------------------------- | |||
| # albumentations>=1.0.3 | |||
| # pycocotools>=2.0 # COCO mAP | |||
| # roboflow | |||
| opencv-python>=4.1.1 | |||
| maturin | |||
| @@ -50,20 +50,13 @@ async fn main() -> eyre::Result<()> { | |||
| ); | |||
| } | |||
| run( | |||
| get_python_path().context("Could not get pip binary")?, | |||
| &["-m", "pip", "install", "--upgrade", "pip"], | |||
| None, | |||
| ) | |||
| .await | |||
| .context("failed to install pip")?; | |||
| run( | |||
| get_pip_path().context("Could not get pip binary")?, | |||
| &["install", "-r", "requirements.txt"], | |||
| None, | |||
| &["install", "maturin"], | |||
| Some(venv), | |||
| ) | |||
| .await | |||
| .context("pip install failed")?; | |||
| .context("pip install maturin failed")?; | |||
| run( | |||
| "maturin", | |||
| @@ -81,6 +74,16 @@ async fn main() -> eyre::Result<()> { | |||
| async fn run_dataflow(dataflow: &Path) -> eyre::Result<()> { | |||
| let cargo = std::env::var("CARGO").unwrap(); | |||
| // First build the dataflow (install requirements) | |||
| let mut cmd = tokio::process::Command::new(&cargo); | |||
| cmd.arg("run"); | |||
| cmd.arg("--package").arg("dora-cli"); | |||
| cmd.arg("--").arg("build").arg(dataflow); | |||
| if !cmd.status().await?.success() { | |||
| bail!("failed to run dataflow"); | |||
| }; | |||
| let mut cmd = tokio::process::Command::new(&cargo); | |||
| cmd.arg("run"); | |||
| cmd.arg("--package").arg("dora-cli"); | |||
| @@ -1,82 +0,0 @@ | |||
| LABELS = [ | |||
| "ABC", | |||
| "bicycle", | |||
| "car", | |||
| "motorcycle", | |||
| "airplane", | |||
| "bus", | |||
| "train", | |||
| "truck", | |||
| "boat", | |||
| "traffic light", | |||
| "fire hydrant", | |||
| "stop sign", | |||
| "parking meter", | |||
| "bench", | |||
| "bird", | |||
| "cat", | |||
| "dog", | |||
| "horse", | |||
| "sheep", | |||
| "cow", | |||
| "elephant", | |||
| "bear", | |||
| "zebra", | |||
| "giraffe", | |||
| "backpack", | |||
| "umbrella", | |||
| "handbag", | |||
| "tie", | |||
| "suitcase", | |||
| "frisbee", | |||
| "skis", | |||
| "snowboard", | |||
| "sports ball", | |||
| "kite", | |||
| "baseball bat", | |||
| "baseball glove", | |||
| "skateboard", | |||
| "surfboard", | |||
| "tennis racket", | |||
| "bottle", | |||
| "wine glass", | |||
| "cup", | |||
| "fork", | |||
| "knife", | |||
| "spoon", | |||
| "bowl", | |||
| "banana", | |||
| "apple", | |||
| "sandwich", | |||
| "orange", | |||
| "broccoli", | |||
| "carrot", | |||
| "hot dog", | |||
| "pizza", | |||
| "donut", | |||
| "cake", | |||
| "chair", | |||
| "couch", | |||
| "potted plant", | |||
| "bed", | |||
| "dining table", | |||
| "toilet", | |||
| "tv", | |||
| "laptop", | |||
| "mouse", | |||
| "remote", | |||
| "keyboard", | |||
| "cell phone", | |||
| "microwave", | |||
| "oven", | |||
| "toaster", | |||
| "sink", | |||
| "refrigerator", | |||
| "book", | |||
| "clock", | |||
| "vase", | |||
| "scissors", | |||
| "teddy bear", | |||
| "hair drier", | |||
| "toothbrush", | |||
| ] | |||
| @@ -1,52 +0,0 @@ | |||
| #!/usr/bin/env python3 | |||
| # -*- coding: utf-8 -*- | |||
| import os | |||
| import time | |||
| import numpy as np | |||
| import cv2 | |||
| from dora import Node | |||
| node = Node() | |||
| CAMERA_INDEX = int(os.getenv("CAMERA_INDEX", 0)) | |||
| CAMERA_WIDTH = 640 | |||
| CAMERA_HEIGHT = 480 | |||
| video_capture = cv2.VideoCapture(CAMERA_INDEX) | |||
| font = cv2.FONT_HERSHEY_SIMPLEX | |||
| start = time.time() | |||
| # Run for 20 seconds | |||
| while time.time() - start < 10: | |||
| # Wait next dora_input | |||
| event = node.next() | |||
| event_type = event["type"] | |||
| if event_type == "INPUT": | |||
| ret, frame = video_capture.read() | |||
| if not ret: | |||
| frame = np.zeros((CAMERA_HEIGHT, CAMERA_WIDTH, 3), dtype=np.uint8) | |||
| cv2.putText( | |||
| frame, | |||
| "No Webcam was found at index %d" % (CAMERA_INDEX), | |||
| (int(30), int(30)), | |||
| font, | |||
| 0.75, | |||
| (255, 255, 255), | |||
| 2, | |||
| 1, | |||
| ) | |||
| node.send_output( | |||
| "image", | |||
| cv2.imencode(".jpg", frame)[1].tobytes(), | |||
| event["metadata"], | |||
| ) | |||
| elif event_type == "STOP": | |||
| print("received stop") | |||
| break | |||
| else: | |||
| print("received unexpected event:", event_type) | |||
| break | |||
| video_capture.release() | |||
| @@ -1,48 +1,38 @@ | |||
| nodes: | |||
| - id: webcam | |||
| custom: | |||
| source: ./webcam.py | |||
| inputs: | |||
| tick: | |||
| source: dora/timer/millis/10 | |||
| queue_size: 1000 | |||
| outputs: | |||
| - image | |||
| - text | |||
| envs: | |||
| IMAGE_WIDTH: 960 | |||
| IMAGE_HEIGHT: 540 | |||
| - id: camera | |||
| build: pip install ../../node-hub/opencv-video-capture | |||
| path: opencv-video-capture | |||
| inputs: | |||
| tick: dora/timer/millis/20 | |||
| outputs: | |||
| - image | |||
| env: | |||
| CAPTURE_PATH: 0 | |||
| IMAGE_WIDTH: 640 | |||
| IMAGE_HEIGHT: 480 | |||
| ENCODING: rgb8 | |||
| - id: object_detection | |||
| custom: | |||
| source: ./object_detection.py | |||
| inputs: | |||
| image: webcam/image | |||
| outputs: | |||
| - bbox | |||
| envs: | |||
| IMAGE_WIDTH: 960 | |||
| IMAGE_HEIGHT: 540 | |||
| - id: object-detection | |||
| build: pip install -e ../../node-hub/ultralytics-yolo | |||
| path: ultralytics-yolo | |||
| inputs: | |||
| image: | |||
| source: camera/image | |||
| queue_size: 1 | |||
| outputs: | |||
| - bbox | |||
| env: | |||
| MODEL: yolov8n.pt | |||
| FORMAT: xywh | |||
| - id: rerun | |||
| custom: | |||
| source: dora-rerun | |||
| inputs: | |||
| image: webcam/image | |||
| text: webcam/text | |||
| boxes2d: object_detection/bbox | |||
| envs: | |||
| IMAGE_WIDTH: 960 | |||
| IMAGE_HEIGHT: 540 | |||
| IMAGE_DEPTH: 3 | |||
| - id: matplotlib | |||
| custom: | |||
| source: ./plot.py | |||
| inputs: | |||
| image: webcam/image | |||
| bbox: object_detection/bbox | |||
| envs: | |||
| IMAGE_WIDTH: 960 | |||
| IMAGE_HEIGHT: 540 | |||
| build: cargo build -p dora-rerun --release | |||
| path: dora-rerun | |||
| inputs: | |||
| image: | |||
| source: camera/image | |||
| queue_size: 1 | |||
| boxes2d: object-detection/bbox | |||
| env: | |||
| RERUN_FLUSH_TICK_SECS: "0.001" | |||
| RERUN_MEMORY_LIMIT: 25% | |||
| @@ -1,45 +0,0 @@ | |||
| #!/usr/bin/env python3 | |||
| # -*- coding: utf-8 -*- | |||
| import os | |||
| import cv2 | |||
| import numpy as np | |||
| from ultralytics import YOLO | |||
| from dora import Node | |||
| import pyarrow as pa | |||
| model = YOLO("yolov8n.pt") | |||
| node = Node() | |||
| IMAGE_WIDTH = int(os.getenv("IMAGE_WIDTH", 960)) | |||
| IMAGE_HEIGHT = int(os.getenv("IMAGE_HEIGHT", 540)) | |||
| for event in node: | |||
| event_type = event["type"] | |||
| if event_type == "INPUT": | |||
| event_id = event["id"] | |||
| if event_id == "image": | |||
| print("[object detection] received image input") | |||
| image = event["value"].to_numpy().reshape((IMAGE_HEIGHT, IMAGE_WIDTH, 3)) | |||
| frame = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) | |||
| frame = frame[:, :, ::-1] # OpenCV image (BGR to RGB) | |||
| results = model(frame) # includes NMS | |||
| # Process results | |||
| boxes = np.array(results[0].boxes.xywh.cpu()) | |||
| conf = np.array(results[0].boxes.conf.cpu()) | |||
| label = np.array(results[0].boxes.cls.cpu()) | |||
| # concatenate them together | |||
| arrays = np.concatenate((boxes, conf[:, None], label[:, None]), axis=1) | |||
| node.send_output("bbox", pa.array(arrays.ravel()), event["metadata"]) | |||
| else: | |||
| print("[object detection] ignoring unexpected input:", event_id) | |||
| elif event_type == "STOP": | |||
| print("[object detection] received stop") | |||
| elif event_type == "ERROR": | |||
| print("[object detection] error: ", event["error"]) | |||
| else: | |||
| print("[object detection] received unexpected event:", event_type) | |||
| @@ -1,90 +0,0 @@ | |||
| #!/usr/bin/env python3 | |||
| # -*- coding: utf-8 -*- | |||
| import os | |||
| from dora import Node | |||
| from dora import DoraStatus | |||
| import cv2 | |||
| import numpy as np | |||
| CI = os.environ.get("CI") | |||
| font = cv2.FONT_HERSHEY_SIMPLEX | |||
| IMAGE_WIDTH = int(os.getenv("IMAGE_WIDTH", 960)) | |||
| IMAGE_HEIGHT = int(os.getenv("IMAGE_HEIGHT", 540)) | |||
| class Plotter: | |||
| """ | |||
| Plot image and bounding box | |||
| """ | |||
| def __init__(self): | |||
| self.image = [] | |||
| self.bboxs = [] | |||
| def on_input( | |||
| self, | |||
| dora_input, | |||
| ) -> DoraStatus: | |||
| """ | |||
| Put image and bounding box on cv2 window. | |||
| Args: | |||
| dora_input["id"] (str): Id of the dora_input declared in the yaml configuration | |||
| dora_input["value"] (arrow array): message of the dora_input | |||
| """ | |||
| if dora_input["id"] == "image": | |||
| image = ( | |||
| dora_input["value"].to_numpy().reshape((IMAGE_HEIGHT, IMAGE_WIDTH, 3)) | |||
| ) | |||
| image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) | |||
| self.image = image | |||
| elif dora_input["id"] == "bbox" and len(self.image) != 0: | |||
| bboxs = dora_input["value"].to_numpy() | |||
| self.bboxs = np.reshape(bboxs, (-1, 6)) | |||
| for bbox in self.bboxs: | |||
| [ | |||
| x, | |||
| y, | |||
| w, | |||
| h, | |||
| confidence, | |||
| label, | |||
| ] = bbox | |||
| cv2.rectangle( | |||
| self.image, | |||
| (int(x - w / 2), int(y - h / 2)), | |||
| (int(x + w / 2), int(y + h / 2)), | |||
| (0, 255, 0), | |||
| 2, | |||
| ) | |||
| if CI != "true": | |||
| cv2.imshow("frame", self.image) | |||
| if cv2.waitKey(1) & 0xFF == ord("q"): | |||
| return DoraStatus.STOP | |||
| return DoraStatus.CONTINUE | |||
| plotter = Plotter() | |||
| node = Node() | |||
| for event in node: | |||
| event_type = event["type"] | |||
| if event_type == "INPUT": | |||
| status = plotter.on_input(event) | |||
| if status == DoraStatus.CONTINUE: | |||
| pass | |||
| elif status == DoraStatus.STOP: | |||
| print("plotter returned stop status") | |||
| break | |||
| elif event_type == "STOP": | |||
| print("received stop") | |||
| else: | |||
| print("received unexpected event:", event_type) | |||
| @@ -1,5 +1,4 @@ | |||
| use dora_core::{get_pip_path, get_python_path, run}; | |||
| use dora_download::download_file; | |||
| use dora_tracing::set_up_tracing; | |||
| use eyre::{bail, ContextCompat, WrapErr}; | |||
| use std::path::Path; | |||
| @@ -51,20 +50,13 @@ async fn main() -> eyre::Result<()> { | |||
| ); | |||
| } | |||
| run( | |||
| get_python_path().context("Could not get pip binary")?, | |||
| &["-m", "pip", "install", "--upgrade", "pip"], | |||
| None, | |||
| ) | |||
| .await | |||
| .context("failed to install pip")?; | |||
| run( | |||
| get_pip_path().context("Could not get pip binary")?, | |||
| &["install", "-r", "requirements.txt"], | |||
| None, | |||
| &["install", "maturin"], | |||
| Some(venv), | |||
| ) | |||
| .await | |||
| .context("pip install failed")?; | |||
| .context("pip install maturin failed")?; | |||
| run( | |||
| "maturin", | |||
| @@ -73,12 +65,6 @@ async fn main() -> eyre::Result<()> { | |||
| ) | |||
| .await | |||
| .context("maturin develop failed")?; | |||
| download_file( | |||
| "https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n.pt", | |||
| Path::new("yolov8n.pt"), | |||
| ) | |||
| .await | |||
| .context("Could not download weights.")?; | |||
| let dataflow = Path::new("dataflow.yml"); | |||
| run_dataflow(dataflow).await?; | |||
| @@ -88,6 +74,16 @@ async fn main() -> eyre::Result<()> { | |||
| async fn run_dataflow(dataflow: &Path) -> eyre::Result<()> { | |||
| let cargo = std::env::var("CARGO").unwrap(); | |||
| // First build the dataflow (install requirements) | |||
| let mut cmd = tokio::process::Command::new(&cargo); | |||
| cmd.arg("run"); | |||
| cmd.arg("--package").arg("dora-cli"); | |||
| cmd.arg("--").arg("build").arg(dataflow); | |||
| if !cmd.status().await?.success() { | |||
| bail!("failed to run dataflow"); | |||
| }; | |||
| let mut cmd = tokio::process::Command::new(&cargo); | |||
| cmd.arg("run"); | |||
| cmd.arg("--package").arg("dora-cli"); | |||
| @@ -1,56 +0,0 @@ | |||
| #!/usr/bin/env python3 | |||
| # -*- coding: utf-8 -*- | |||
| import os | |||
| import time | |||
| import numpy as np | |||
| import cv2 | |||
| from dora import Node | |||
| import pyarrow as pa | |||
| node = Node() | |||
| IMAGE_INDEX = int(os.getenv("IMAGE_INDEX", 0)) | |||
| IMAGE_WIDTH = int(os.getenv("IMAGE_WIDTH", 960)) | |||
| IMAGE_HEIGHT = int(os.getenv("IMAGE_HEIGHT", 540)) | |||
| video_capture = cv2.VideoCapture(IMAGE_INDEX) | |||
| video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, IMAGE_WIDTH) | |||
| video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, IMAGE_HEIGHT) | |||
| font = cv2.FONT_HERSHEY_SIMPLEX | |||
| start = time.time() | |||
| # Run for 20 seconds | |||
| while time.time() - start < 1000: | |||
| # Wait next dora_input | |||
| event = node.next() | |||
| if event is None: | |||
| break | |||
| event_type = event["type"] | |||
| if event_type == "INPUT": | |||
| ret, frame = video_capture.read() | |||
| if not ret: | |||
| frame = np.zeros((IMAGE_HEIGHT, IMAGE_WIDTH, 3), dtype=np.uint8) | |||
| cv2.putText( | |||
| frame, | |||
| "No Webcam was found at index %d" % (IMAGE_INDEX), | |||
| (int(30), int(30)), | |||
| font, | |||
| 0.75, | |||
| (255, 255, 255), | |||
| 2, | |||
| 1, | |||
| ) | |||
| if len(frame) != IMAGE_HEIGHT * IMAGE_WIDTH * 3: | |||
| print("frame size is not correct") | |||
| frame = cv2.resize(frame, (IMAGE_WIDTH, IMAGE_HEIGHT)) | |||
| frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) | |||
| node.send_output( | |||
| "image", | |||
| pa.array(frame.ravel()), | |||
| event["metadata"], | |||
| ) | |||
| node.send_output("text", pa.array([f"send image at: {time.time()}"])) | |||
| @@ -3,12 +3,16 @@ | |||
| #![allow(clippy::missing_safety_doc)] | |||
| use std::collections::BTreeMap; | |||
| use arrow_data::ArrayData; | |||
| use arrow_schema::DataType; | |||
| use eyre::Context; | |||
| use serde::{Deserialize, Serialize}; | |||
| pub use uhlc; | |||
| pub type MetadataParameters = BTreeMap<String, Parameter>; | |||
| #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] | |||
| pub struct Metadata { | |||
| metadata_version: u16, | |||
| @@ -105,20 +109,11 @@ pub struct BufferOffset { | |||
| pub len: usize, | |||
| } | |||
| #[derive(Debug, Clone, PartialEq, Eq, Default, serde::Serialize, serde::Deserialize)] | |||
| pub struct MetadataParameters { | |||
| pub watermark: u64, | |||
| pub deadline: u64, | |||
| pub open_telemetry_context: String, | |||
| } | |||
| impl MetadataParameters { | |||
| pub fn into_owned(self) -> MetadataParameters { | |||
| MetadataParameters { | |||
| open_telemetry_context: self.open_telemetry_context, | |||
| ..self | |||
| } | |||
| } | |||
| #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] | |||
| pub enum Parameter { | |||
| Bool(bool), | |||
| Integer(i64), | |||
| String(String), | |||
| } | |||
| impl Metadata { | |||
| @@ -142,4 +137,12 @@ impl Metadata { | |||
| pub fn timestamp(&self) -> uhlc::Timestamp { | |||
| self.timestamp | |||
| } | |||
| pub fn open_telemetry_context(&self) -> String { | |||
| if let Some(Parameter::String(otel)) = self.parameters.get("open_telemetry_context") { | |||
| otel.to_string() | |||
| } else { | |||
| "".to_string() | |||
| } | |||
| } | |||
| } | |||
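For Python nodes, the practical effect of this refactor is that metadata can carry typed values end to end: `pydict_to_metadata` maps Python `bool`, `int`, and `str` values to `Parameter::Bool`, `Parameter::Integer`, and `Parameter::String`, and `metadata_to_pydict` maps them back. A minimal sketch of sending typed metadata (the output id and metadata keys are illustrative):

```python
import pyarrow as pa
from dora import Node

node = Node()

# bool/int/str metadata values are converted to typed Parameter variants
# (Bool/Integer/String); other types fall back to their string representation
node.send_output(
    "image",
    pa.array([0] * 640 * 480 * 3, type=pa.uint8()),
    {"width": 640, "height": 480, "encoding": "rgb8", "mirrored": False},
)
```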
| @@ -0,0 +1,88 @@ | |||
| ## Dora Node Hub | |||
| This hub contains useful pre-built nodes for Dora. | |||
## Structure
| The structure of the node hub is as follows (please use the same structure if you need to add a new node): | |||
| ``` | |||
| node-hub/ | |||
| └── your-node/ | |||
| ├── main.py | |||
├── README.md
| └── pyproject.toml | |||
| ``` | |||
The idea is to make a `pyproject.toml` file that installs the required dependencies for the node **and** exposes the node's
main function as a callable script in your environment.
To do so, you will need to add a `main` function inside the `main.py` file.
| ```python | |||
| def main(): | |||
| pass | |||
| ``` | |||
| And then you will need to adapt the following `pyproject.toml` file: | |||
| ```toml | |||
| [tool.poetry] | |||
| name = "[name of the node e.g. video-encoder, with '-' to replace spaces]" | |||
| version = "0.1" | |||
| authors = ["[Pseudo/Name] <[email]>"] | |||
| description = "Dora Node for []" | |||
| readme = "README.md" | |||
| packages = [ | |||
| { include = "main.py", to = "[name of the node with '_' to replace spaces]" } | |||
| ] | |||
| [tool.poetry.dependencies] | |||
| python = "^3.11" | |||
| dora-rs = "0.3.5" | |||
| ... [add your dependencies here] ... | |||
| [tool.poetry.scripts] | |||
| [name of the node with '-' to replace spaces] = "[name of the node with '_' to replace spaces].main:main" | |||
| [build-system] | |||
| requires = ["poetry-core>=1.0.0"] | |||
| build-backend = "poetry.core.masonry.api" | |||
| ``` | |||
Finally, the README.md file should describe all inputs/outputs of the node and how to configure it in the YAML file.
## Example
| ```toml | |||
| [tool.poetry] | |||
| name = "opencv-plot" | |||
| version = "0.1" | |||
| authors = [ | |||
| "Haixuan Xavier Tao <tao.xavier@outlook.com>", | |||
| "Enzo Le Van <dev@enzo-le-van.fr>" | |||
| ] | |||
| description = "Dora Node for plotting data with OpenCV" | |||
| readme = "README.md" | |||
| packages = [ | |||
| { include = "main.py", to = "opencv_plot" } | |||
| ] | |||
| [tool.poetry.dependencies] | |||
| python = "^3.11" | |||
| dora-rs = "^0.3.5" | |||
| opencv-python = "^4.10.0.84" | |||
| [tool.poetry.scripts] | |||
| opencv-plot = "opencv_plot.main:main" | |||
| [build-system] | |||
| requires = ["poetry-core>=1.0.0"] | |||
| build-backend = "poetry.core.masonry.api" | |||
| ``` | |||
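Once installed this way, the node can be referenced directly from a dataflow YAML via its `build` and `path` fields, as the example dataflows in this release do (paths here are illustrative):

```yaml
- id: plot
  build: pip install ../../node-hub/opencv-plot
  path: opencv-plot
  inputs:
    image: camera/image
```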
| ## License | |||
| This project is licensed under Apache-2.0. Check out [NOTICE.md](../NOTICE.md) for more information. | |||
| @@ -100,7 +100,12 @@ async fn main() -> eyre::Result<()> { | |||
| None => {} | |||
| Some(tx) => drop(tx), | |||
| }, | |||
| _ => {} | |||
| Event::Error(err) => { | |||
| println!("Error: {}", err); | |||
| } | |||
| event => { | |||
| println!("Event: {event:#?}") | |||
| } | |||
| } | |||
| } | |||
| @@ -137,7 +142,7 @@ async fn write_event( | |||
| let timestamp_utc = TimestampMillisecondArray::from(vec![dt.timestamp_millis()]); | |||
| let timestamp_utc = make_array(timestamp_utc.into()); | |||
| let string_otel_context = metadata.parameters.open_telemetry_context.to_string(); | |||
| let string_otel_context = metadata.open_telemetry_context(); | |||
| let otel_context = deserialize_to_hashmap(&string_otel_context); | |||
| let traceparent = otel_context.get("traceparent"); | |||
| let trace_id = match traceparent { | |||
| @@ -25,15 +25,15 @@ cargo install --git https://github.com/dora-rs/dora dora-rerun | |||
| text: webcam/text | |||
| boxes2d: object_detection/bbox | |||
| envs: | |||
| IMAGE_WIDTH: 960 | |||
| IMAGE_HEIGHT: 540 | |||
| IMAGE_DEPTH: 3 | |||
| RERUN_MEMORY_LIMIT: 25% | |||
| ``` | |||
| ## Input definition | |||
| - image: UInt8Array + metadata { "width": int, "height": int, "encoding": str } | |||
| - boxes2D: StructArray + metadata { "format": str } | |||
| - text: StringArray | |||
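For example, a Python node upstream of dora-rerun could publish an `image` input matching this contract (a minimal sketch; shapes and metadata keys follow the list above):

```python
import numpy as np
import pyarrow as pa
from dora import Node

node = Node()

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # an rgb8 frame
node.send_output(
    "image",
    pa.array(frame.ravel()),
    {"width": 640, "height": 480, "encoding": "rgb8"},
)
```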
| ## Configurations | |||
| - IMAGE_WIDTH: Image width in pixels | |||
- IMAGE_HEIGHT: Image height in pixels
| - IMAGE_DEPTH: Image depth | |||
| - RERUN_MEMORY_LIMIT: Rerun memory limit | |||
| @@ -0,0 +1,192 @@ | |||
| //! Demonstrates the most barebone usage of the Rerun SDK. | |||
| use std::env::VarError; | |||
| use dora_node_api::{ | |||
| arrow::{ | |||
| array::{AsArray, StringArray, StructArray, UInt8Array}, | |||
| datatypes::Float32Type, | |||
| }, | |||
| DoraNode, Event, Parameter, | |||
| }; | |||
| use eyre::{eyre, Context, ContextCompat, Result}; | |||
| use rerun::{ | |||
| external::re_types::ArrowBuffer, SpawnOptions, TensorBuffer, TensorData, TensorDimension, Text, | |||
| }; | |||
| fn main() -> Result<()> { | |||
| // rerun `serve()` requires to have a running Tokio runtime in the current context. | |||
| let rt = tokio::runtime::Runtime::new().expect("Failed to create tokio runtime"); | |||
| let _guard = rt.enter(); | |||
| let (_node, mut events) = | |||
| DoraNode::init_from_env().context("Could not initialize dora node")?; | |||
| // Limit memory usage | |||
| let mut options = SpawnOptions::default(); | |||
| let memory_limit = match std::env::var("RERUN_MEMORY_LIMIT") { | |||
| Ok(memory_limit) => memory_limit | |||
| .parse::<String>() | |||
| .context("Could not parse RERUN_MEMORY_LIMIT value")?, | |||
| Err(VarError::NotUnicode(_)) => { | |||
| return Err(eyre!("RERUN_MEMORY_LIMIT env variable is not unicode")); | |||
| } | |||
| Err(VarError::NotPresent) => "25%".to_string(), | |||
| }; | |||
| options.memory_limit = memory_limit; | |||
| let rec = rerun::RecordingStreamBuilder::new("dora-rerun") | |||
| .spawn_opts(&options, None) | |||
| .context("Could not spawn rerun visualization")?; | |||
| while let Some(event) = events.recv() { | |||
| if let Event::Input { id, data, metadata } = event { | |||
| if id.as_str().contains("image") { | |||
| let height = | |||
| if let Some(Parameter::Integer(height)) = metadata.parameters.get("height") { | |||
| height | |||
| } else { | |||
| &480 | |||
| }; | |||
| let width = | |||
| if let Some(Parameter::Integer(width)) = metadata.parameters.get("width") { | |||
| width | |||
| } else { | |||
| &640 | |||
| }; | |||
| let encoding = if let Some(Parameter::String(encoding)) = | |||
| metadata.parameters.get("encoding") | |||
| { | |||
| encoding | |||
| } else { | |||
| "bgr8" | |||
| }; | |||
| let channels = if encoding == "bgr8" { 3 } else { 3 }; | |||
| let shape = vec![ | |||
| TensorDimension { | |||
| name: Some("height".into()), | |||
| size: *height as u64, | |||
| }, | |||
| TensorDimension { | |||
| name: Some("width".into()), | |||
| size: *width as u64, | |||
| }, | |||
| TensorDimension { | |||
| name: Some("depth".into()), | |||
| size: channels as u64, | |||
| }, | |||
| ]; | |||
| let image = if encoding == "bgr8" { | |||
| let buffer: &UInt8Array = data.as_any().downcast_ref().unwrap(); | |||
| let buffer: &[u8] = buffer.values(); | |||
| // Transpose values from BGR to RGB | |||
| let buffer: Vec<u8> = | |||
| buffer.chunks(3).flat_map(|x| [x[2], x[1], x[0]]).collect(); | |||
| let buffer = TensorBuffer::U8(ArrowBuffer::from(buffer)); | |||
| let tensordata = TensorData::new(shape.clone(), buffer); | |||
| rerun::Image::new(tensordata) | |||
| } else if encoding == "rgb8" { | |||
| let buffer: &UInt8Array = data.as_any().downcast_ref().unwrap(); | |||
| let buffer: &[u8] = buffer.values(); | |||
| let buffer = TensorBuffer::U8(ArrowBuffer::from(buffer)); | |||
| let tensordata = TensorData::new(shape.clone(), buffer); | |||
| rerun::Image::new(tensordata) | |||
| } else { | |||
| unimplemented!("We haven't worked on additional encodings.") | |||
| }; | |||
| rec.log(id.as_str(), &image) | |||
| .context("could not log image")?; | |||
| } else if id.as_str().contains("textlog") { | |||
| let buffer: StringArray = data.to_data().into(); | |||
| buffer.iter().try_for_each(|string| -> Result<()> { | |||
| if let Some(str) = string { | |||
| rec.log(id.as_str(), &rerun::TextLog::new(str)) | |||
| .wrap_err("Could not log text") | |||
| } else { | |||
| Ok(()) | |||
| } | |||
| })?; | |||
| } else if id.as_str().contains("boxes2d") { | |||
| let bbox_struct: StructArray = data.to_data().into(); | |||
| let format = | |||
| if let Some(Parameter::String(format)) = metadata.parameters.get("format") { | |||
| format | |||
| } else { | |||
| "xyxy" | |||
| }; | |||
| // Cast Bbox | |||
| let bbox_buffer = bbox_struct | |||
| .column_by_name("bbox") | |||
| .context("Did not find labels field within bbox struct")?; | |||
| let bbox = bbox_buffer | |||
| .as_list_opt::<i32>() | |||
| .context("Could not deserialize bbox as list")? | |||
| .values(); | |||
| let bbox = bbox | |||
| .as_primitive_opt::<Float32Type>() | |||
| .context("Could not get bbox value as list")? | |||
| .values(); | |||
| // Cast Labels | |||
| let labels_buffer = bbox_struct | |||
| .column_by_name("labels") | |||
| .context("Did not find labels field within bbox struct")?; | |||
| let labels = labels_buffer | |||
| .as_list_opt::<i32>() | |||
| .context("Could not deserialize labels as list")? | |||
| .values(); | |||
| let labels = labels | |||
| .as_string_opt::<i32>() | |||
| .context("Could not deserialize labels as string")?; | |||
| let labels: Vec<Text> = labels.iter().map(|x| Text::from(x.unwrap())).collect(); | |||
| // Cast confidence | |||
| let conf_buffer = bbox_struct | |||
| .column_by_name("conf") | |||
| .context("Did not find conf field within bbox struct")?; | |||
| let conf = conf_buffer | |||
| .as_list_opt::<i32>() | |||
| .context("Could not deserialize conf as list")? | |||
| .values(); | |||
| let _conf = conf | |||
| .as_primitive_opt::<Float32Type>() | |||
| .context("Could not deserialize conf as string")?; | |||
| let mut centers = vec![]; | |||
| let mut sizes = vec![]; | |||
| if format == "xywh" { | |||
| bbox.chunks(4).for_each(|block| { | |||
| if let [x, y, w, h] = block { | |||
| centers.push((*x, *y)); | |||
| sizes.push((*w, *h)); | |||
| } | |||
| }); | |||
| } else if format == "xyxy" { | |||
| bbox.chunks(4).for_each(|block| { | |||
| if let [min_x, min_y, max_x, max_y] = block { | |||
| centers.push(((max_x + min_x) / 2., (max_y + min_y) / 2.)); | |||
| sizes.push(((max_x - min_x), (max_y - min_y))); | |||
| } | |||
| }); | |||
| } | |||
| rec.log( | |||
| id.as_str(), | |||
| &rerun::Boxes2D::from_centers_and_sizes(centers, sizes).with_labels(labels), | |||
| ) | |||
| .wrap_err("Could not log Boxes2D")?; | |||
| } | |||
| } | |||
| } | |||
| Ok(()) | |||
| } | |||
| @@ -0,0 +1,89 @@ | |||
| # Dora Node for plotting data with OpenCV | |||
This node plots text and a list of bounding boxes on a base image (ideal for object detection).
| # YAML | |||
| ```yaml | |||
| - id: opencv-plot | |||
| build: pip install ../../node-hub/opencv-plot | |||
| path: opencv-plot | |||
| inputs: | |||
| # image: Arrow array of size 1 containing the base image | |||
| # bbox: Arrow array of bbox | |||
| # text: Arrow array of size 1 containing the text to be plotted | |||
| env: | |||
| PLOT_WIDTH: 640 # optional, default is image input width | |||
| PLOT_HEIGHT: 480 # optional, default is image input height | |||
| ``` | |||
| # Inputs | |||
| - `image`: Arrow array containing the base image | |||
| ```python | |||
| ## Image data | |||
| image_data: UInt8Array # Example: pa.array(img.ravel()) | |||
| metadata = { | |||
| "width": 640, | |||
| "height": 480, | |||
| "encoding": str, # bgr8, rgb8 | |||
| } | |||
| ## Example | |||
| node.send_output( | |||
| image_data, {"width": 640, "height": 480, "encoding": "bgr8"} | |||
| ) | |||
| ## Decoding | |||
| storage = event["value"] | |||
| metadata = event["metadata"] | |||
| encoding = metadata["encoding"] | |||
| width = metadata["width"] | |||
| height = metadata["height"] | |||
| if encoding == "bgr8": | |||
| channels = 3 | |||
| storage_type = np.uint8 | |||
| frame = ( | |||
| storage.to_numpy() | |||
| .astype(storage_type) | |||
| .reshape((height, width, channels)) | |||
| ) | |||
| ``` | |||
- `bbox`: Arrow array containing the bounding boxes, confidence scores, and class names of the detected objects
| ```Python | |||
| bbox: { | |||
| "bbox": np.array, # flattened array of bounding boxes | |||
| "conf": np.array, # flat array of confidence scores | |||
| "labels": np.array, # flat array of class names | |||
| } | |||
| encoded_bbox = pa.array([bbox], {"format": "xyxy"}) | |||
| decoded_bbox = { | |||
| "bbox": encoded_bbox[0]["bbox"].values.to_numpy().reshape(-1, 4), | |||
| "conf": encoded_bbox[0]["conf"].values.to_numpy(), | |||
| "labels": encoded_bbox[0]["labels"].values.to_numpy(zero_copy_only=False), | |||
| } | |||
| ``` | |||
| - `text`: Arrow array containing the text to be plotted | |||
| ```python | |||
| text: str | |||
| encoded_text = pa.array([text]) | |||
| decoded_text = encoded_text[0].as_py() | |||
| ``` | |||
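Since the package installs an `opencv-plot` console script, the node can also be attached dynamically to a running dataflow by name, as the CI test in this release does:

```bash
opencv-plot --name plot
```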
| ## License | |||
| This project is licensed under Apache-2.0. Check out [NOTICE.md](../../NOTICE.md) for more information. | |||
| @@ -0,0 +1,201 @@ | |||
| import os | |||
| import argparse | |||
| import cv2 | |||
| import numpy as np | |||
| import pyarrow as pa | |||
| from dora import Node | |||
| RUNNER_CI = True if os.getenv("CI") == "true" else False | |||
| class Plot: | |||
| frame: np.array = np.array([]) | |||
| bboxes: dict = { | |||
| "bbox": np.array([]), | |||
| "conf": np.array([]), | |||
| "labels": np.array([]), | |||
| } | |||
| text: str = "" | |||
| width: np.uint32 = None | |||
| height: np.uint32 = None | |||
| def plot_frame(plot): | |||
| for bbox in zip(plot.bboxes["bbox"], plot.bboxes["conf"], plot.bboxes["labels"]): | |||
| [ | |||
| [min_x, min_y, max_x, max_y], | |||
| confidence, | |||
| label, | |||
| ] = bbox | |||
| cv2.rectangle( | |||
| plot.frame, | |||
| (int(min_x), int(min_y)), | |||
| (int(max_x), int(max_y)), | |||
| (0, 255, 0), | |||
| 2, | |||
| ) | |||
| cv2.putText( | |||
| plot.frame, | |||
| f"{label}, {confidence:0.2f}", | |||
| (int(max_x) - 120, int(max_y) - 10), | |||
| cv2.FONT_HERSHEY_SIMPLEX, | |||
| 0.5, | |||
| (0, 255, 0), | |||
| 1, | |||
| 1, | |||
| ) | |||
| cv2.putText( | |||
| plot.frame, | |||
| plot.text, | |||
| (20, 20), | |||
| cv2.FONT_HERSHEY_SIMPLEX, | |||
| 0.5, | |||
| (255, 255, 255), | |||
| 1, | |||
| 1, | |||
| ) | |||
| if plot.width is not None and plot.height is not None: | |||
| plot.frame = cv2.resize(plot.frame, (plot.width, plot.height)) | |||
| if not RUNNER_CI: | |||
| if len(plot.frame.shape) >= 3: | |||
| cv2.imshow("Dora Node: opencv-plot", plot.frame) | |||
| def main(): | |||
| # Handle dynamic nodes, ask for the name of the node in the dataflow, and the same values as the ENV variables. | |||
| parser = argparse.ArgumentParser( | |||
| description="OpenCV Plotter: This node is used to plot text and bounding boxes on an image." | |||
| ) | |||
| parser.add_argument( | |||
| "--name", | |||
| type=str, | |||
| required=False, | |||
| help="The name of the node in the dataflow.", | |||
| default="opencv-plot", | |||
| ) | |||
| parser.add_argument( | |||
| "--plot-width", | |||
| type=int, | |||
| required=False, | |||
| help="The width of the plot.", | |||
| default=None, | |||
| ) | |||
| parser.add_argument( | |||
| "--plot-height", | |||
| type=int, | |||
| required=False, | |||
| help="The height of the plot.", | |||
| default=None, | |||
| ) | |||
| args = parser.parse_args() | |||
| plot_width = os.getenv("PLOT_WIDTH", args.plot_width) | |||
| plot_height = os.getenv("PLOT_HEIGHT", args.plot_height) | |||
| if plot_width is not None: | |||
| if isinstance(plot_width, str) and plot_width.isnumeric(): | |||
| plot_width = int(plot_width) | |||
| if plot_height is not None: | |||
| if isinstance(plot_height, str) and plot_height.isnumeric(): | |||
| plot_height = int(plot_height) | |||
| node = Node( | |||
|     args.name | |||
| )  # provide the node name so a dynamic node can connect to the dataflow | |||
| plot = Plot() | |||
| plot.width = plot_width | |||
| plot.height = plot_height | |||
| pa.array([]) # initialize pyarrow array | |||
| for event in node: | |||
| event_type = event["type"] | |||
| if event_type == "INPUT": | |||
| event_id = event["id"] | |||
| if event_id == "image": | |||
| storage = event["value"] | |||
| metadata = event["metadata"] | |||
| encoding = metadata["encoding"] | |||
| width = metadata["width"] | |||
| height = metadata["height"] | |||
| if encoding == "bgr8": | |||
| channels = 3 | |||
| storage_type = np.uint8 | |||
| plot.frame = ( | |||
| storage.to_numpy() | |||
| .astype(storage_type) | |||
| .reshape((height, width, channels)) | |||
| .copy()  # copy so that we can draw annotations on the image | |||
| ) | |||
| elif encoding == "rgb8": | |||
| channels = 3 | |||
| storage_type = np.uint8 | |||
| frame = ( | |||
| storage.to_numpy() | |||
| .astype(storage_type) | |||
| .reshape((height, width, channels)) | |||
| ) | |||
| plot.frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) | |||
| else: | |||
| raise RuntimeError(f"Unsupported image encoding: {encoding}") | |||
| plot_frame(plot) | |||
| if not RUNNER_CI: | |||
| if cv2.waitKey(1) & 0xFF == ord("q"): | |||
| break | |||
| elif event_id == "bbox": | |||
| arrow_bbox = event["value"][0] | |||
| bbox_format = event["metadata"]["format"] | |||
| if bbox_format == "xyxy": | |||
| bbox = arrow_bbox["bbox"].values.to_numpy().reshape(-1, 4) | |||
| elif bbox_format == "xywh": | |||
| original_bbox = arrow_bbox["bbox"].values.to_numpy().reshape(-1, 4) | |||
| bbox = np.array( | |||
| [ | |||
| ( | |||
| x - w / 2, | |||
| y - h / 2, | |||
| x + w / 2, | |||
| y + h / 2, | |||
| ) | |||
| for [x, y, w, h] in original_bbox | |||
| ] | |||
| ) | |||
| else: | |||
| raise RuntimeError(f"Unsupported bbox format: {bbox_format}") | |||
| plot.bboxes = { | |||
| "bbox": bbox, | |||
| "conf": arrow_bbox["conf"].values.to_numpy(), | |||
| "labels": arrow_bbox["labels"].values.to_numpy( | |||
| zero_copy_only=False | |||
| ), | |||
| } | |||
| elif event_id == "text": | |||
| plot.text = event["value"][0].as_py() | |||
| elif event_type == "ERROR": | |||
| raise RuntimeError(event["error"]) | |||
| if __name__ == "__main__": | |||
| main() | |||
| @@ -0,0 +1,23 @@ | |||
| [tool.poetry] | |||
| name = "opencv-plot" | |||
| version = "0.1" | |||
| authors = [ | |||
| "Haixuan Xavier Tao <tao.xavier@outlook.com>", | |||
| "Enzo Le Van <dev@enzo-le-van.fr>", | |||
| ] | |||
| description = "Dora Node for plotting text and bbox on image with OpenCV" | |||
| readme = "README.md" | |||
| packages = [{ include = "opencv_plot" }] | |||
| [tool.poetry.dependencies] | |||
| dora-rs = "0.3.5" | |||
| numpy = "< 2.0.0" | |||
| opencv-python = ">= 4.1.1" | |||
| [tool.poetry.scripts] | |||
| opencv-plot = "opencv_plot.main:main" | |||
| [build-system] | |||
| requires = ["poetry-core>=1.8.0"] | |||
| build-backend = "poetry.core.masonry.api" | |||
| @@ -0,0 +1,66 @@ | |||
| # Dora Node for capturing video with OpenCV | |||
| This node is used to capture video from a camera using OpenCV. | |||
| # YAML | |||
| ```yaml | |||
| - id: opencv-video-capture | |||
| build: pip install ../../node-hub/opencv-video-capture | |||
| path: opencv-video-capture | |||
| inputs: | |||
| tick: dora/timer/millis/16 # try to capture at 60fps | |||
| outputs: | |||
| - image # the captured image | |||
| env: | |||
| CAPTURE_PATH: 0 # optional, default is 0 | |||
| IMAGE_WIDTH: 640 # optional, default is video capture width | |||
| IMAGE_HEIGHT: 480 # optional, default is video capture height | |||
| ``` | |||
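| Beyond the variables above, the script also reads an `ENCODING` environment variable (defaulting to `bgr8`). A sketch of the env block with every variable the script consults set explicitly (values are illustrative): | |||
| ```yaml | |||
| env: | |||
|   CAPTURE_PATH: 0 | |||
|   IMAGE_WIDTH: 640 | |||
|   IMAGE_HEIGHT: 480 | |||
|   ENCODING: rgb8 # bgr8 (default) or rgb8 | |||
| ``` | |||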
| # Inputs | |||
| - `tick`: empty Arrow array to trigger the capture | |||
| # Outputs | |||
| - `image`: an Arrow array containing the captured image | |||
| ```python | |||
| ## Image data | |||
| image_data: UInt8Array # Example: pa.array(img.ravel()) | |||
| metadata = { | |||
|   "width": 640, | |||
|   "height": 480, | |||
|   "encoding": str, # bgr8, rgb8 | |||
| } | |||
| ## Example | |||
| node.send_output( | |||
|     "image", image_data, {"width": 640, "height": 480, "encoding": "bgr8"} | |||
| ) | |||
| ## Decoding | |||
| storage = event["value"] | |||
| metadata = event["metadata"] | |||
| encoding = metadata["encoding"] | |||
| width = metadata["width"] | |||
| height = metadata["height"] | |||
| if encoding == "bgr8": | |||
| channels = 3 | |||
| storage_type = np.uint8 | |||
| frame = ( | |||
| storage.to_numpy() | |||
| .astype(storage_type) | |||
| .reshape((height, width, channels)) | |||
| ) | |||
| ``` | |||
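| Put together, a minimal consumer sketch, assuming a dataflow that routes this node's `image` output to an input also named `image`; the output file name is illustrative: | |||
| ```python | |||
| import cv2 | |||
| import numpy as np | |||
| from dora import Node | |||
| node = Node() | |||
| for event in node: | |||
|     if event["type"] == "INPUT" and event["id"] == "image": | |||
|         m = event["metadata"] | |||
|         frame = ( | |||
|             event["value"] | |||
|             .to_numpy() | |||
|             .astype(np.uint8) | |||
|             .reshape((m["height"], m["width"], 3)) | |||
|         ) | |||
|         if m["encoding"] == "rgb8": | |||
|             frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) | |||
|         cv2.imwrite("frame.png", frame)  # persist the latest frame | |||
| ``` | |||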
| ## License | |||
| This project is licensed under Apache-2.0. Check out [NOTICE.md](../../NOTICE.md) for more information. | |||
| @@ -0,0 +1,133 @@ | |||
| import os | |||
| import argparse | |||
| import cv2 | |||
| import numpy as np | |||
| import pyarrow as pa | |||
| from dora import Node | |||
| import time | |||
| RUNNER_CI = os.getenv("CI") == "true" | |||
| def main(): | |||
| # Handle dynamic nodes: read the node name from the command line, along with the same values as the ENV variables. | |||
| parser = argparse.ArgumentParser( | |||
| description="OpenCV Video Capture: This node is used to capture video from a camera." | |||
| ) | |||
| parser.add_argument( | |||
| "--name", | |||
| type=str, | |||
| required=False, | |||
| help="The name of the node in the dataflow.", | |||
| default="opencv-video-capture", | |||
| ) | |||
| parser.add_argument( | |||
|     "--path", | |||
|     type=str, | |||
|     required=False, | |||
|     help="The path of the device to capture (e.g. /dev/video1, or an index like 0, 1...).", | |||
|     default="0", | |||
| ) | |||
| parser.add_argument( | |||
| "--image-width", | |||
| type=int, | |||
| required=False, | |||
| help="The width of the image output. Default is the camera width.", | |||
| default=None, | |||
| ) | |||
| parser.add_argument( | |||
| "--image-height", | |||
| type=int, | |||
| required=False, | |||
| help="The height of the image output. Default is the camera height.", | |||
| default=None, | |||
| ) | |||
| args = parser.parse_args() | |||
| video_capture_path = os.getenv("CAPTURE_PATH", args.path) | |||
| encoding = os.getenv("ENCODING", "bgr8") | |||
| if isinstance(video_capture_path, str) and video_capture_path.isnumeric(): | |||
| video_capture_path = int(video_capture_path) | |||
| video_capture = cv2.VideoCapture(video_capture_path) | |||
| image_width = os.getenv("IMAGE_WIDTH", args.image_width) | |||
| if image_width is not None: | |||
| if isinstance(image_width, str) and image_width.isnumeric(): | |||
| image_width = int(image_width) | |||
| video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, image_width) | |||
| image_height = os.getenv("IMAGE_HEIGHT", args.image_height) | |||
| if image_height is not None: | |||
| if isinstance(image_height, str) and image_height.isnumeric(): | |||
| image_height = int(image_height) | |||
| video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, image_height) | |||
| node = Node(args.name) | |||
| start_time = time.time() | |||
| pa.array([]) # initialize pyarrow array | |||
| for event in node: | |||
| # Run this example in the CI for 20 seconds only. | |||
| if RUNNER_CI and time.time() - start_time > 20: | |||
| break | |||
| event_type = event["type"] | |||
| if event_type == "INPUT": | |||
| event_id = event["id"] | |||
| if event_id == "tick": | |||
| ret, frame = video_capture.read() | |||
| if not ret: | |||
| frame = np.zeros((480, 640, 3), dtype=np.uint8) | |||
| cv2.putText( | |||
| frame, | |||
| f"Error: no frame for camera at path {video_capture_path}.", | |||
| (int(30), int(30)), | |||
| cv2.FONT_HERSHEY_SIMPLEX, | |||
| 0.50, | |||
| (255, 255, 255), | |||
| 1, | |||
| 1, | |||
| ) | |||
| # resize the frame | |||
| if ( | |||
| image_width is not None | |||
| and image_height is not None | |||
| and ( | |||
| frame.shape[1] != image_width or frame.shape[0] != image_height | |||
| ) | |||
| ): | |||
| frame = cv2.resize(frame, (image_width, image_height)) | |||
| # Get the right encoding | |||
| if encoding == "rgb8": | |||
| frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) | |||
| storage = pa.array(frame.ravel()) | |||
| metadata = event["metadata"] | |||
| metadata["width"] = int(frame.shape[1]) | |||
| metadata["height"] = int(frame.shape[0]) | |||
| metadata["encoding"] = encoding | |||
| node.send_output("image", storage, metadata) | |||
| elif event_type == "ERROR": | |||
| raise RuntimeError(event["error"]) | |||
| if __name__ == "__main__": | |||
| main() | |||
| @@ -0,0 +1,23 @@ | |||
| [tool.poetry] | |||
| name = "opencv-video-capture" | |||
| version = "0.1" | |||
| authors = [ | |||
| "Haixuan Xavier Tao <tao.xavier@outlook.com>", | |||
| "Enzo Le Van <dev@enzo-le-van.fr>", | |||
| ] | |||
| description = "Dora Node for capturing video with OpenCV" | |||
| readme = "README.md" | |||
| packages = [{ include = "opencv_video_capture" }] | |||
| [tool.poetry.dependencies] | |||
| dora-rs = "0.3.5" | |||
| numpy = "< 2.0.0" | |||
| opencv-python = ">= 4.1.1" | |||
| [tool.poetry.scripts] | |||
| opencv-video-capture = "opencv_video_capture.main:main" | |||
| [build-system] | |||
| requires = ["poetry-core>=1.8.0"] | |||
| build-backend = "poetry.core.masonry.api" | |||
| @@ -0,0 +1,81 @@ | |||
| # Dora Node for detecting objects in images using YOLOv8 | |||
| This node is used to detect objects in images using YOLOv8. | |||
| # YAML | |||
| ```yaml | |||
| - id: object_detection | |||
| build: pip install ../../node-hub/ultralytics-yolo | |||
| path: ultralytics-yolo | |||
| inputs: | |||
| image: webcam/image | |||
| outputs: | |||
| - bbox | |||
| env: | |||
| MODEL: yolov8n.pt | |||
| ``` | |||
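| The script also honors a `FORMAT` environment variable for the bounding-box layout (`xyxy` by default; `xywh` is also supported). A sketch of the env block with it set: | |||
| ```yaml | |||
| env: | |||
|   MODEL: yolov8n.pt | |||
|   FORMAT: xywh # xyxy (default) or xywh | |||
| ``` | |||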
| # Inputs | |||
| - `image`: Arrow array containing the base image | |||
| ```python | |||
| ## Image data | |||
| image_data: UInt8Array # Example: pa.array(img.ravel()) | |||
| metadata = { | |||
| "width": 640, | |||
| "height": 480, | |||
| "encoding": str, # bgr8, rgb8 | |||
| } | |||
| ## Example | |||
| node.send_output( | |||
|     "image", image_data, {"width": 640, "height": 480, "encoding": "bgr8"} | |||
| ) | |||
| ## Decoding | |||
| storage = event["value"] | |||
| metadata = event["metadata"] | |||
| encoding = metadata["encoding"] | |||
| width = metadata["width"] | |||
| height = metadata["height"] | |||
| if encoding == "bgr8": | |||
| channels = 3 | |||
| storage_type = np.uint8 | |||
| frame = ( | |||
| storage.to_numpy() | |||
| .astype(storage_type) | |||
| .reshape((height, width, channels)) | |||
| ) | |||
| ``` | |||
| # Outputs | |||
| - `bbox`: an Arrow array containing the bounding boxes, confidence scores, and class names of the detected objects | |||
| ```python | |||
| bbox: { | |||
|     "bbox": np.array,  # flattened array of bounding boxes | |||
|     "conf": np.array,  # flat array of confidence scores | |||
|     "labels": np.array,  # flat array of class names | |||
| } | |||
| # Encoding: the {"format": "xyxy"} dict is output metadata passed to | |||
| # send_output, not an argument to pa.array | |||
| encoded_bbox = pa.array([bbox]) | |||
| decoded_bbox = { | |||
|     "bbox": encoded_bbox[0]["bbox"].values.to_numpy().reshape(-1, 4), | |||
|     "conf": encoded_bbox[0]["conf"].values.to_numpy(), | |||
|     "labels": encoded_bbox[0]["labels"].values.to_numpy(zero_copy_only=False), | |||
| } | |||
| ``` | |||
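| For context, this `bbox` output matches what the opencv-plot node above consumes. A hedged dataflow sketch wiring the two together (the node ids and the webcam source are illustrative): | |||
| ```yaml | |||
| - id: plot | |||
|   build: pip install ../../node-hub/opencv-plot | |||
|   path: opencv-plot | |||
|   inputs: | |||
|     image: webcam/image | |||
|     bbox: object_detection/bbox | |||
| ``` | |||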
| ## License | |||
| This project is licensed under Apache-2.0. Check out [NOTICE.md](../../NOTICE.md) for more information. | |||
| @@ -0,0 +1,23 @@ | |||
| [tool.poetry] | |||
| name = "ultralytics-yolo" | |||
| version = "0.1" | |||
| authors = [ | |||
| "Haixuan Xavier Tao <tao.xavier@outlook.com>", | |||
| "Enzo Le Van <dev@enzo-le-van.fr>", | |||
| ] | |||
| description = "Dora Node for object detection with Ultralytics YOLOv8" | |||
| readme = "README.md" | |||
| packages = [{ include = "ultralytics_yolo" }] | |||
| [tool.poetry.dependencies] | |||
| dora-rs = "0.3.5" | |||
| numpy = "< 2.0.0" | |||
| ultralytics = "<= 8.2.52" | |||
| [tool.poetry.scripts] | |||
| ultralytics-yolo = "ultralytics_yolo.main:main" | |||
| [build-system] | |||
| requires = ["poetry-core>=1.8.0"] | |||
| build-backend = "poetry.core.masonry.api" | |||
| @@ -0,0 +1,111 @@ | |||
| import os | |||
| import argparse | |||
| import numpy as np | |||
| import pyarrow as pa | |||
| from dora import Node | |||
| from ultralytics import YOLO | |||
| def main(): | |||
| # Handle dynamic nodes: read the node name from the command line, along with the same values as the ENV variables. | |||
| parser = argparse.ArgumentParser( | |||
| description="UltraLytics YOLO: This node is used to perform object detection using the UltraLytics YOLO model." | |||
| ) | |||
| parser.add_argument( | |||
| "--name", | |||
| type=str, | |||
| required=False, | |||
| help="The name of the node in the dataflow.", | |||
| default="ultralytics-yolo", | |||
| ) | |||
| parser.add_argument( | |||
| "--model", | |||
| type=str, | |||
| required=False, | |||
| help="The name of the model file (e.g. yolov8n.pt).", | |||
| default="yolov8n.pt", | |||
| ) | |||
| args = parser.parse_args() | |||
| model_path = os.getenv("MODEL", args.model) | |||
| bbox_format = os.getenv("FORMAT", "xyxy") | |||
| model = YOLO(model_path) | |||
| node = Node(args.name) | |||
| pa.array([]) # initialize pyarrow array | |||
| for event in node: | |||
| event_type = event["type"] | |||
| if event_type == "INPUT": | |||
| event_id = event["id"] | |||
| if event_id == "image": | |||
| storage = event["value"] | |||
| metadata = event["metadata"] | |||
| encoding = metadata["encoding"] | |||
| width = metadata["width"] | |||
| height = metadata["height"] | |||
| if encoding in ("bgr8", "rgb8"): | |||
|     channels = 3 | |||
|     storage_type = np.uint8 | |||
| else: | |||
|     raise RuntimeError(f"Unsupported image encoding: {encoding}") | |||
| frame = ( | |||
|     storage.to_numpy() | |||
|     .astype(storage_type) | |||
|     .reshape((height, width, channels)) | |||
| ) | |||
| if encoding == "bgr8": | |||
|     frame = frame[:, :, ::-1]  # OpenCV image (BGR to RGB) | |||
| results = model(frame, verbose=False) # includes NMS | |||
| if bbox_format == "xyxy": | |||
| bboxes = np.array(results[0].boxes.xyxy.cpu()) | |||
| elif bbox_format == "xywh": | |||
| bboxes = np.array(results[0].boxes.xywh.cpu()) | |||
| else: | |||
| raise RuntimeError(f"Unsupported bbox format: {bbox_format}") | |||
| conf = np.array(results[0].boxes.conf.cpu()) | |||
| labels = np.array(results[0].boxes.cls.cpu()) | |||
| names = [model.names.get(label) for label in labels] | |||
| bbox = { | |||
| "bbox": bboxes.ravel(), | |||
| "conf": conf, | |||
| "labels": names, | |||
| } | |||
| bbox = pa.array([bbox]) | |||
| metadata = event["metadata"] | |||
| metadata["format"] = bbox_format | |||
| node.send_output( | |||
| "bbox", | |||
| bbox, | |||
| metadata, | |||
| ) | |||
| elif event_type == "ERROR": | |||
| raise RuntimeError(event["error"]) | |||
| if __name__ == "__main__": | |||
| main() | |||
| @@ -1,132 +0,0 @@ | |||
| //! Demonstrates the most barebones usage of the Rerun SDK. | |||
| use std::env::VarError; | |||
| use dora_node_api::{ | |||
| arrow::array::{Float32Array, StringArray, UInt8Array}, | |||
| DoraNode, Event, | |||
| }; | |||
| use eyre::{eyre, Context, Result}; | |||
| use rerun::{ | |||
| external::re_types::ArrowBuffer, SpawnOptions, TensorBuffer, TensorData, TensorDimension, | |||
| }; | |||
| fn main() -> Result<()> { | |||
| // rerun `serve()` requires a running Tokio runtime in the current context. | |||
| let rt = tokio::runtime::Runtime::new().expect("Failed to create tokio runtime"); | |||
| let _guard = rt.enter(); | |||
| let (_node, mut events) = | |||
| DoraNode::init_from_env().context("Could not initialize dora node")?; | |||
| // Limit memory usage | |||
| let mut options = SpawnOptions::default(); | |||
| let memory_limit = match std::env::var("RERUN_MEMORY_LIMIT") { | |||
| Ok(memory_limit) => memory_limit | |||
| .parse::<String>() | |||
| .context("Could not parse RERUN_MEMORY_LIMIT value")?, | |||
| Err(VarError::NotUnicode(_)) => { | |||
| return Err(eyre!("RERUN_MEMORY_LIMIT env variable is not unicode")); | |||
| } | |||
| Err(VarError::NotPresent) => "25%".to_string(), | |||
| }; | |||
| options.memory_limit = memory_limit; | |||
| let rec = rerun::RecordingStreamBuilder::new("dora-rerun") | |||
| .spawn_opts(&options, None) | |||
| .context("Could not spawn rerun visualization")?; | |||
| while let Some(event) = events.recv() { | |||
| if let Event::Input { | |||
| id, | |||
| data, | |||
| metadata: _, | |||
| } = event | |||
| { | |||
| if id.as_str().contains("image") { | |||
| let shape = vec![ | |||
| TensorDimension { | |||
| name: Some("height".into()), | |||
| size: std::env::var(format!("{}_HEIGHT", id.as_str().to_uppercase())) | |||
| .context(format!( | |||
| "Could not read {}_HEIGHT env variable for parsing the image", | |||
| id.as_str().to_uppercase() | |||
| ))? | |||
| .parse() | |||
| .context(format!( | |||
| "Could not parse env {}_HEIGHT", | |||
| id.as_str().to_uppercase() | |||
| ))?, | |||
| }, | |||
| TensorDimension { | |||
| name: Some("width".into()), | |||
| size: std::env::var(format!("{}_WIDTH", id.as_str().to_uppercase())) | |||
| .context(format!( | |||
| "Could not read {}_WIDTH env variable for parsing the image", | |||
| id.as_str().to_uppercase() | |||
| ))? | |||
| .parse() | |||
| .context(format!( | |||
| "Could not parse env {}_WIDTH", | |||
| id.as_str().to_uppercase() | |||
| ))?, | |||
| }, | |||
| TensorDimension { | |||
| name: Some("depth".into()), | |||
| size: std::env::var(format!("{}_DEPTH", id.as_str().to_uppercase())) | |||
| .context(format!( | |||
| "Could not read {}_DEPTH env variable for parsing the image", | |||
| id.as_str().to_uppercase() | |||
| ))? | |||
| .parse() | |||
| .context(format!( | |||
| "Could not parse env {}_DEPTH", | |||
| id.as_str().to_uppercase() | |||
| ))?, | |||
| }, | |||
| ]; | |||
| let buffer: UInt8Array = data.to_data().into(); | |||
| let buffer: &[u8] = buffer.values(); | |||
| let buffer = TensorBuffer::U8(ArrowBuffer::from(buffer)); | |||
| let tensordata = TensorData::new(shape.clone(), buffer); | |||
| let image = rerun::Image::new(tensordata); | |||
| rec.log(id.as_str(), &image) | |||
| .context("could not log image")?; | |||
| } else if id.as_str().contains("textlog") { | |||
| let buffer: StringArray = data.to_data().into(); | |||
| buffer.iter().try_for_each(|string| -> Result<()> { | |||
| if let Some(str) = string { | |||
| rec.log(id.as_str(), &rerun::TextLog::new(str)) | |||
| .wrap_err("Could not log text") | |||
| } else { | |||
| Ok(()) | |||
| } | |||
| })?; | |||
| } else if id.as_str().contains("boxes2d") { | |||
| let buffer: Float32Array = data.to_data().into(); | |||
| let buffer: &[f32] = buffer.values(); | |||
| let mut centers = vec![]; | |||
| let mut sizes = vec![]; | |||
| let mut classes = vec![]; | |||
| buffer.chunks(6).for_each(|block| { | |||
| if let [x, y, w, h, _conf, cls] = block { | |||
| centers.push((*x, *y)); | |||
| sizes.push((*w, *h)); | |||
| classes.push(*cls as u16); | |||
| } | |||
| }); | |||
| rec.log( | |||
| id.as_str(), | |||
| &rerun::Boxes2D::from_centers_and_sizes(centers, sizes).with_class_ids(classes), | |||
| ) | |||
| .wrap_err("Could not log Boxes2D")?; | |||
| } | |||
| } | |||
| } | |||
| Ok(()) | |||
| } | |||