Merge branch 'main' into git-source

tags/v0.3.12-rc0
Philipp Oppermann 7 months ago
commit 187efed38e
100 changed files with 8883 additions and 1465 deletions
1. .github/workflows/ci.yml (+8, -8)
2. .github/workflows/node-hub-ci-cd.yml (+8, -2)
3. .github/workflows/node_hub_test.sh (+16, -16)
4. .gitignore (+1, -0)
5. Cargo.lock (+1321, -1025)
6. Cargo.toml (+30, -23)
7. Changelog.md (+73, -0)
8. README.md (+36, -16)
9. apis/python/node/dora/cuda.py (+43, -47)
10. apis/rust/node/src/event_stream/scheduler.rs (+1, -0)
11. apis/rust/node/src/node/mod.rs (+20, -19)
12. benches/llms/README.md (+7, -1)
13. benches/llms/llama_cpp_python.yaml (+2, -0)
14. benches/llms/mistralrs.yaml (+2, -0)
15. benches/llms/phi4.yaml (+2, -0)
16. benches/llms/qwen2.5.yaml (+2, -0)
17. benches/llms/transformers.yaml (+2, -0)
18. benches/mllm/pyproject.toml (+22, -0)
19. binaries/cli/Cargo.toml (+1, -0)
20. binaries/cli/src/lib.rs (+91, -21)
21. binaries/daemon/src/lib.rs (+91, -12)
22. binaries/daemon/src/spawn.rs (+42, -11)
23. binaries/runtime/src/lib.rs (+9, -5)
24. examples/av1-encoding/dataflow.yml (+62, -0)
25. examples/av1-encoding/dataflow_reachy.yml (+68, -0)
26. examples/av1-encoding/ios-dev.yaml (+54, -0)
27. examples/benchmark/dataflow.yml (+3, -1)
28. examples/benchmark/node/src/main.rs (+18, -27)
29. examples/benchmark/sink/src/main.rs (+2, -3)
30. examples/cuda-benchmark/demo_receiver.py (+11, -12)
31. examples/cuda-benchmark/receiver.py (+8, -10)
32. examples/depth_camera/ios-dev.yaml (+3, -0)
33. examples/mediapipe/README.md (+11, -0)
34. examples/mediapipe/realsense-dev.yml (+26, -0)
35. examples/mediapipe/rgb-dev.yml (+26, -0)
36. examples/reachy2-remote/dataflow_reachy.yml (+250, -0)
37. examples/reachy2-remote/parse_bbox.py (+76, -0)
38. examples/reachy2-remote/parse_point.py (+62, -0)
39. examples/reachy2-remote/parse_pose.py (+300, -0)
40. examples/reachy2-remote/parse_whisper.py (+135, -0)
41. examples/reachy2-remote/whisper-dev.yml (+42, -0)
42. examples/rerun-viewer/dataflow.yml (+1, -1)
43. examples/rerun-viewer/run.rs (+10, -4)
44. examples/so100-remote/README.md (+94, -0)
45. examples/so100-remote/no_torque.yml (+50, -0)
46. examples/so100-remote/parse_bbox.py (+69, -0)
47. examples/so100-remote/parse_pose.py (+161, -0)
48. examples/so100-remote/parse_whisper.py (+56, -0)
49. examples/so100-remote/qwenvl-compression.yml (+180, -0)
50. examples/so100-remote/qwenvl-remote.yml (+153, -0)
51. examples/so100-remote/qwenvl.yml (+142, -0)
52. examples/tracker/facebook_cotracker.yml (+51, -0)
53. examples/tracker/parse_bbox.py (+63, -0)
54. examples/tracker/qwenvl_cotracker.yml (+67, -0)
55. libraries/extensions/telemetry/metrics/Cargo.toml (+4, -4)
56. libraries/extensions/telemetry/metrics/src/lib.rs (+2, -2)
57. libraries/extensions/telemetry/tracing/src/lib.rs (+73, -28)
58. node-hub/dora-argotranslate/pyproject.toml (+1, -1)
59. node-hub/dora-cotracker/README.md (+221, -0)
60. node-hub/dora-cotracker/demo.yml (+40, -0)
61. node-hub/dora-cotracker/dora_cotracker/__init__.py (+11, -0)
62. node-hub/dora-cotracker/dora_cotracker/__main__.py (+5, -0)
63. node-hub/dora-cotracker/dora_cotracker/main.py (+212, -0)
64. node-hub/dora-cotracker/pyproject.toml (+32, -0)
65. node-hub/dora-cotracker/tests/test_dora_cotracker.py (+9, -0)
66. node-hub/dora-dav1d/Cargo.toml (+32, -0)
67. node-hub/dora-dav1d/pyproject.toml (+26, -0)
68. node-hub/dora-dav1d/src/lib.rs (+219, -0)
69. node-hub/dora-dav1d/src/main.rs (+3, -0)
70. node-hub/dora-distil-whisper/pyproject.toml (+5, -3)
71. node-hub/dora-echo/pyproject.toml (+1, -1)
72. node-hub/dora-internvl/pyproject.toml (+15, -14)
73. node-hub/dora-ios-lidar/dora_ios_lidar/main.py (+85, -39)
74. node-hub/dora-ios-lidar/pyproject.toml (+2, -2)
75. node-hub/dora-keyboard/pyproject.toml (+1, -1)
76. node-hub/dora-kit-car/README.md (+41, -5)
77. node-hub/dora-kokoro-tts/pyproject.toml (+1, -1)
78. node-hub/dora-llama-cpp-python/pyproject.toml (+0, -3)
79. node-hub/dora-magma/pyproject.toml (+3, -2)
80. node-hub/dora-mediapipe/README.md (+40, -0)
81. node-hub/dora-mediapipe/dora_mediapipe/__init__.py (+13, -0)
82. node-hub/dora-mediapipe/dora_mediapipe/__main__.py (+6, -0)
83. node-hub/dora-mediapipe/dora_mediapipe/main.py (+136, -0)
84. node-hub/dora-mediapipe/pyproject.toml (+25, -0)
85. node-hub/dora-mediapipe/tests/test_dora_mediapipe.py (+13, -0)
86. node-hub/dora-mediapipe/uv.lock (+2252, -0)
87. node-hub/dora-microphone/pyproject.toml (+1, -1)
88. node-hub/dora-object-to-pose/Cargo.toml (+1, -1)
89. node-hub/dora-object-to-pose/pyproject.toml (+0, -2)
90. node-hub/dora-object-to-pose/src/lib.rs (+56, -45)
91. node-hub/dora-openai-server/pyproject.toml (+1, -1)
92. node-hub/dora-opus/pyproject.toml (+14, -12)
93. node-hub/dora-opus/tests/test_translate.py (+4, -5)
94. node-hub/dora-opus/uv.lock (+1175, -0)
95. node-hub/dora-outtetts/README.md (+2, -0)
96. node-hub/dora-outtetts/dora_outtetts/tests/test_main.py (+4, -14)
97. node-hub/dora-outtetts/pyproject.toml (+1, -1)
98. node-hub/dora-parler/pyproject.toml (+9, -11)
99. node-hub/dora-phi4/pyproject.toml (+1, -1)
100. node-hub/dora-piper/pyproject.toml (+1, -1)

.github/workflows/ci.yml (+8, -8)

@@ -63,11 +63,11 @@ jobs:
           cache-directories: ${{ env.CARGO_TARGET_DIR }}

       - name: "Check"
-        run: cargo check --all
+        run: cargo check --all --exclude dora-dav1d --exclude dora-rav1e
       - name: "Build (Without Python dep as it is build with maturin)"
-        run: cargo build --all --exclude dora-node-api-python --exclude dora-operator-api-python --exclude dora-ros2-bridge-python
+        run: cargo build --all --exclude dora-dav1d --exclude dora-rav1e --exclude dora-node-api-python --exclude dora-operator-api-python --exclude dora-ros2-bridge-python
       - name: "Test"
-        run: cargo test --all --exclude dora-node-api-python --exclude dora-operator-api-python --exclude dora-ros2-bridge-python
+        run: cargo test --all --exclude dora-dav1d --exclude dora-rav1e --exclude dora-node-api-python --exclude dora-operator-api-python --exclude dora-ros2-bridge-python

   # Run examples as separate job because otherwise we will exhaust the disk
   # space of the GitHub action runners.

@@ -310,7 +310,7 @@ jobs:
           # Test Rust template Project
           dora new test_rust_project --internal-create-with-path-dependencies
           cd test_rust_project
-          cargo build --all
+          cargo build --all --exclude dora-dav1d --exclude dora-rav1e
           dora up
           dora list
           dora start dataflow.yml --name ci-rust-test --detach

@@ -459,12 +459,12 @@ jobs:
       - run: cargo --version --verbose

       - name: "Clippy"
-        run: cargo clippy --all
+        run: cargo clippy --all --exclude dora-dav1d --exclude dora-rav1e
       - name: "Clippy (tracing feature)"
-        run: cargo clippy --all --features tracing
+        run: cargo clippy --all --exclude dora-dav1d --exclude dora-rav1e --features tracing
         if: false # only the dora-runtime has this feature, but it is currently commented out
       - name: "Clippy (metrics feature)"
-        run: cargo clippy --all --features metrics
+        run: cargo clippy --all --exclude dora-dav1d --exclude dora-rav1e --features metrics
         if: false # only the dora-runtime has this feature, but it is currently commented out

   rustfmt:

@@ -544,4 +544,4 @@ jobs:
         with:
           use-cross: true
           command: check
-          args: --target ${{ matrix.platform.target }} --all --exclude dora-node-api-python --exclude dora-operator-api-python --exclude dora-ros2-bridge-python
+          args: --target ${{ matrix.platform.target }} --all --exclude dora-dav1d --exclude dora-rav1e --exclude dora-node-api-python --exclude dora-operator-api-python --exclude dora-ros2-bridge-python

.github/workflows/node-hub-ci-cd.yml (+8, -2)

@@ -13,7 +13,7 @@ on:

 jobs:
   find-jobs:
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-24.04
     name: Find Jobs
     outputs:
       folders: ${{ steps.jobs.outputs.folders }}

@@ -34,7 +34,7 @@ jobs:
     strategy:
       fail-fast: ${{ github.event_name != 'workflow_dispatch' && !(github.event_name == 'release' && startsWith(github.ref, 'refs/tags/')) }}
       matrix:
-        platform: [ubuntu-22.04, macos-14]
+        platform: [ubuntu-24.04, macos-14]
         folder: ${{ fromJson(needs.find-jobs.outputs.folders )}}
     steps:
       - name: Checkout repository

@@ -52,7 +52,12 @@ jobs:
       - name: Install system-level dependencies
         if: runner.os == 'Linux'
         run: |
+          sudo apt update
           sudo apt-get install portaudio19-dev
+          sudo apt-get install libdav1d-dev nasm libudev-dev
+          mkdir -p $HOME/.rustup/toolchains/stable-x86_64-unknown-linux-gnu/lib/rustlib/x86_64-unknown-linux-gnu/lib
+          ln -s /lib/x86_64-linux-gnu/libdav1d.so $HOME/.rustup/toolchains/stable-x86_64-unknown-linux-gnu/lib/rustlib/x86_64-unknown-linux-gnu/lib/libdav1d.so

           # Install mingw-w64 cross-compilers
           sudo apt install g++-mingw-w64-x86-64 gcc-mingw-w64-x86-64

@@ -60,6 +65,7 @@ jobs:
         if: runner.os == 'MacOS' && (github.event_name == 'workflow_dispatch' || (github.event_name == 'release' && startsWith(github.ref, 'refs/tags/')))
         run: |
           brew install portaudio
+          brew install dav1d nasm

       - name: Set up Python
         if: runner.os == 'Linux' || github.event_name == 'workflow_dispatch' || (github.event_name == 'release' && startsWith(github.ref, 'refs/tags/'))


.github/workflows/node_hub_test.sh (+16, -16)

@@ -2,10 +2,10 @@
 set -euo

 # List of ignored modules
-ignored_folders=("dora-parler")
+ignored_folders=("dora-parler" "dora-opus" "dora-internvl" "dora-magma")

 # Skip test
-skip_test_folders=("dora-internvl" "dora-parler" "dora-keyboard" "dora-microphone" "terminal-input")
+skip_test_folders=("dora-internvl" "dora-parler" "dora-keyboard" "dora-microphone" "terminal-input" "dora-magma")

 # Get current working directory
 dir=$(pwd)

@@ -26,8 +26,8 @@ else
        cargo build
        cargo test

-       pip install "maturin[zig]"
-       maturin build --zig
+       pip install "maturin[zig, patchelf]"
+       maturin build --release --compatibility manylinux_2_28 --zig
        # If GITHUB_EVENT_NAME is release or workflow_dispatch, publish the wheel on multiple platforms
        if [ "$GITHUB_EVENT_NAME" == "release" ] || [ "$GITHUB_EVENT_NAME" == "workflow_dispatch" ]; then
            # Free up ubuntu space

@@ -36,10 +36,10 @@ else
            sudo rm -rf /usr/share/dotnet/
            sudo rm -rf /opt/ghc/

-           maturin publish --skip-existing --zig
+           maturin publish --skip-existing --compatibility manylinux_2_28 --zig
            # aarch64-unknown-linux-gnu
            rustup target add aarch64-unknown-linux-gnu
-           maturin publish --target aarch64-unknown-linux-gnu --skip-existing --zig
+           maturin publish --target aarch64-unknown-linux-gnu --skip-existing --zig --compatibility manylinux_2_28
            # armv7-unknown-linux-musleabihf
            rustup target add armv7-unknown-linux-musleabihf

@@ -53,8 +53,15 @@
        fi

    elif [[ -f "Cargo.toml" && -f "pyproject.toml" && "$(uname)" = "Darwin" ]]; then
+       pip install "maturin[zig, patchelf]"
+       # aarch64-apple-darwin
+       maturin build --release
+       # If GITHUB_EVENT_NAME is release or workflow_dispatch, publish the wheel
+       if [ "$GITHUB_EVENT_NAME" == "release" ] || [ "$GITHUB_EVENT_NAME" == "workflow_dispatch" ]; then
+           maturin publish --skip-existing
+       fi
        # x86_64-apple-darwin
-       pip install "maturin[zig]"
        rustup target add x86_64-apple-darwin
        maturin build --target x86_64-apple-darwin --zig --release
        # If GITHUB_EVENT_NAME is release or workflow_dispatch, publish the wheel

@@ -62,15 +69,7 @@
            maturin publish --target x86_64-apple-darwin --skip-existing --zig
        fi

-       # aarch64-apple-darwin
-       rustup target add aarch64-apple-darwin
-       maturin build --target aarch64-apple-darwin --zig --release
-       # If GITHUB_EVENT_NAME is release or workflow_dispatch, publish the wheel
-       if [ "$GITHUB_EVENT_NAME" == "release" ] || [ "$GITHUB_EVENT_NAME" == "workflow_dispatch" ]; then
-           maturin publish --target aarch64-apple-darwin --skip-existing --zig
-       fi
-
-   else
+   elif [[ "$(uname)" = "Linux" ]]; then
        if [ -f "$dir/Cargo.toml" ]; then
            echo "Running build and tests for Rust project in $dir..."
            cargo check

@@ -96,6 +95,7 @@ else
            uv run pytest
        fi
        if [ "$GITHUB_EVENT_NAME" == "release" ] || [ "$GITHUB_EVENT_NAME" == "workflow_dispatch" ]; then
+           uv build
            uv publish --check-url https://pypi.org/simple
        fi
    fi


.gitignore (+1, -0)

@@ -11,6 +11,7 @@ examples/**/*.txt

 # Remove hdf and stl files
 *.stl
+*.dae
 *.STL
 *.hdf5




Cargo.lock (+1321, -1025)

File diff suppressed because it is too large.


Cargo.toml (+30, -23)

@@ -37,6 +37,9 @@ members = [
     "node-hub/dora-kit-car",
     "node-hub/dora-object-to-pose",
     "node-hub/dora-mistral-rs",
+    "node-hub/dora-rav1e",
+    "node-hub/dora-dav1d",
+    "node-hub/dora-rustypot",
     "libraries/extensions/ros2-bridge",
     "libraries/extensions/ros2-bridge/msg-gen",
     "libraries/extensions/ros2-bridge/python",

@@ -47,34 +50,34 @@ members = [
 [workspace.package]
 edition = "2021"
 # Make sure to also bump `apis/node/python/__init__.py` version.
-version = "0.3.10"
+version = "0.3.11"
 description = "`dora` goal is to be a low latency, composable, and distributed data flow."
-documentation = "https://dora.carsmos.ai"
+documentation = "https://dora-rs.ai"
 license = "Apache-2.0"
 repository = "https://github.com/dora-rs/dora/"

 [workspace.dependencies]
-dora-node-api = { version = "0.3.10", path = "apis/rust/node", default-features = false }
-dora-node-api-python = { version = "0.3.10", path = "apis/python/node", default-features = false }
-dora-operator-api = { version = "0.3.10", path = "apis/rust/operator", default-features = false }
-dora-operator-api-macros = { version = "0.3.10", path = "apis/rust/operator/macros" }
-dora-operator-api-types = { version = "0.3.10", path = "apis/rust/operator/types" }
-dora-operator-api-python = { version = "0.3.10", path = "apis/python/operator" }
-dora-operator-api-c = { version = "0.3.10", path = "apis/c/operator" }
-dora-node-api-c = { version = "0.3.10", path = "apis/c/node" }
-dora-core = { version = "0.3.10", path = "libraries/core" }
-dora-arrow-convert = { version = "0.3.10", path = "libraries/arrow-convert" }
-dora-tracing = { version = "0.3.10", path = "libraries/extensions/telemetry/tracing" }
-dora-metrics = { version = "0.3.10", path = "libraries/extensions/telemetry/metrics" }
-dora-download = { version = "0.3.10", path = "libraries/extensions/download" }
-shared-memory-server = { version = "0.3.10", path = "libraries/shared-memory-server" }
-communication-layer-request-reply = { version = "0.3.10", path = "libraries/communication-layer/request-reply" }
-dora-cli = { version = "0.3.10", path = "binaries/cli" }
-dora-runtime = { version = "0.3.10", path = "binaries/runtime" }
-dora-daemon = { version = "0.3.10", path = "binaries/daemon" }
-dora-coordinator = { version = "0.3.10", path = "binaries/coordinator" }
-dora-ros2-bridge = { version = "0.3.10", path = "libraries/extensions/ros2-bridge" }
-dora-ros2-bridge-msg-gen = { version = "0.3.10", path = "libraries/extensions/ros2-bridge/msg-gen" }
+dora-node-api = { version = "0.3.11", path = "apis/rust/node", default-features = false }
+dora-node-api-python = { version = "0.3.11", path = "apis/python/node", default-features = false }
+dora-operator-api = { version = "0.3.11", path = "apis/rust/operator", default-features = false }
+dora-operator-api-macros = { version = "0.3.11", path = "apis/rust/operator/macros" }
+dora-operator-api-types = { version = "0.3.11", path = "apis/rust/operator/types" }
+dora-operator-api-python = { version = "0.3.11", path = "apis/python/operator" }
+dora-operator-api-c = { version = "0.3.11", path = "apis/c/operator" }
+dora-node-api-c = { version = "0.3.11", path = "apis/c/node" }
+dora-core = { version = "0.3.11", path = "libraries/core" }
+dora-arrow-convert = { version = "0.3.11", path = "libraries/arrow-convert" }
+dora-tracing = { version = "0.3.11", path = "libraries/extensions/telemetry/tracing" }
+dora-metrics = { version = "0.3.11", path = "libraries/extensions/telemetry/metrics" }
+dora-download = { version = "0.3.11", path = "libraries/extensions/download" }
+shared-memory-server = { version = "0.3.11", path = "libraries/shared-memory-server" }
+communication-layer-request-reply = { version = "0.3.11", path = "libraries/communication-layer/request-reply" }
+dora-cli = { version = "0.3.11", path = "binaries/cli" }
+dora-runtime = { version = "0.3.11", path = "binaries/runtime" }
+dora-daemon = { version = "0.3.11", path = "binaries/daemon" }
+dora-coordinator = { version = "0.3.11", path = "binaries/coordinator" }
+dora-ros2-bridge = { version = "0.3.11", path = "libraries/extensions/ros2-bridge" }
+dora-ros2-bridge-msg-gen = { version = "0.3.11", path = "libraries/extensions/ros2-bridge/msg-gen" }
 dora-ros2-bridge-python = { path = "libraries/extensions/ros2-bridge/python" }
 # versioned independently from the other dora crates
 dora-message = { version = "0.4.4", path = "libraries/message" }

@@ -187,6 +190,10 @@ name = "cxx-ros2-dataflow"
 path = "examples/c++-ros2-dataflow/run.rs"
 required-features = ["ros2-examples"]

+[[example]]
+name = "rerun-viewer"
+path = "examples/rerun-viewer/run.rs"
+
 # The profile that 'dist' will build with
 [profile.dist]
 inherits = "release"


Changelog.md (+73, -0)

@@ -1,5 +1,78 @@
 # Changelog

+## v0.3.11 (2025-04-07)
+
+## What's Changed
+
+- Post dora 0.3.10 release fix by @haixuanTao in https://github.com/dora-rs/dora/pull/804
+- Add windows release for rust nodes by @haixuanTao in https://github.com/dora-rs/dora/pull/805
+- Add Node Table into README.md by @haixuanTao in https://github.com/dora-rs/dora/pull/808
+- update dora yaml json schema validator by @haixuanTao in https://github.com/dora-rs/dora/pull/809
+- Improve readme support matrix readability by @haixuanTao in https://github.com/dora-rs/dora/pull/810
+- Clippy automatic fixes applied by @Shar-jeel-Sajid in https://github.com/dora-rs/dora/pull/812
+- Improve documentation on adding new node to the node-hub by @haixuanTao in https://github.com/dora-rs/dora/pull/820
+- #807 Fixed by @7SOMAY in https://github.com/dora-rs/dora/pull/818
+- Applied Ruff pydocstyle to dora by @Mati-ur-rehman-017 in https://github.com/dora-rs/dora/pull/831
+- Related to dora-bot issue assignment by @MunishMummadi in https://github.com/dora-rs/dora/pull/840
+- Add dora-lerobot node into dora by @Ignavar in https://github.com/dora-rs/dora/pull/834
+- CI: Permit issue modifications for issue assign job by @phil-opp in https://github.com/dora-rs/dora/pull/848
+- Fix: Set variables outside bash script to prevent injection by @phil-opp in https://github.com/dora-rs/dora/pull/849
+- Replacing Deprecated functions of pyo3 by @Shar-jeel-Sajid in https://github.com/dora-rs/dora/pull/838
+- Add noise filtering on whisper to be able to use speakers by @haixuanTao in https://github.com/dora-rs/dora/pull/847
+- Add minimal Dockerfile with Python and uv for easy onboarding by @Krishnadubey1008 in https://github.com/dora-rs/dora/pull/843
+- More compact readme with example section by @haixuanTao in https://github.com/dora-rs/dora/pull/855
+- Create docker-image.yml by @haixuanTao in https://github.com/dora-rs/dora/pull/857
+- Multi platform docker by @haixuanTao in https://github.com/dora-rs/dora/pull/858
+- change: `dora/node-hub/README.md` by @MunishMummadi in https://github.com/dora-rs/dora/pull/862
+- Added dora-phi4 inside node-hub by @7SOMAY in https://github.com/dora-rs/dora/pull/861
+- node-hub: Added dora-magma node by @MunishMummadi in https://github.com/dora-rs/dora/pull/853
+- Added the dora-llama-cpp-python node by @ShashwatPatil in https://github.com/dora-rs/dora/pull/850
+- Adding in some missing types and test cases within arrow convert crate by @Ignavar in https://github.com/dora-rs/dora/pull/864
+- Migrate robots from dora-lerobot to dora repository by @rahat2134 in https://github.com/dora-rs/dora/pull/868
+- Applied pyupgrade style by @Mati-ur-rehman-017 in https://github.com/dora-rs/dora/pull/876
+- Adding additional llm in tests by @haixuanTao in https://github.com/dora-rs/dora/pull/873
+- Dora transformer node by @ShashwatPatil in https://github.com/dora-rs/dora/pull/870
+- Using macros in Arrow Conversion by @Shar-jeel-Sajid in https://github.com/dora-rs/dora/pull/877
+- Adding run command within python API by @haixuanTao in https://github.com/dora-rs/dora/pull/875
+- Added f16 type conversion by @Shar-jeel-Sajid in https://github.com/dora-rs/dora/pull/886
+- Added "PERF" flag inside node-hub by @7SOMAY in https://github.com/dora-rs/dora/pull/880
+- Added quality ruff-flags for better code quality by @7SOMAY in https://github.com/dora-rs/dora/pull/888
+- Add llm benchmark by @haixuanTao in https://github.com/dora-rs/dora/pull/881
+- Implement `into_vec_f64(&ArrowData) -> Vec<f64)` conversion function by @Shar-jeel-Sajid in https://github.com/dora-rs/dora/pull/893
+- Adding virtual env within dora build command by @haixuanTao in https://github.com/dora-rs/dora/pull/895
+- Adding metrics for node api by @haixuanTao in https://github.com/dora-rs/dora/pull/903
+- Made UI interface for input in dora, using Gradio by @ShashwatPatil in https://github.com/dora-rs/dora/pull/891
+- Add chinese voice support by @haixuanTao in https://github.com/dora-rs/dora/pull/902
+- Made conversion generic by @Shar-jeel-Sajid in https://github.com/dora-rs/dora/pull/908
+- Added husky simulation in Mujoco and gamepad node by @ShashwatPatil in https://github.com/dora-rs/dora/pull/906
+- use `cargo-dist` tool for dora-cli releases by @Hennzau in https://github.com/dora-rs/dora/pull/916
+- Implementing Self update by @Shar-jeel-Sajid in https://github.com/dora-rs/dora/pull/920
+- Fix: RUST_LOG=. dora run bug by @starlitxiling in https://github.com/dora-rs/dora/pull/924
+- Added dora-mistral-rs node in node-hub for inference in rust by @Ignavar in https://github.com/dora-rs/dora/pull/910
+- Fix reachy left arm by @haixuanTao in https://github.com/dora-rs/dora/pull/907
+- Functions for sending and receiving data using Arrow::FFI by @Mati-ur-rehman-017 in https://github.com/dora-rs/dora/pull/918
+- Adding `recv_async` dora method to retrieve data in python async by @haixuanTao in https://github.com/dora-rs/dora/pull/909
+- Update: README.md of the node hub by @Choudhry18 in https://github.com/dora-rs/dora/pull/929
+- Fix magma by @haixuanTao in https://github.com/dora-rs/dora/pull/926
+- Add support for mask in rerun by @haixuanTao in https://github.com/dora-rs/dora/pull/927
+- Bump array-init-cursor from 0.2.0 to 0.2.1 by @dependabot in https://github.com/dora-rs/dora/pull/933
+- Enhance Zenoh Integration Documentation by @NageshMandal in https://github.com/dora-rs/dora/pull/935
+- Support av1 by @haixuanTao in https://github.com/dora-rs/dora/pull/932
+- Bump dora v0.3.11 by @haixuanTao in https://github.com/dora-rs/dora/pull/948
+
+## New Contributors
+
+- @Shar-jeel-Sajid made their first contribution in https://github.com/dora-rs/dora/pull/812
+- @7SOMAY made their first contribution in https://github.com/dora-rs/dora/pull/818
+- @Mati-ur-rehman-017 made their first contribution in https://github.com/dora-rs/dora/pull/831
+- @MunishMummadi made their first contribution in https://github.com/dora-rs/dora/pull/840
+- @Ignavar made their first contribution in https://github.com/dora-rs/dora/pull/834
+- @Krishnadubey1008 made their first contribution in https://github.com/dora-rs/dora/pull/843
+- @ShashwatPatil made their first contribution in https://github.com/dora-rs/dora/pull/850
+- @rahat2134 made their first contribution in https://github.com/dora-rs/dora/pull/868
+- @Choudhry18 made their first contribution in https://github.com/dora-rs/dora/pull/929
+- @NageshMandal made their first contribution in https://github.com/dora-rs/dora/pull/935
+
 ## v0.3.10 (2025-03-04)

 ## What's Changed


README.md (+36, -16)

@@ -29,6 +29,9 @@
   <a href="https://pypi.org/project/dora-rs/">
     <img src="https://img.shields.io/pypi/v/dora-rs.svg" alt="PyPi Latest Release"/>
   </a>
+  <a href="https://github.com/dora-rs/dora/blob/main/LICENSE">
+    <img src="https://img.shields.io/github/license/dora-rs/dora" alt="PyPi Latest Release"/>
+  </a>
 </div>
 <div align="center">
   <a href="https://trendshift.io/repositories/9190" target="_blank"><img src="https://trendshift.io/api/badge/repositories/9190" alt="dora-rs%2Fdora | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>

@@ -59,25 +62,30 @@
 <details open>
 <summary><b>2025</b></summary>

-- \[03/05\] dora-rs has been accepted to [**GSoC 2025 🎉**](https://summerofcode.withgoogle.com/programs/2025/organizations/dora-rs-tb), with the following [**idea list**](https://github.com/dora-rs/dora/wiki/GSoC_2025).
-- \[03/04\] Add support for Zenoh for distributed dataflow.
-- \[03/04\] Add support for Meta SAM2, Kokoro(TTS), Improved Qwen2.5 Performance using `llama.cpp`.
+- \[05/25\] Add support for dora-pytorch-kinematics for fk and ik, dora-mediapipe for pose estimation, dora-rustypot for rust serialport read/write, points2d and points3d visualization in rerun.
+- \[04/25\] Add support for dora-cotracker to track any point on a frame, dora-rav1e AV1 encoding up to 12bit and dora-dav1d AV1 decoding,
+- \[03/25\] Add support for dora async Python.
+- \[03/25\] Add support for Microsoft Phi4, Microsoft Magma.
+- \[03/25\] dora-rs has been accepted to [**GSoC 2025 🎉**](https://summerofcode.withgoogle.com/programs/2025/organizations/dora-rs-tb), with the following [**idea list**](https://github.com/dora-rs/dora/wiki/GSoC_2025).
+- \[03/25\] Add support for Zenoh for distributed dataflow.
+- \[03/25\] Add support for Meta SAM2, Kokoro(TTS), Improved Qwen2.5 Performance using `llama.cpp`.
 - \[02/25\] Add support for Qwen2.5(LLM), Qwen2.5-VL(VLM), outetts(TTS)
 </details>

 ## Support Matrix

-|                                   | dora-rs |
-| --------------------------------- | ------- |
-| **APIs**                          | Python >= 3.7 ✅ <br> Rust ✅<br> C/C++ 🆗 <br>ROS2 >= Foxy 🆗 |
-| **OS**                            | Linux: Arm 32 ✅ Arm 64 ✅ x64_86 ✅ <br>MacOS: Arm 64 ✅ x64_86 ✅<br>Windows: x64_86 🆗<br> Android: 🛠️ (Blocked by: https://github.com/elast0ny/shared_memory/issues/32) <br> IOS: 🛠️ |
-| **Message Format**                | Arrow ✅ <br> Standard Specification 🛠️ |
-| **Local Communication**           | Shared Memory ✅ <br> [Cuda IPC](https://arrow.apache.org/docs/python/api/cuda.html) 📐 |
-| **Remote Communication**          | [Zenoh](https://zenoh.io/) 📐 |
-| **Metrics, Tracing, and Logging** | Opentelemetry 📐 |
-| **Configuration**                 | YAML ✅ |
-| **Package Manager**               | [pip](https://pypi.org/): Python Node ✅ Rust Node ✅ C/C++ Node 🛠️ <br>[cargo](https://crates.io/): Rust Node ✅ |
+|                                   | dora-rs |
+| --------------------------------- | ------- |
+| **APIs**                          | Python >= 3.7 including sync ⭐✅ <br> Rust ✅<br> C/C++ 🆗 <br>ROS2 >= Foxy 🆗 |
+| **OS**                            | Linux: Arm 32 ⭐✅ Arm 64 ⭐✅ x64_86 ⭐✅ <br>MacOS: Arm 64 ⭐✅ x64_86 ✅<br>Windows: x64_86 🆗 <br>WSL: x64_86 🆗 <br> Android: 🛠️ (Blocked by: https://github.com/elast0ny/shared_memory/issues/32) <br> IOS: 🛠️ |
+| **Message Format**                | Arrow ✅ <br> Standard Specification 🛠️ |
+| **Local Communication**           | Shared Memory ✅ <br> [Cuda IPC](https://arrow.apache.org/docs/python/api/cuda.html) 📐 |
+| **Remote Communication**          | [Zenoh](https://zenoh.io/) 📐 |
+| **Metrics, Tracing, and Logging** | Opentelemetry 📐 |
+| **Configuration**                 | YAML ✅ |
+| **Package Manager**               | [pip](https://pypi.org/): Python Node ✅ Rust Node ✅ C/C++ Node 🛠️ <br>[cargo](https://crates.io/): Rust Node ✅ |

+> - ⭐ = Recommended
 > - ✅ = First Class Support
 > - 🆗 = Best Effort Support
 > - 📐 = Experimental and looking for contributions

@@ -172,13 +180,13 @@ cargo install dora-cli
 ### With Github release for macOS and Linux

 ```bash
-curl --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/dora-rs/dora/main/install.sh | bash
+curl --proto '=https' --tlsv1.2 -LsSf https://github.com/dora-rs/dora/releases/latest/download/dora-cli-installer.sh | sh
 ```

 ### With Github release for Windows

 ```powershell
-powershell -c "irm https://raw.githubusercontent.com/dora-rs/dora/main/install.ps1 | iex"
+powershell -ExecutionPolicy ByPass -c "irm https://github.com/dora-rs/dora/releases/latest/download/dora-cli-installer.ps1 | iex"
 ```

 ### With Source

@@ -331,3 +339,15 @@ We also have [a contributing guide](CONTRIBUTING.md).
 ## License

 This project is licensed under Apache-2.0. Check out [NOTICE.md](NOTICE.md) for more information.
+
+---
+
+## Further Resources 📚
+
+- [Zenoh Documentation](https://zenoh.io/docs/getting-started/first-app/)
+- [DORA Zenoh Discussion (GitHub Issue #512)](https://github.com/dora-rs/dora/issues/512)
+- [Dora Autoware Localization Demo](https://github.com/dora-rs/dora-autoware-localization-demo)

apis/python/node/dora/cuda.py (+43, -47)

@@ -8,13 +8,19 @@ from numba.cuda import to_device

 # Make sure to install numba with cuda
 from numba.cuda.cudadrv.devicearray import DeviceNDArray
+from numba.cuda.cudadrv.devices import get_context
+from numba.cuda.cudadrv.driver import IpcHandle

-# To install pyarrow.cuda, run `conda install pyarrow "arrow-cpp-proc=*=cuda" -c conda-forge`
-from pyarrow import cuda
+import json
+
+from contextlib import contextmanager
+from typing import ContextManager


 def torch_to_ipc_buffer(tensor: torch.TensorType) -> tuple[pa.array, dict]:
-    """Convert a Pytorch tensor into a pyarrow buffer containing the IPC handle and its metadata.
+    """Convert a Pytorch tensor into a pyarrow buffer containing the IPC handle
+    and its metadata.

     Example Use:
     ```python

@@ -24,75 +30,65 @@ def torch_to_ipc_buffer(tensor: torch.TensorType) -> tuple[pa.array, dict]:
     ```
     """
     device_arr = to_device(tensor)
-    cuda_buf = pa.cuda.CudaBuffer.from_numba(device_arr.gpu_data)
-    handle_buffer = cuda_buf.export_for_ipc().serialize()
+    ipch = get_context().get_ipc_handle(device_arr.gpu_data)
+    _, handle, size, source_info, offset = ipch.__reduce__()[1]
     metadata = {
         "shape": device_arr.shape,
         "strides": device_arr.strides,
         "dtype": device_arr.dtype.str,
+        "size": size,
+        "offset": offset,
+        "source_info": json.dumps(source_info),
     }
-    return pa.array(handle_buffer, type=pa.uint8()), metadata
+    return pa.array(handle, pa.int8()), metadata


-def ipc_buffer_to_ipc_handle(handle_buffer: pa.array) -> cuda.IpcMemHandle:
-    """Convert a buffer containing a serialized handler into cuda IPC MemHandle.
+def ipc_buffer_to_ipc_handle(handle_buffer: pa.array, metadata: dict) -> IpcHandle:
+    """Convert a buffer containing a serialized handler into cuda IPC Handle.

     example use:
     ```python
+    from dora.cuda import ipc_buffer_to_ipc_handle, open_ipc_handle

-    import pyarrow as pa
-    from dora.cuda import ipc_buffer_to_ipc_handle, cudabuffer_to_torch
-
-    ctx = pa.cuda.context()
     event = node.next()

-    ipc_handle = ipc_buffer_to_ipc_handle(event["value"])
-    cudabuffer = ctx.open_ipc_buffer(ipc_handle)
-    torch_tensor = cudabuffer_to_torch(cudabuffer, event["metadata"])  # on cuda
+    ipc_handle = ipc_buffer_to_ipc_handle(event["value"], event["metadata"])
+    with open_ipc_handle(ipc_handle, event["metadata"]) as tensor:
+        pass
     ```
     """
-    handle_buffer = handle_buffer.buffers()[1]
-    return pa.cuda.IpcMemHandle.from_buffer(handle_buffer)
+    handle = handle_buffer.to_pylist()
+    return IpcHandle._rebuild(
+        handle,
+        metadata["size"],
+        json.loads(metadata["source_info"]),
+        metadata["offset"],
+    )


-def cudabuffer_to_numba(buffer: cuda.CudaBuffer, metadata: dict) -> DeviceNDArray:
-    """Convert a pyarrow CUDA buffer to numba.
+@contextmanager
+def open_ipc_handle(
+    ipc_handle: IpcHandle, metadata: dict
+) -> ContextManager[torch.TensorType]:
+    """Open a CUDA IPC handle and return a Pytorch tensor.

     example use:
     ```python
+    from dora.cuda import ipc_buffer_to_ipc_handle, open_ipc_handle

-    import pyarrow as pa
-    from dora.cuda import ipc_buffer_to_ipc_handle, cudabuffer_to_torch
-
-    ctx = pa.cuda.context()
     event = node.next()

-    ipc_handle = ipc_buffer_to_ipc_handle(event["value"])
-    cudabuffer = ctx.open_ipc_buffer(ipc_handle)
-    numba_tensor = cudabuffer_to_numbda(cudabuffer, event["metadata"])
+    ipc_handle = ipc_buffer_to_ipc_handle(event["value"], event["metadata"])
+    with open_ipc_handle(ipc_handle, event["metadata"]) as tensor:
+        pass
     ```
     """
     shape = metadata["shape"]
     strides = metadata["strides"]
     dtype = metadata["dtype"]
-    return DeviceNDArray(shape, strides, dtype, gpu_data=buffer.to_numba())
-
-
-def cudabuffer_to_torch(buffer: cuda.CudaBuffer, metadata: dict) -> torch.Tensor:
-    """Convert a pyarrow CUDA buffer to a torch tensor.
-
-    example use:
-    ```python
-
-    import pyarrow as pa
-    from dora.cuda import ipc_buffer_to_ipc_handle, cudabuffer_to_torch
-
-    ctx = pa.cuda.context()
-    event = node.next()
-
-    ipc_handle = ipc_buffer_to_ipc_handle(event["value"])
-    cudabuffer = ctx.open_ipc_buffer(ipc_handle)
-    torch_tensor = cudabuffer_to_torch(cudabuffer, event["metadata"])  # on cuda
-    ```
-    """
-    return torch.as_tensor(cudabuffer_to_numba(buffer, metadata), device="cuda")
+    try:
+        buffer = ipc_handle.open(get_context())
+        device_arr = DeviceNDArray(shape, strides, dtype, gpu_data=buffer)
+        yield torch.as_tensor(device_arr, device="cuda")
+    finally:
+        ipc_handle.close()
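Taken together, this change swaps the `pyarrow.cuda` serialization path for numba-managed IPC handles, and `cudabuffer_to_torch` is superseded by the scoped `open_ipc_handle` context manager. A minimal sketch of the resulting sender/receiver flow, assuming a CUDA device and the dora Python node API; the output id `"image"` and the tensor shape are illustrative:

```python
# Sender process: publish only the IPC handle plus metadata, not the tensor data.
import torch
from dora import Node
from dora.cuda import torch_to_ipc_buffer

node = Node()
tensor = torch.rand(480, 640, 3, device="cuda")
buffer, metadata = torch_to_ipc_buffer(tensor)
node.send_output("image", buffer, metadata)

# Receiver process: rebuild the handle and borrow the tensor while it is open.
from dora import Node
from dora.cuda import ipc_buffer_to_ipc_handle, open_ipc_handle

node = Node()
event = node.next()
if event["type"] == "INPUT":
    handle = ipc_buffer_to_ipc_handle(event["value"], event["metadata"])
    with open_ipc_handle(handle, event["metadata"]) as tensor:
        # all CUDA work on the borrowed tensor must finish before the handle closes
        mean = tensor.mean().item()
```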

apis/rust/node/src/event_stream/scheduler.rs (+1, -0)

@@ -54,6 +54,7 @@ impl Scheduler {
         if let Some((size, queue)) = self.event_queues.get_mut(event_id) {
             // Remove the oldest event if at limit
             if &queue.len() >= size {
+                tracing::debug!("Discarding event for input `{event_id}` due to queue size limit");
                 queue.pop_front();
             }
             queue.push_back(event);


apis/rust/node/src/node/mod.rs (+20, -19)

@@ -32,9 +32,10 @@ use std::{
 use tracing::{info, warn};

 #[cfg(feature = "metrics")]
-use dora_metrics::init_meter_provider;
+use dora_metrics::run_metrics_monitor;
 #[cfg(feature = "tracing")]
-use dora_tracing::set_up_tracing;
+use dora_tracing::TracingBuilder;
+
 use tokio::runtime::{Handle, Runtime};

 pub mod arrow_utils;

@@ -81,8 +82,12 @@ impl DoraNode {
             serde_yaml::from_str(&raw).context("failed to deserialize node config")?
         };
         #[cfg(feature = "tracing")]
-        set_up_tracing(node_config.node_id.as_ref())
-            .context("failed to set up tracing subscriber")?;
+        {
+            TracingBuilder::new(node_config.node_id.as_ref())
+                .build()
+                .wrap_err("failed to set up tracing subscriber")?;
+        }

         Self::init(node_config)
     }

@@ -156,24 +161,20 @@ impl DoraNode {
         let id = format!("{}/{}", dataflow_id, node_id);

         #[cfg(feature = "metrics")]
-        match &rt {
-            TokioRuntime::Runtime(rt) => rt.spawn(async {
-                if let Err(e) = init_meter_provider(id)
-                    .await
-                    .context("failed to init metrics provider")
-                {
-                    warn!("could not create metric provider with err: {:#?}", e);
-                }
-            }),
-            TokioRuntime::Handle(handle) => handle.spawn(async {
-                if let Err(e) = init_meter_provider(id)
-                    .await
-                    .context("failed to init metrics provider")
-                {
-                    warn!("could not create metric provider with err: {:#?}", e);
-                }
-            }),
-        };
+        {
+            let monitor_task = async move {
+                if let Err(e) = run_metrics_monitor(id.clone())
+                    .await
+                    .wrap_err("metrics monitor exited unexpectedly")
+                {
+                    warn!("metrics monitor failed: {:#?}", e);
+                }
+            };
+            match &rt {
+                TokioRuntime::Runtime(rt) => rt.spawn(monitor_task),
+                TokioRuntime::Handle(handle) => handle.spawn(monitor_task),
+            };
+        }

         let event_stream = EventStream::init(
             dataflow_id,


benches/llms/README.md (+7, -1)

@@ -1,6 +1,12 @@
 # Benchmark LLM Speed

-Use the following command to run the benchmark:
+If you do not have a python virtual environment set up, run:
+
+```bash
+uv venv --seed -p 3.11
+```
+
+Then use the following command to run the benchmark:

 ```bash
 dora build transformers.yaml --uv


benches/llms/llama_cpp_python.yaml (+2, -0)

@@ -1,5 +1,7 @@
 nodes:
   - id: benchmark_script
+    build: |
+      pip install ../mllm
     path: ../mllm/benchmark_script.py
     inputs:
       text: llm/text

benches/llms/mistralrs.yaml (+2, -0)

@@ -1,5 +1,7 @@
 nodes:
   - id: benchmark_script
+    build: |
+      pip install ../mllm
     path: ../mllm/benchmark_script.py
     inputs:
       text: llm/text

benches/llms/phi4.yaml (+2, -0)

@@ -1,5 +1,7 @@
 nodes:
   - id: benchmark_script
+    build: |
+      pip install ../mllm
     path: ../mllm/benchmark_script.py
     inputs:
       text: llm/text

benches/llms/qwen2.5.yaml (+2, -0)

@@ -1,5 +1,7 @@
 nodes:
   - id: benchmark_script
+    build: |
+      pip install ../mllm
     path: ../mllm/benchmark_script.py
     inputs:
       text: llm/text

benches/llms/transformers.yaml (+2, -0)

@@ -1,5 +1,7 @@
 nodes:
   - id: benchmark_script
+    build: |
+      pip install ../mllm
     path: ../mllm/benchmark_script.py
     inputs:
       text: llm/text


benches/mllm/pyproject.toml (+22, -0, new file)

[project]
name = "dora-bench"
version = "0.1.0"
description = "Script to benchmark performance of llms while using dora"
authors = [{ name = "Haixuan Xavier Tao", email = "tao.xavier@outlook.com" }]
license = { text = "MIT" }
readme = "README.md"
requires-python = ">=3.11"

dependencies = [
    "dora-rs>=0.3.9",
    "librosa>=0.10.0",
    "opencv-python>=4.8",
    "Pillow>=10",
]

[project.scripts]
dora-benches = "benchmark_script.main:main"

[build-system]
requires = ["setuptools>=61", "wheel"]
build-backend = "setuptools.build_meta"

binaries/cli/Cargo.toml (+1, -0)

@@ -60,6 +60,7 @@ pyo3 = { workspace = true, features = [
     "extension-module",
     "abi3",
 ], optional = true }
+self-replace = "1.5.0"
 dunce = "1.0.5"
 git2 = { workspace = true }




binaries/cli/src/lib.rs (+91, -21)

@@ -15,8 +15,7 @@ use dora_message::{
     coordinator_to_cli::{ControlRequestReply, DataflowList, DataflowResult, DataflowStatus},
 };
 #[cfg(feature = "tracing")]
-use dora_tracing::set_up_tracing;
-use dora_tracing::{set_up_tracing_opts, FileLogging};
+use dora_tracing::TracingBuilder;
 use duration_str::parse;
 use eyre::{bail, Context};
 use formatting::FormatDataflowError;

@@ -246,7 +245,7 @@ enum Command {
         #[clap(long)]
         quiet: bool,
     },
+    /// Dora CLI self-management commands
     Self_ {
         #[clap(subcommand)]
         command: SelfSubCommand,
     },

@@ -255,11 +254,18 @@
 #[derive(Debug, clap::Subcommand)]
 enum SelfSubCommand {
+    /// Check for updates or update the CLI
     Update {
         /// Only check for updates without installing
         #[clap(long)]
         check_only: bool,
     },
+    /// Remove The Dora CLI from the system
+    Uninstall {
+        /// Force uninstallation without confirmation
+        #[clap(long)]
+        force: bool,
+    },
 }

 #[derive(Debug, clap::Args)]

@@ -310,34 +316,42 @@ fn run_cli(args: Args) -> eyre::Result<()> {
                 .as_ref()
                 .map(|id| format!("{name}-{id}"))
                 .unwrap_or(name.to_string());
-            let stdout = (!quiet).then_some("info,zenoh=warn");
-            let file = Some(FileLogging {
-                file_name: filename,
-                filter: LevelFilter::INFO,
-            });
-            set_up_tracing_opts(name, stdout, file)
-                .context("failed to set up tracing subscriber")?;
+            let mut builder = TracingBuilder::new(name);
+            if !quiet {
+                builder = builder.with_stdout("info,zenoh=warn");
+            }
+            builder = builder.with_file(filename, LevelFilter::INFO)?;
+            builder
+                .build()
+                .wrap_err("failed to set up tracing subscriber")?;
         }
         Command::Runtime => {
             // Do not set the runtime in the cli.
         }
         Command::Coordinator { quiet, .. } => {
             let name = "dora-coordinator";
-            let stdout = (!quiet).then_some("info");
-            let file = Some(FileLogging {
-                file_name: name.to_owned(),
-                filter: LevelFilter::INFO,
-            });
-            set_up_tracing_opts(name, stdout, file)
-                .context("failed to set up tracing subscriber")?;
+            let mut builder = TracingBuilder::new(name);
+            if !quiet {
+                builder = builder.with_stdout("info");
+            }
+            builder = builder.with_file(name, LevelFilter::INFO)?;
+            builder
+                .build()
+                .wrap_err("failed to set up tracing subscriber")?;
         }
         Command::Run { .. } => {
-            let log_level = std::env::var("RUST_LOG").ok().or(Some("info".to_string()));
-            set_up_tracing_opts("run", log_level.as_deref(), None)
-                .context("failed to set up tracing subscriber")?;
+            let log_level = std::env::var("RUST_LOG").ok().unwrap_or("info".to_string());
+            TracingBuilder::new("run")
+                .with_stdout(log_level)
+                .build()
+                .wrap_err("failed to set up tracing subscriber")?;
         }
         _ => {
-            set_up_tracing("dora-cli").context("failed to set up tracing subscriber")?;
+            TracingBuilder::new("dora-cli")
+                .with_stdout("warn")
+                .build()
+                .wrap_err("failed to set up tracing subscriber")?;
         }
     };

@@ -574,6 +588,62 @@ fn run_cli(args: Args) -> eyre::Result<()> {
                     }
                 }
            }
+            SelfSubCommand::Uninstall { force } => {
+                if !force {
+                    let confirmed =
+                        inquire::Confirm::new("Are you sure you want to uninstall Dora CLI?")
+                            .with_default(false)
+                            .prompt()
+                            .wrap_err("Uninstallation cancelled")?;
+
+                    if !confirmed {
+                        println!("Uninstallation cancelled");
+                        return Ok(());
+                    }
+                }
+
+                println!("Uninstalling Dora CLI...");
+                #[cfg(feature = "python")]
+                {
+                    println!("Detected Python installation...");
+
+                    // Try uv pip uninstall first
+                    let uv_status = std::process::Command::new("uv")
+                        .args(["pip", "uninstall", "dora-rs-cli"])
+                        .status();
+
+                    if let Ok(status) = uv_status {
+                        if status.success() {
+                            println!("Dora CLI has been successfully uninstalled via uv pip.");
+                            return Ok(());
+                        }
+                    }
+
+                    // Fall back to regular pip uninstall
+                    println!("Trying with pip...");
+                    let status = std::process::Command::new("pip")
+                        .args(["uninstall", "-y", "dora-rs-cli"])
+                        .status()
+                        .wrap_err("Failed to run pip uninstall")?;
+
+                    if status.success() {
+                        println!("Dora CLI has been successfully uninstalled via pip.");
+                    } else {
+                        bail!("Failed to uninstall Dora CLI via pip.");
+                    }
+                }
+                #[cfg(not(feature = "python"))]
+                {
+                    match self_replace::self_delete() {
+                        Ok(_) => {
+                            println!("Dora CLI has been successfully uninstalled.");
+                        }
+                        Err(e) => {
+                            bail!("Failed to uninstall Dora CLI: {}", e);
+                        }
+                    }
+                }
+            }
         },
     };




binaries/daemon/src/lib.rs (+91, -12)

@@ -287,21 +287,99 @@ impl Daemon {
             None => None,
         };

-        let zenoh_config = match std::env::var(zenoh::Config::DEFAULT_CONFIG_PATH_ENV) {
-            Ok(path) => zenoh::Config::from_file(&path)
-                .map_err(|e| eyre!(e))
-                .wrap_err_with(|| format!("failed to read zenoh config from {path}"))?,
-            Err(std::env::VarError::NotPresent) => zenoh::Config::default(),
+        let zenoh_session = match std::env::var(zenoh::Config::DEFAULT_CONFIG_PATH_ENV) {
+            Ok(path) => {
+                let zenoh_config = zenoh::Config::from_file(&path)
+                    .map_err(|e| eyre!(e))
+                    .wrap_err_with(|| format!("failed to read zenoh config from {path}"))?;
+                zenoh::open(zenoh_config)
+                    .await
+                    .map_err(|e| eyre!(e))
+                    .context("failed to open zenoh session")?
+            }
+            Err(std::env::VarError::NotPresent) => {
+                let mut zenoh_config = zenoh::Config::default();
+
+                if let Some(addr) = coordinator_addr {
+                    // Linkstate make it possible to connect two daemons on different network through a public daemon
+                    // TODO: There is currently a CI/CD Error in windows linkstate.
+                    if cfg!(not(target_os = "windows")) {
+                        zenoh_config
+                            .insert_json5("routing/peer", r#"{ mode: "linkstate" }"#)
+                            .unwrap();
+                    }
+
+                    zenoh_config
+                        .insert_json5(
+                            "connect/endpoints",
+                            &format!(
+                                r#"{{ router: ["tcp/[::]:7447"], peer: ["tcp/{}:5456"] }}"#,
+                                addr.ip()
+                            ),
+                        )
+                        .unwrap();
+                    zenoh_config
+                        .insert_json5(
+                            "listen/endpoints",
+                            r#"{ router: ["tcp/[::]:7447"], peer: ["tcp/[::]:5456"] }"#,
+                        )
+                        .unwrap();
+                    if cfg!(target_os = "macos") {
+                        warn!("disabling multicast on macos systems. Enable it with the ZENOH_CONFIG env variable or file");
+                        zenoh_config
+                            .insert_json5("scouting/multicast", r#"{ enabled: false }"#)
+                            .unwrap();
+                    }
+                };
+                if let Ok(zenoh_session) = zenoh::open(zenoh_config).await {
+                    zenoh_session
+                } else {
+                    warn!(
+                        "failed to open zenoh session, retrying with default config + coordinator"
+                    );
+                    let mut zenoh_config = zenoh::Config::default();
+                    // Linkstate make it possible to connect two daemons on different network through a public daemon
+                    // TODO: There is currently a CI/CD Error in windows linkstate.
+                    if cfg!(not(target_os = "windows")) {
+                        zenoh_config
+                            .insert_json5("routing/peer", r#"{ mode: "linkstate" }"#)
+                            .unwrap();
+                    }
+
+                    if let Some(addr) = coordinator_addr {
+                        zenoh_config
+                            .insert_json5(
+                                "connect/endpoints",
+                                &format!(
+                                    r#"{{ router: ["tcp/[::]:7447"], peer: ["tcp/{}:5456"] }}"#,
+                                    addr.ip()
+                                ),
+                            )
+                            .unwrap();
+                        if cfg!(target_os = "macos") {
+                            warn!("disabling multicast on macos systems. Enable it with the ZENOH_CONFIG env variable or file");
+                            zenoh_config
+                                .insert_json5("scouting/multicast", r#"{ enabled: false }"#)
+                                .unwrap();
+                        }
+                    }
+                    if let Ok(zenoh_session) = zenoh::open(zenoh_config).await {
+                        zenoh_session
+                    } else {
+                        warn!("failed to open zenoh session, retrying with default config");
+                        let zenoh_config = zenoh::Config::default();
+                        zenoh::open(zenoh_config)
+                            .await
+                            .map_err(|e| eyre!(e))
+                            .context("failed to open zenoh session")?
+                    }
+                }
+            }
             Err(std::env::VarError::NotUnicode(_)) => eyre::bail!(
                 "{} env variable is not valid unicode",
                 zenoh::Config::DEFAULT_CONFIG_PATH_ENV
             ),
         };
-        let zenoh_session = zenoh::open(zenoh_config)
-            .await
-            .map_err(|e| eyre!(e))
-            .context("failed to open zenoh session")?;

         let (dora_events_tx, dora_events_rx) = mpsc::channel(5);
         let daemon = Self {
             logger: Logger {

@@ -2210,10 +2288,11 @@ impl ProcessId {

     pub fn kill(&mut self) -> bool {
         if let Some(pid) = self.0 {
+            let pid = Pid::from(pid as usize);
             let mut system = sysinfo::System::new();
-            system.refresh_processes();
+            system.refresh_process(pid);

-            if let Some(process) = system.process(Pid::from(pid as usize)) {
+            if let Some(process) = system.process(pid) {
                 process.kill();
                 self.mark_as_stopped();
                 return true;
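Note that the zenoh defaults constructed in the first hunk above only apply when `ZENOH_CONFIG` is unset; pointing that environment variable at a config file takes the first branch and skips the fallback logic entirely. A json5 sketch of a file that would be roughly equivalent to those built-in defaults (the peer address `192.168.1.10` is illustrative and stands in for the coordinator's IP):

```json5
// zenoh.json5 -- assumed equivalent of the defaults built in the code above
{
  routing: { peer: { mode: "linkstate" } },     // skipped on Windows in the code above
  connect: { endpoints: { router: ["tcp/[::]:7447"], peer: ["tcp/192.168.1.10:5456"] } },
  listen: { endpoints: { router: ["tcp/[::]:7447"], peer: ["tcp/[::]:5456"] } },
  scouting: { multicast: { enabled: false } },  // only set on macOS in the code above
}
```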


binaries/daemon/src/spawn/mod.rs → binaries/daemon/src/spawn.rs (+42, -11)

@@ -28,7 +28,7 @@ use dora_node_api::{
     arrow_utils::{copy_array_into_sample, required_data_size},
     Metadata,
 };
-use eyre::{ContextCompat, WrapErr};
+use eyre::{bail, ContextCompat, WrapErr};
 use std::{
     future::Future,
     path::{Path, PathBuf},

@@ -201,14 +201,14 @@ impl Spawner {
                 {
                     let conda = which::which("conda").context(
                         "failed to find `conda`, yet a `conda_env` was defined. Make sure that `conda` is available.",
                     )?;
                     let mut command = tokio::process::Command::new(conda);
                     command.args([
                         "run",
                         "-n",
                         conda_env,
                         "python",
-                        "-c",
+                        "-uc",
                         format!("import dora; dora.start_runtime() # {}", node.id).as_str(),
                     ]);
                     Some(command)

@@ -234,20 +234,51 @@ impl Spawner {
                 };
                 // Force python to always flush stdout/stderr buffer
                 cmd.args([
-                    "-c",
+                    "-uc",
                     format!("import dora; dora.start_runtime() # {}", node.id).as_str(),
                 ]);
                 Some(cmd)
             }
         } else if python_operators.is_empty() && other_operators {
-            let mut cmd = tokio::process::Command::new(
-                std::env::current_exe()
-                    .wrap_err("failed to get current executable path")?,
-            );
-            cmd.arg("runtime");
-            Some(cmd)
+            let current_exe = std::env::current_exe()
+                .wrap_err("failed to get current executable path")?;
+            let mut file_name = current_exe.clone();
+            file_name.set_extension("");
+            let file_name = file_name
+                .file_name()
+                .and_then(|s| s.to_str())
+                .context("failed to get file name from current executable")?;
+
+            // Check if the current executable is a python binary meaning that dora is installed within the python environment
+            if file_name.ends_with("python") || file_name.ends_with("python3") {
+                // Use the current executable to spawn runtime
+                let python = get_python_path()
+                    .wrap_err("Could not find python path when spawning custom node")?;
+                let mut cmd = tokio::process::Command::new(python);
+
+                tracing::info!(
+                    "spawning: python -uc import dora; dora.start_runtime() # {}",
+                    node.id
+                );
+
+                cmd.args([
+                    "-uc",
+                    format!("import dora; dora.start_runtime() # {}", node.id).as_str(),
+                ]);
+                Some(cmd)
+            } else {
+                let mut cmd = tokio::process::Command::new(
+                    std::env::current_exe()
+                        .wrap_err("failed to get current executable path")?,
+                );
+                cmd.arg("runtime");
+                Some(cmd)
+            }
         } else {
-            eyre::bail!("Runtime can not mix Python Operator with other type of operator.");
+            bail!(
+                "Cannot spawn runtime with both Python and non-Python operators. \
+                Please use a single operator or ensure that all operators are Python-based."
+            );
         };

         let runtime_config = RuntimeConfig {

binaries/runtime/src/lib.rs (+9, -5)

@@ -5,15 +5,14 @@ use dora_core::{
     descriptor::OperatorConfig,
 };
 use dora_message::daemon_to_node::{NodeConfig, RuntimeConfig};
-use dora_metrics::init_meter_provider;
+use dora_metrics::run_metrics_monitor;
 use dora_node_api::{DoraNode, Event};
+use dora_tracing::TracingBuilder;
 use eyre::{bail, Context, Result};
 use futures::{Stream, StreamExt};
 use futures_concurrency::stream::Merge;
 use operator::{run_operator, OperatorEvent, StopReason};

-#[cfg(feature = "tracing")]
-use dora_tracing::set_up_tracing;
 use std::{
     collections::{BTreeMap, BTreeSet, HashMap},
     mem,

@@ -37,7 +36,12 @@ pub fn main() -> eyre::Result<()> {
     } = config;
     let node_id = config.node_id.clone();
     #[cfg(feature = "tracing")]
-    set_up_tracing(node_id.as_ref()).context("failed to set up tracing subscriber")?;
+    {
+        TracingBuilder::new(node_id.as_ref())
+            .with_stdout("warn")
+            .build()
+            .wrap_err("failed to set up tracing subscriber")?;
+    }

     let dataflow_descriptor = config.dataflow_descriptor.clone();

@@ -123,7 +127,7 @@ async fn run(
     init_done: oneshot::Receiver<Result<()>>,
 ) -> eyre::Result<()> {
     #[cfg(feature = "metrics")]
-    let _meter_provider = init_meter_provider(config.node_id.to_string());
+    let _meter_provider = run_metrics_monitor(config.node_id.to_string());
     init_done
         .await
         .wrap_err("the `init_done` channel was closed unexpectedly")?


+62 -0 examples/av1-encoding/dataflow.yml

@@ -0,0 +1,62 @@
nodes:
- id: camera
build: pip install ../../node-hub/opencv-video-capture
path: opencv-video-capture
_unstable_deploy:
machine: encoder
inputs:
tick: dora/timer/millis/50
outputs:
- image
env:
CAPTURE_PATH: 0
IMAGE_WIDTH: 1280
IMAGE_HEIGHT: 720

- id: rav1e-local
path: dora-rav1e
build: cargo build -p dora-rav1e --release
_unstable_deploy:
machine: encoder
inputs:
image: camera/image
outputs:
- image

- id: dav1d-remote
path: dora-dav1d
build: cargo build -p dora-dav1d --release
_unstable_deploy:
machine: decoder
inputs:
image: rav1e-local/image
outputs:
- image

- id: rav1e-remote
path: dora-rav1e
build: cargo build -p dora-rav1e --release
_unstable_deploy:
machine: decoder
inputs:
image: dav1d-remote/image
outputs:
- image

- id: dav1d-local
path: dora-dav1d
build: cargo build -p dora-dav1d --release
_unstable_deploy:
machine: encoder
inputs:
image: rav1e-remote/image
outputs:
- image

- id: plot
build: pip install -e ../../node-hub/dora-rerun
_unstable_deploy:
machine: encoder
path: dora-rerun
inputs:
image_decode: dav1d-local/image

+68 -0 examples/av1-encoding/dataflow_reachy.yml

@@ -0,0 +1,68 @@
nodes:
- id: camera
path: dora-reachy2-camera
_unstable_deploy:
machine: encoder
inputs:
tick: dora/timer/millis/50
outputs:
- image_right
- image_left
- image_depth
- depth
env:
CAPTURE_PATH: 0
IMAGE_WIDTH: 640
IMAGE_HEIGHT: 480
ROBOT_IP: 127.0.0.1

- id: rav1e-local
path: dora-rav1e
build: cargo build -p dora-rav1e --release
_unstable_deploy:
machine: encoder
inputs:
depth: camera/depth
outputs:
- depth
env:
RAV1E_SPEED: 7

- id: rav1e-local-image
path: dora-rav1e
build: cargo build -p dora-rav1e --release
_unstable_deploy:
machine: encoder
inputs:
image_depth: camera/image_depth
image_left: camera/image_left
outputs:
- image_left
- image_depth
- depth
env:
RAV1E_SPEED: 10

- id: dav1d-remote
path: dora-dav1d
build: cargo build -p dora-dav1d --release
_unstable_deploy:
machine: plot
inputs:
image_depth: rav1e-local-image/image_depth
image_left: rav1e-local-image/image_left
depth: rav1e-local/depth
outputs:
- image_left
- image_depth
- depth

- id: plot
build: pip install -e ../../node-hub/dora-rerun
_unstable_deploy:
machine: plot
path: dora-rerun
inputs:
image: dav1d-remote/image_depth
depth: dav1d-remote/depth
image_left: dav1d-remote/image_left

+54 -0 examples/av1-encoding/ios-dev.yaml

@@ -0,0 +1,54 @@
nodes:
- id: camera
build: pip install -e ../../node-hub/dora-ios-lidar
path: dora-ios-lidar
inputs:
tick: dora/timer/millis/20
outputs:
- image
- depth
env:
IMAGE_WIDTH: 1280
IMAGE_HEIGHT: 720
ROTATE: ROTATE_90_CLOCKWISE

- id: rav1e-local
path: dora-rav1e
build: cargo build -p dora-rav1e --release
inputs:
image: camera/image
outputs:
- image
env:
RAV1E_SPEED: 10

- id: rav1e-local-depth
path: dora-rav1e
build: cargo build -p dora-rav1e --release
inputs:
depth: camera/depth
outputs:
- depth
env:
RAV1E_SPEED: 10
- id: dav1d-local-depth
path: dora-dav1d
build: cargo build -p dora-dav1d --release
inputs:
depth: rav1e-local-depth/depth
outputs:
- depth
- id: dav1d-local
path: dora-dav1d
build: cargo build -p dora-dav1d --release
inputs:
image: rav1e-local/image
outputs:
- image

- id: plot
build: pip install -e ../../node-hub/dora-rerun
path: dora-rerun
inputs:
image: dav1d-local/image
depth: dav1d-local-depth/depth

+3 -1 examples/benchmark/dataflow.yml

@@ -11,4 +11,6 @@ nodes:
path: ../../target/release/benchmark-example-sink path: ../../target/release/benchmark-example-sink
inputs: inputs:
latency: rust-node/latency latency: rust-node/latency
throughput: rust-node/throughput
throughput:
source: rust-node/throughput
queue_size: 1000

+18 -27 examples/benchmark/node/src/main.rs

@@ -1,7 +1,6 @@
use dora_node_api::{self, dora_core::config::DataId, DoraNode}; use dora_node_api::{self, dora_core::config::DataId, DoraNode};
use eyre::{Context, ContextCompat};
use rand::Rng;
use std::collections::HashMap;
use eyre::Context;
use rand::RngCore;
use std::time::Duration; use std::time::Duration;
use tracing_subscriber::Layer; use tracing_subscriber::Layer;


@@ -25,26 +24,17 @@ fn main() -> eyre::Result<()> {
1000 * 4096, 1000 * 4096,
]; ];


let mut data = HashMap::new();
for size in sizes {
let vec: Vec<u8> = rand::thread_rng()
.sample_iter(rand::distributions::Standard)
.take(size)
.collect();

data.insert(size, vec);
}
let data = sizes.map(|size| {
let mut data = vec![0u8; size];
rand::thread_rng().fill_bytes(&mut data);
data
});


// test latency first // test latency first
for size in sizes {
for _ in 0..100 {
let data = data.get(&size).wrap_err(eyre::Report::msg(format!(
"data not found for size {}",
size
)))?;

for data in &data {
for _ in 0..1 {
node.send_output_raw(latency.clone(), Default::default(), data.len(), |out| { node.send_output_raw(latency.clone(), Default::default(), data.len(), |out| {
out.copy_from_slice(data);
out.copy_from_slice(&data);
})?; })?;


// sleep a bit to avoid queue buildup // sleep a bit to avoid queue buildup
@@ -56,17 +46,18 @@ fn main() -> eyre::Result<()> {
std::thread::sleep(Duration::from_secs(2)); std::thread::sleep(Duration::from_secs(2));


// then throughput with full speed // then throughput with full speed
for size in sizes {
for data in &data {
for _ in 0..100 { for _ in 0..100 {
let data = data.get(&size).wrap_err(eyre::Report::msg(format!(
"data not found for size {}",
size
)))?;

node.send_output_raw(throughput.clone(), Default::default(), data.len(), |out| { node.send_output_raw(throughput.clone(), Default::default(), data.len(), |out| {
out.copy_from_slice(data);
out.copy_from_slice(&data);
})?; })?;
} }
// notify sink that all messages have been sent
node.send_output_raw(throughput.clone(), Default::default(), 1, |out| {
out.copy_from_slice(&[1]);
})?;

std::thread::sleep(Duration::from_secs(2));
} }


Ok(()) Ok(())


+2 -3 examples/benchmark/sink/src/main.rs

@@ -24,7 +24,8 @@ fn main() -> eyre::Result<()> {
// check if new size bracket // check if new size bracket
let data_len = data.len(); let data_len = data.len();
if data_len != current_size { if data_len != current_size {
if n > 0 {
// data of length 1 is used to sync
if n > 0 && current_size != 1 {
record_results(start, current_size, n, latencies, latency); record_results(start, current_size, n, latencies, latency);
} }
current_size = data_len; current_size = data_len;
@@ -63,8 +64,6 @@ fn main() -> eyre::Result<()> {
} }
} }


record_results(start, current_size, n, latencies, latency);

Ok(()) Ok(())
} }




+11 -12 examples/cuda-benchmark/demo_receiver.py

@@ -1,7 +1,6 @@
#!/usr/bin/env python #!/usr/bin/env python
"""TODO: Add docstring.""" """TODO: Add docstring."""



import os import os
import time import time


@@ -9,7 +8,7 @@ import numpy as np
import pyarrow as pa import pyarrow as pa
import torch import torch
from dora import Node from dora import Node
from dora.cuda import cudabuffer_to_torch, ipc_buffer_to_ipc_handle
from dora.cuda import ipc_buffer_to_ipc_handle, open_ipc_handle
from helper import record_results from helper import record_results
from tqdm import tqdm from tqdm import tqdm


@@ -17,7 +16,6 @@ torch.tensor([], device="cuda")




pa.array([]) pa.array([])
context = pa.cuda.Context()
node = Node("node_2") node = Node("node_2")


current_size = 8 current_size = 8
@@ -29,8 +27,6 @@ DEVICE = os.getenv("DEVICE", "cuda")


NAME = f"dora torch {DEVICE}" NAME = f"dora torch {DEVICE}"


ctx = pa.cuda.Context()

print() print()
print("Receiving 40MB packets using default dora-rs") print("Receiving 40MB packets using default dora-rs")


@@ -49,13 +45,13 @@ while True:
if event["metadata"]["device"] != "cuda": if event["metadata"]["device"] != "cuda":
# BEFORE # BEFORE
handle = event["value"].to_numpy() handle = event["value"].to_numpy()
scope = None
torch_tensor = torch.tensor(handle, device="cuda") torch_tensor = torch.tensor(handle, device="cuda")
else: else:
# AFTER # AFTER
# storage needs to be spawned in the same file as where it's used. Don't ask me why.
ipc_handle = ipc_buffer_to_ipc_handle(event["value"])
cudabuffer = ctx.open_ipc_buffer(ipc_handle)
torch_tensor = cudabuffer_to_torch(cudabuffer, event["metadata"]) # on cuda
ipc_handle = ipc_buffer_to_ipc_handle(event["value"], event["metadata"])
scope = open_ipc_handle(ipc_handle, event["metadata"])
torch_tensor = scope.__enter__()
else: else:
break break
t_received = time.perf_counter_ns() t_received = time.perf_counter_ns()
@@ -73,6 +69,9 @@ while True:
latencies = [] latencies = []
n += 1 n += 1


if scope:
scope.__exit__(None, None, None)



mean_cuda = np.array(latencies).mean() mean_cuda = np.array(latencies).mean()
pbar.close() pbar.close()
@@ -81,9 +80,9 @@ time.sleep(2)


print() print()
print("----") print("----")
print(f"Node communication duration with default dora-rs: {mean_cpu/1000:.1f}ms")
print(f"Node communication duration with dora CUDA->CUDA: {mean_cuda/1000:.1f}ms")
print(f"Node communication duration with default dora-rs: {mean_cpu / 1000:.1f}ms")
print(f"Node communication duration with dora CUDA->CUDA: {mean_cuda / 1000:.1f}ms")


print("----") print("----")
print(f"Speed Up: {(mean_cpu)/(mean_cuda):.0f}")
print(f"Speed Up: {(mean_cpu) / (mean_cuda):.0f}")
record_results(NAME, current_size, latencies) record_results(NAME, current_size, latencies)
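The manual `__enter__`/`__exit__` calls above are an unrolled context manager; a minimal sketch of the same flow using `with`, assuming `open_ipc_handle` returns a standard context manager that yields the CUDA-backed tensor:

```python
# Sketch only, not the benchmark code: the IPC buffer is released when the block exits.
ipc_handle = ipc_buffer_to_ipc_handle(event["value"], event["metadata"])
with open_ipc_handle(ipc_handle, event["metadata"]) as torch_tensor:
    consume(torch_tensor)  # hypothetical consumer; the tensor is only valid inside the block
```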

+8 -10 examples/cuda-benchmark/receiver.py

@@ -1,14 +1,13 @@
#!/usr/bin/env python #!/usr/bin/env python
"""TODO: Add docstring.""" """TODO: Add docstring."""



import os import os
import time import time


import pyarrow as pa import pyarrow as pa
import torch import torch
from dora import Node from dora import Node
from dora.cuda import cudabuffer_to_torch, ipc_buffer_to_ipc_handle
from dora.cuda import ipc_buffer_to_ipc_handle, open_ipc_handle
from helper import record_results from helper import record_results
from tqdm import tqdm from tqdm import tqdm


@@ -17,7 +16,6 @@ torch.tensor([], device="cuda")


pa.array([]) pa.array([])
pbar = tqdm(total=100) pbar = tqdm(total=100)
context = pa.cuda.Context()
node = Node("node_2") node = Node("node_2")




@@ -29,8 +27,6 @@ DEVICE = os.getenv("DEVICE", "cuda")


NAME = f"dora torch {DEVICE}" NAME = f"dora torch {DEVICE}"


ctx = pa.cuda.Context()

while True: while True:
event = node.next() event = node.next()
if event["type"] == "INPUT": if event["type"] == "INPUT":
@@ -40,12 +36,12 @@ while True:
# BEFORE # BEFORE
handle = event["value"].to_numpy() handle = event["value"].to_numpy()
torch_tensor = torch.tensor(handle, device="cuda") torch_tensor = torch.tensor(handle, device="cuda")
scope = None
else: else:
# AFTER # AFTER
# storage needs to be spawned in the same file as where it's used. Don't ask me why.
ipc_handle = ipc_buffer_to_ipc_handle(event["value"])
cudabuffer = ctx.open_ipc_buffer(ipc_handle)
torch_tensor = cudabuffer_to_torch(cudabuffer, event["metadata"]) # on cuda
ipc_handle = ipc_buffer_to_ipc_handle(event["value"], event["metadata"])
scope = open_ipc_handle(ipc_handle, event["metadata"])
torch_tensor = scope.__enter__()
else: else:
break break
t_received = time.perf_counter_ns() t_received = time.perf_counter_ns()
@@ -53,7 +49,6 @@ while True:


if length != current_size: if length != current_size:
if n > 0: if n > 0:

pbar.close() pbar.close()
pbar = tqdm(total=100) pbar = tqdm(total=100)
record_results(NAME, current_size, latencies) record_results(NAME, current_size, latencies)
@@ -69,4 +64,7 @@ while True:
n += 1 n += 1
i += 1 i += 1


if scope:
scope.__exit__(None, None, None)

record_results(NAME, current_size, latencies) record_results(NAME, current_size, latencies)

+3 -0 examples/depth_camera/ios-dev.yaml

@@ -7,6 +7,9 @@ nodes:
outputs: outputs:
- image - image
- depth - depth
env:
IMAGE_WIDTH: 640
IMAGE_HEIGHT: 480


- id: plot - id: plot
build: pip install -e ../../node-hub/dora-rerun build: pip install -e ../../node-hub/dora-rerun


+11 -0 examples/mediapipe/README.md

@@ -0,0 +1,11 @@
# MediaPipe example

## Make sure to have a webcam connected

```bash
uv venv --seed
dora build rgb-dev.yml --uv
dora run rgb-dev.yml --uv
```

If the points are not plotted by default, try adding a 2D viewer within rerun.
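A RealSense variant is also provided (assuming a RealSense camera is connected; see `realsense-dev.yml` in this folder):

```bash
dora build realsense-dev.yml --uv
dora run realsense-dev.yml --uv
```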

+26 -0 examples/mediapipe/realsense-dev.yml

@@ -0,0 +1,26 @@
nodes:
- id: camera
build: pip install -e ../../node-hub/dora-pyrealsense
path: dora-pyrealsense
inputs:
tick: dora/timer/millis/100
outputs:
- image
- depth

- id: dora-mediapipe
build: pip install -e ../../node-hub/dora-mediapipe
path: dora-mediapipe
inputs:
image: camera/image
depth: camera/depth
outputs:
- points3d

- id: plot
build: pip install -e ../../node-hub/dora-rerun
path: dora-rerun
inputs:
realsense/image: camera/image
realsense/depth: camera/depth
realsense/points3d: dora-mediapipe/points3d

+26 -0 examples/mediapipe/rgb-dev.yml

@@ -0,0 +1,26 @@
nodes:
- id: camera
build: pip install -e ../../node-hub/opencv-video-capture
path: opencv-video-capture
inputs:
tick: dora/timer/millis/100
outputs:
- image
env:
CAPTURE_PATH: 0

- id: dora-mediapipe
build: pip install -e ../../node-hub/dora-mediapipe
path: dora-mediapipe
inputs:
image: camera/image
outputs:
- points2d

- id: plot
build: pip install -e ../../node-hub/dora-rerun
path: dora-rerun
inputs:
image: camera/image
# Make sure to add a 2d viewer to see the points
points2d: dora-mediapipe/points2d

+250 -0 examples/reachy2-remote/dataflow_reachy.yml

@@ -0,0 +1,250 @@
nodes:
- id: camera
path: dora-reachy2-camera
_unstable_deploy:
machine: encoder
inputs:
tick: dora/timer/millis/20
outputs:
- image_left
- image_depth
- depth
env:
IMAGE_WIDTH: 640
IMAGE_HEIGHT: 480
ROBOT_IP: 127.0.0.1

- id: reachy-left-arm
build: pip install -e ../../node-hub/dora-reachy2
path: dora-reachy2-left-arm
_unstable_deploy:
machine: encoder
inputs:
pose: parse_pose/action_l_arm
outputs:
- response_l_arm
env:
ROBOT_IP: 127.0.0.1

- id: reachy-right-arm
build: pip install -e ../../node-hub/dora-reachy2
path: dora-reachy2-right-arm
_unstable_deploy:
machine: encoder
inputs:
pose: parse_pose/action_r_arm
outputs:
- response_r_arm
env:
ROBOT_IP: 127.0.0.1

- id: rav1e-local-image
path: dora-rav1e
build: cargo build -p dora-rav1e --release
_unstable_deploy:
machine: encoder
inputs:
image_depth: camera/image_depth
image_left: camera/image_left
outputs:
- image_left
- image_depth
env:
RAV1E_SPEED: 10

- id: rav1e-local-depth
path: dora-rav1e
build: cargo build -p dora-rav1e --release
_unstable_deploy:
machine: encoder
inputs:
depth: camera/depth
outputs:
- depth
env:
RAV1E_SPEED: 7

- id: dav1d-remote
path: dora-dav1d
build: cargo build -p dora-dav1d --release
_unstable_deploy:
machine: gpu
inputs:
image_depth: rav1e-local-image/image_depth
image_left: rav1e-local-image/image_left
depth: rav1e-local-depth/depth
outputs:
- image_left
- image_depth
- depth

- id: dora-microphone
build: pip install -e ../../node-hub/dora-microphone
path: dora-microphone
_unstable_deploy:
machine: macbook
inputs:
tick: dora/timer/millis/2000
outputs:
- audio

- id: dora-vad
build: pip install -e ../../node-hub/dora-vad
_unstable_deploy:
machine: macbook
path: dora-vad
inputs:
audio: dora-microphone/audio
outputs:
- audio

- id: dora-distil-whisper
build: pip install -e ../../node-hub/dora-distil-whisper
_unstable_deploy:
machine: gpu
path: dora-distil-whisper
inputs:
input: dora-vad/audio
outputs:
- text
env:
TARGET_LANGUAGE: english

- id: parse_whisper
path: parse_whisper.py
_unstable_deploy:
machine: gpu
inputs:
text: dora-distil-whisper/text
arrived: parse_point/arrived
outputs:
- bbox
- action
- points
- text
- action_release_left
- action_release_right
env:
IMAGE_RESIZE_RATIO: "1.0"

- id: dora-qwenvl
build: pip install -e ../../node-hub/dora-qwen2-5-vl
path: dora-qwen2-5-vl
_unstable_deploy:
machine: gpu
inputs:
image_left: dav1d-remote/image_left
image_depth: dav1d-remote/image_depth
text: parse_whisper/text
outputs:
- text
env:
DEFAULT_QUESTION: Output the bounding box of the suitcase.
IMAGE_RESIZE_RATIO: "1.0"

- id: parse_bbox
path: parse_bbox.py
_unstable_deploy:
machine: gpu
inputs:
text: dora-qwenvl/text
points: parse_whisper/points
outputs:
- bbox_track
- bbox_grab
env:
IMAGE_RESIZE_RATIO: "1.0"

- id: sam2
build: pip install -e ../../node-hub/dora-sam2
path: dora-sam2
_unstable_deploy:
machine: gpu
inputs:
image_depth: dav1d-remote/image_depth
boxes2d: parse_bbox/bbox_grab
outputs:
- masks

- id: tracker
build: pip install -e ../../node-hub/dora-cotracker
path: dora-cotracker
_unstable_deploy:
machine: gpu
inputs:
image: dav1d-remote/image_left
boxes2d: parse_bbox/bbox_track
outputs:
- tracked_image
- points
env:
INTERACTIVE_MODE: false

- id: box_coordinates
build: pip install -e ../../node-hub/dora-object-to-pose
path: dora-object-to-pose
_unstable_deploy:
machine: gpu
inputs:
depth: dav1d-remote/depth
masks: sam2/masks
outputs:
- pose

- id: parse_pose
path: parse_pose.py
_unstable_deploy:
machine: gpu
inputs:
pose: box_coordinates/pose
response_r_arm: reachy-right-arm/response_r_arm
response_l_arm: reachy-left-arm/response_l_arm
release_left: parse_whisper/action_release_left
release_right: parse_whisper/action_release_right
outputs:
- action_r_arm
- action_l_arm
env:
IMAGE_RESIZE_RATIO: "1.0"

- id: parse_point
path: parse_point.py
_unstable_deploy:
machine: gpu
inputs:
points: tracker/points
outputs:
- action
- arrived
env:
IMAGE_RESIZE_RATIO: "1.0"

- id: reachy-mobile-base
build: pip install -e ../../node-hub/dora-reachy2
path: dora-reachy2-mobile-base
_unstable_deploy:
machine: encoder
inputs:
action_base: parse_point/action
action_whisper: parse_whisper/action
outputs:
- response_base
env:
ROBOT_IP: 127.0.0.1

- id: plot
build: pip install -e ../../node-hub/dora-rerun
path: dora-rerun
_unstable_deploy:
machine: macbook
inputs:
image: dav1d-remote/image_left
torso/image: dav1d-remote/image_depth
torso/depth: dav1d-remote/depth
torso/boxes2d: parse_bbox/bbox
original_text: dora-distil-whisper/text
parsed_text: parse_whisper/text
qwenvl_text: dora-qwenvl/text
env:
RERUN_MEMORY_LIMIT: 5%
CAMERA_PITCH: 2.47

+76 -0 examples/reachy2-remote/parse_bbox.py

@@ -0,0 +1,76 @@
"""TODO: Add docstring."""

import json
import os

import numpy as np
import pyarrow as pa
from dora import Node

node = Node()

IMAGE_RESIZE_RATIO = float(os.getenv("IMAGE_RESIZE_RATIO", "1.0"))


def extract_bboxes(json_text):
"""Extract bounding boxes from a JSON string with markdown markers and return them as a NumPy array.

Parameters
----------
json_text : str
JSON string containing bounding box data, including ```json markers.

Returns
-------
np.ndarray: NumPy array of bounding boxes.

"""
# Ensure all lines are stripped of whitespace and markers
lines = json_text.strip().splitlines()

# Filter out lines that are markdown markers
clean_lines = [line for line in lines if not line.strip().startswith("```")]

# Join the lines back into a single string
clean_text = "\n".join(clean_lines)
# Parse the cleaned JSON text
try:
data = json.loads(clean_text)

# Extract bounding boxes
bboxes = [item["bbox_2d"] for item in data]
labels = [item["label"] for item in data]

return np.array(bboxes), np.array(labels)
    except Exception:  # malformed JSON from the model
        return None, None
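# Example (hypothetical model reply) of the format extract_bboxes expects:
#   '```json\n[{"bbox_2d": [10, 20, 110, 220], "label": "cup"}]\n```'
# which would yield (array([[10, 20, 110, 220]]), array(['cup'])).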


for event in node:
if event["type"] == "INPUT":
if len(event["value"]) == 0:
node.send_output("bbox_track", pa.array([]))
continue

text = event["value"][0].as_py()
metadata = event["metadata"]
image_id = event["metadata"]["image_id"]

bboxes, labels = extract_bboxes(text)
if bboxes is not None and len(bboxes) > 0:
bboxes = bboxes * int(1 / IMAGE_RESIZE_RATIO)
metadata["image_id"] = image_id
metadata["encoding"] = "xyxy"
if image_id == "image_left":
node.send_output(
"bbox_track",
pa.array(bboxes.ravel()),
metadata,
)
elif image_id == "image_depth":
node.send_output(
"bbox_grab",
pa.array(bboxes.ravel()),
metadata,
)

+62 -0 examples/reachy2-remote/parse_point.py

@@ -0,0 +1,62 @@
"""TODO: Add docstring."""

import os
import time

import numpy as np
import pyarrow as pa
from dora import Node

node = Node()

IMAGE_RESIZE_RATIO = float(os.getenv("IMAGE_RESIZE_RATIO", "1.0"))

arrive_time = time.time()
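# Steering sketch: rz is the tracked point's normalized horizontal offset from
# the image center in [-1, 1], quantized into fixed turn angles; the forward
# speed x scales with the point's distance from the bottom of the frame.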

for event in node:
if event["type"] == "INPUT":
width = event["metadata"]["width"]
height = event["metadata"]["height"]
values = event["value"].to_numpy().reshape((-1, 2))
values = values * int(1 / IMAGE_RESIZE_RATIO)

        if len(values) == 0:
            continue
        # Track the most recently added point
        point = values[-1]

rz = int((width / 2) - point[0]) / (width / 2)
x_distance = min(height, height - point[1])
y = 0
if abs(rz) > 0.75:
rz = np.deg2rad(45) * np.sign(rz)
elif abs(rz) > 0.5:
rz = np.deg2rad(30) * np.sign(rz)
elif abs(rz) > 0.3:
rz = np.deg2rad(20) * np.sign(rz)
elif abs(rz) > 0.1:
rz = np.deg2rad(10) * np.sign(rz)
        else:
            rz = 0  # inside the deadband: no rotation needed

if x_distance > (height * 0.7):
x = 0.7
elif x_distance > (height * 0.5):
x = 0.6
elif x_distance > (height * 0.25):
x = 0.5
else:
x = 0
if x_distance < (height * 0.25):
print("ARRIVED!")
time.sleep(1.0)
if time.time() - arrive_time > 4.0:
node.send_output("arrived", pa.array([]))
arrive_time = time.time()
# Action
action = pa.array([x, y, 0, 0, 0, rz])
node.send_output("action", action)

+300 -0 examples/reachy2-remote/parse_pose.py

@@ -0,0 +1,300 @@
"""TODO: Add docstring."""

import os

import numpy as np
import pyarrow as pa
from dora import Node

node = Node()

IMAGE_RESIZE_RATIO = float(os.getenv("IMAGE_RESIZE_RATIO", "1.0"))


l_init_pose = [
-7.0631310641087435,
-10.432298603362307,
24.429809104404114,
-132.15000828778648,
-1.5494749438811133,
-21.749917789205202,
8.099312596108344,
100,
]
r_init_pose = [
-5.60273587426976,
10.780818397272316,
-27.868146823156042,
-126.15650363072193,
3.961108018106834,
-35.43682799906162,
350.9236448374495,
100,
]
r_release_closed_pose = [
-26.1507947940993,
12.16735021387949,
-2.2657319092611976,
-97.63648867582175,
-19.91084837404425,
22.10184328619011,
366.71351223614494,
0,
]

r_release_opened_pose = [
-26.1507947940993,
12.16735021387949,
-2.2657319092611976,
-97.63648867582175,
-19.91084837404425,
22.10184328619011,
366.71351223614494,
100,
]

l_release_opened_pose = [
-30.04330081906935,
-7.415231584691132,
3.6972339048071468,
-97.7274736257555,
12.996718740452982,
30.838020649757016,
-1.5572310505704858,
0,
]

l_release_closed_pose = [
-30.04330081906935,
-7.415231584691132,
3.6972339048071468,
-97.7274736257555,
12.996718740452982,
30.838020649757016,
-1.5572310505704858,
100,
]


def wait_for_event(id, timeout=None, cache=None):
    """Block until the input with the given id arrives, caching other inputs seen meanwhile."""
    if cache is None:
        cache = {}  # avoid the mutable-default-argument pitfall
    while True:
        event = node.next(timeout=timeout)
        if event is None:
            cache["finished"] = True
            return None, cache
if event["type"] == "INPUT":
cache[event["id"]] = event["value"]
if event["id"] == id:
return event["value"], cache

elif event["type"] == "ERROR":
return None, cache


arm_holding_object = None
cache = {}


## ---- INIT ---
node.send_output(
"action_r_arm",
pa.array(r_init_pose),
metadata={"encoding": "jointstate", "duration": 2},
)
node.send_output(
"action_l_arm",
pa.array(l_init_pose),
metadata={"encoding": "jointstate", "duration": 2},
)
node.send_output("look", pa.array([0.35, 0, 0]))


for event in node:
if event["type"] == "INPUT":
if event["id"] == "pose":
values = event["value"]
values = values.to_numpy()
print("Pose: ", values)
if len(values) == 0:
continue
x = values[0]
y = values[1]
z = values[2]
action = event["metadata"]["action"]

match action:
case "grab":
x = x + 0.01

                    ## Clip the maximum and minimum arm heights to avoid collisions or erratic movement.
trajectory = np.array(
[
[x, y, -0.16, 0, 0, 0, 100],
[x, y, z, 0, 0, 0, 0],
[x, y, -0.16, 0, 0, 0, 0],
],
).ravel()

if y < 0:
node.send_output(
"action_r_arm",
pa.array(trajectory),
metadata={"encoding": "xyzrpy", "duration": "0.75"},
)
event = wait_for_event(id="response_r_arm", timeout=5)
if event is not None and event[0]:
print("Success")
arm_holding_object = "right"
node.send_output(
"action_r_arm",
pa.array([0.1, -0.2, -0.16, 0, 0, 0, 0]),
metadata={"encoding": "xyzrpy", "duration": "1"},
)
else:
print("Failed: x: ", x, " y: ", y, " z: ", z)
node.send_output(
"action_r_arm",
pa.array(r_init_pose),
metadata={"encoding": "jointstate", "duration": "1"},
)
event = wait_for_event(id="response_r_arm")
else:
y += 0.03
node.send_output(
"action_l_arm",
pa.array(trajectory),
metadata={"encoding": "xyzrpy", "duration": "0.75"},
)
event = wait_for_event(id="response_l_arm", timeout=5)
if event is not None and event[0]:
print("Success")
arm_holding_object = "left"
node.send_output(
"action_l_arm",
pa.array([0.1, 0.2, -0.16, 0, 0, 0, 0]),
metadata={"encoding": "xyzrpy", "duration": "1"},
)
else:
print("Failed")
node.send_output(
"action_l_arm",
pa.array(l_init_pose),
metadata={"encoding": "jointstate", "duration": "1"},
)
event = wait_for_event(id="response_l_arm")
case "release":
x = x + 0.01
if z < -0.4:
z = -0.16

                    ## Clip the maximum and minimum arm heights to avoid collisions or erratic movement.
trajectory = np.array(
[
[x, y, z + 0.1, 0, 0, 0, 100],
],
).ravel()

if arm_holding_object is None:
continue
elif arm_holding_object == "right":
node.send_output(
"action_r_arm",
pa.array(trajectory),
metadata={"encoding": "xyzrpy", "duration": "0.75"},
)
event = wait_for_event(id="response_r_arm", timeout=5)
if event is not None and event[0]:
print("Success release right with", event[0])
arm_holding_object = "right"
node.send_output(
"action_r_arm",
pa.array(r_init_pose),
metadata={"encoding": "jointstate", "duration": 1},
)
arm_holding_object = None
else:
print("Failed: x: ", x, " y: ", y, " z: ", z)
node.send_output(
"action_r_arm",
pa.array(r_init_pose),
metadata={"encoding": "jointstate", "duration": "1"},
)
event = wait_for_event(id="response_r_arm")
else:
y += 0.03
node.send_output(
"action_l_arm",
pa.array(trajectory),
metadata={"encoding": "xyzrpy", "duration": "0.75"},
)
event = wait_for_event(id="response_l_arm", timeout=5)
if event is not None and event[0]:
print("Success release left with", event[0])
arm_holding_object = "left"
node.send_output(
"action_l_arm",
pa.array(l_init_pose),
metadata={"encoding": "jointstate", "duration": 1},
)
arm_holding_object = None
else:
print("Failed")
node.send_output(
"action_l_arm",
pa.array(l_init_pose),
metadata={"encoding": "jointstate", "duration": "1"},
)
event = wait_for_event(id="response_l_arm")

elif event["id"] == "release_right":
node.send_output(
"action_r_arm",
pa.array(
[
0.4,
0,
-0.16,
0,
0,
0,
100,
],
),
metadata={"encoding": "xyzrpy", "duration": "0.75"},
)
event, cache = wait_for_event(id="response_r_arm", cache=cache)
if event is not None and event[0]:
node.send_output(
"action_r_arm",
pa.array(r_init_pose),
metadata={"encoding": "jointstate", "duration": 1},
)
elif event["id"] == "release_left":
node.send_output(
"action_l_arm",
pa.array(
[
0.4,
0,
-0.16,
0,
0,
0,
100,
],
),
metadata={"encoding": "xyzrpy", "duration": "0.75"},
)
event, cache = wait_for_event(id="response_l_arm", cache=cache)
if event is not None and event[0]:
node.send_output(
"action_l_arm",
pa.array(l_init_pose),
metadata={"encoding": "jointstate", "duration": 1},
)

+135 -0 examples/reachy2-remote/parse_whisper.py

@@ -0,0 +1,135 @@
"""TODO: Add docstring."""

import json
import os
import time

import numpy as np
import pyarrow as pa
from dora import Node

node = Node()

IMAGE_RESIZE_RATIO = float(os.getenv("IMAGE_RESIZE_RATIO", "1.0"))


def extract_bboxes(json_text):
"""Extract bounding boxes from a JSON string with markdown markers and return them as a NumPy array.

Parameters
----------
json_text : str
JSON string containing bounding box data, including ```json markers.

Returns
-------
np.ndarray: NumPy array of bounding boxes.

"""
# Ensure all lines are stripped of whitespace and markers
lines = json_text.strip().splitlines()

# Filter out lines that are markdown markers
clean_lines = [line for line in lines if not line.strip().startswith("```")]

# Join the lines back into a single string
clean_text = "\n".join(clean_lines)
# Parse the cleaned JSON text
try:
data = json.loads(clean_text)

# Extract bounding boxes
bboxes = [item["bbox_2d"] for item in data]
labels = [item["label"] for item in data]

return np.array(bboxes), np.array(labels)
    except Exception:  # malformed JSON from the model
        return None, None

last_prompt = ""
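# Command routing implemented by the branches below:
#   "follow" / "get" / "grab" -> ask the VLM for the target's bounding box
#   "put" / "drop"            -> ask the VLM for the release location
#   "release left/right"      -> open the corresponding gripper
#   "turn" / "move"           -> direct mobile-base velocity commands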
for event in node:
if event["type"] == "INPUT":
if event["id"] == "text":
text = event["value"][0].as_py().lower()

if "stop" in text:
node.send_output("points", pa.array([], type=pa.float64()))
elif "follow" in text:
text = f"Given the prompt: {text}. Output the bounding boxes for the given followed object"
node.send_output("text", pa.array([text]), {"image_id": "image_left"})
elif "grab " in text:
text = f"Given the prompt: {text}. Output the bounding boxes for the given grabbed object"
node.send_output(
"text", pa.array([text]), {"image_id": "image_depth", "action": "grab"}
)
elif "get " in text:
text = f"Given the prompt: {text}. Output the bounding boxes for the object"
node.send_output(
"text", pa.array([text]), {"image_id": "image_left", "action": "grab"}
)
last_prompt = text
elif "put " in text:
text = f"Given the prompt: {text}. Output the bounding boxes for the place to put the object"
node.send_output(
"text",
pa.array([text]),
{"image_id": "image_left", "action": "release"},
)
last_prompt = text
elif "drop " in text:
text = f"Given the prompt: {text}. Output the bounding boxes for the place to drop the object"
node.send_output(
"text",
pa.array([text]),
{"image_id": "image_depth", "action": "release"},
)
elif "release left" in text:
node.send_output("action_release_left", pa.array([1.0]))
elif "release right" in text:
node.send_output("action_release_right", pa.array([1.0]))
elif "turn left" in text:
action = pa.array([0.0, 0, 0, 0, 0, np.deg2rad(160)])
node.send_output("action", action)
time.sleep(0.25)
action = pa.array([0.0, 0, 0, 0, 0, np.deg2rad(160)])
node.send_output("action", action)
node.send_output("points", pa.array([]))
elif "turn right" in text:
action = pa.array([0.0, 0, 0, 0, 0, -np.deg2rad(160)])
node.send_output("action", action)
time.sleep(0.25)
action = pa.array([0.0, 0, 0, 0, 0, -np.deg2rad(160)])
node.send_output("action", action)
node.send_output("points", pa.array([]))
elif "move forward" in text:
action = pa.array([0.5, 0, 0, 0, 0, 0])
node.send_output("action", action)
time.sleep(0.25)
node.send_output("action", action)
node.send_output("points", pa.array([]))
elif "move backward" in text:
action = pa.array([-0.5, 0, 0, 0, 0, 0])
node.send_output("action", action)
time.sleep(0.25)
node.send_output("action", action)
node.send_output("points", pa.array([]))
elif event["id"] == "arrived":
text = last_prompt
print("received arrived message")
node.send_output("points", pa.array([]))
if "get " in text:
text = f"Given the prompt: {text}. Output the bounding boxes for the place to put the object"
node.send_output(
"text",
pa.array([text]),
{"image_id": "image_depth", "action": "grab"},
)
elif "put " in text:
text = f"Given the prompt: {text}. Output the bounding boxes for the place to put the object"
node.send_output(
"text",
pa.array([text]),
{"image_id": "image_depth", "action": "release"},
)

+42 -0 examples/reachy2-remote/whisper-dev.yml

@@ -0,0 +1,42 @@
nodes:
- id: dora-microphone
build: pip install -e ../../node-hub/dora-microphone
path: dora-microphone
_unstable_deploy:
machine: macbook
inputs:
tick: dora/timer/millis/2000
outputs:
- audio

- id: dora-vad
build: pip install -e ../../node-hub/dora-vad
_unstable_deploy:
machine: macbook
path: dora-vad
inputs:
audio: dora-microphone/audio
outputs:
- audio

- id: dora-distil-whisper
build: pip install -e ../../node-hub/dora-distil-whisper
_unstable_deploy:
machine: macbook
path: dora-distil-whisper
inputs:
input: dora-vad/audio
outputs:
- text
env:
TARGET_LANGUAGE: english
# For China
# USE_MODELSCOPE_HUB: true

- id: dora-rerun
build: cargo build -p dora-rerun --release
_unstable_deploy:
machine: macbook
path: dora-rerun
inputs:
original_text: dora-distil-whisper/text

+1 -1 examples/rerun-viewer/dataflow.yml

@@ -14,7 +14,7 @@ nodes:


- id: rerun - id: rerun
build: cargo build -p dora-rerun --release build: cargo build -p dora-rerun --release
path: dora-rerun
path: ../../target/release/dora-rerun
inputs: inputs:
image: camera/image image: camera/image
env: env:


+10 -4 examples/rerun-viewer/run.rs

@@ -5,7 +5,7 @@ use std::path::Path;


#[tokio::main] #[tokio::main]
async fn main() -> eyre::Result<()> { async fn main() -> eyre::Result<()> {
set_up_tracing("python-dataflow-runner")?;
set_up_tracing("rerun-viewer-runner")?;


let root = Path::new(env!("CARGO_MANIFEST_DIR")); let root = Path::new(env!("CARGO_MANIFEST_DIR"));
std::env::set_current_dir(root.join(file!()).parent().unwrap()) std::env::set_current_dir(root.join(file!()).parent().unwrap())
@@ -13,18 +13,24 @@ async fn main() -> eyre::Result<()> {


let uv = get_uv_path().context("Could not get uv binary")?; let uv = get_uv_path().context("Could not get uv binary")?;


run(&uv, &["venv", "-p", "3.10", "--seed"], None)
run(&uv, &["venv", "-p", "3.11", "--seed"], None)
.await .await
.context("failed to create venv")?; .context("failed to create venv")?;
run( run(
&uv, &uv,
&["pip", "install", "-e", "../../apis/python/node", "--reinstall"],
&[
"pip",
"install",
"-e",
"../../apis/python/node",
"--reinstall",
],
None, None,
) )
.await .await
.context("Unable to install develop dora-rs API")?; .context("Unable to install develop dora-rs API")?;


let dataflow = Path::new("qwen2-5-vl-vision-only-dev.yml");
let dataflow = Path::new("dataflow.yml");
run_dataflow(dataflow).await?; run_dataflow(dataflow).await?;


Ok(()) Ok(())


+94 -0 examples/so100-remote/README.md

@@ -0,0 +1,94 @@
# SO100 and SO101 Remote Example

## Hardware requirements

- RealSense camera
- SO100 or SO101 robotic arm

## Download the 3D model of the SO100

```bash
[ -f "$HOME/Downloads/so100_urdf.zip" ] || (wget -O "$HOME/Downloads/so100_urdf.zip" https://huggingface.co/datasets/haixuantao/urdfs/resolve/main/so100/so100_urdf.zip && unzip -o "$HOME/Downloads/so100_urdf.zip" -d "$HOME/Downloads/so100_urdf")
```

## To get started

```bash
uv venv --seed
dora build no_torque.yml --uv
```

## Make sure that both the RealSense camera and the robotic arm are connected

On Linux, you can check the arm's connection with:

```bash
ls /dev/ttyACM*
```

This should show something like:

```bash
/dev/ttyACM0
```

Make sure to enable read/write access with:

```bash
sudo chmod 777 /dev/ttyACM0
```

On Linux, for the camera, make sure it is properly connected and check with:

```bash
ls /dev/video*
```

Result should be as follows:

```bash
/dev/video0 /dev/video2 /dev/video4 /dev/video6 /dev/video8
/dev/video1 /dev/video3 /dev/video5 /dev/video7 /dev/video9
```

## To run the no-torque demo:

```bash
dora run no_torque.yml --uv
```

If the placement of the virtual robot arm is wrong, you can adjust it via the `so100_transform` environment variable, as shown below.
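For example, a hypothetical tweak that shifts the virtual arm 5 cm along x (the seven values are presumably an x y z translation followed by a quaternion; these numbers are placeholders, not a calibrated pose):

```yaml
- id: plot
  env:
    so100_urdf: $HOME/Downloads/so100_urdf/so100.urdf
    so100_transform: -0.15 -0.01 -0.57 0.7 0 0 0.7
```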

## To run the qwenvl demo:

```bash
dora run qwenvl.yml --uv
```

## To run the qwenvl remote demo:

On a remote machine:

```bash
dora coordinator &
dora daemon --machine-id gpu
```

Then, on the machine connected to the robot:

```bash
dora daemon --coordinator-addr <IP_COORDINATOR_ADDR>
dora start qwenvl-remote.yml --uv --coordinator-addr <IP_COORDINATOR_ADDR>
```

## To run the qwenvl compression demo:

On a remote machine:

```bash
dora coordinator &
dora daemon --machine-id gpu
```

Then, on the machine connected to the robot:

```bash
dora daemon --coordinator-addr <IP_COORDINATOR_ADDR>
dora start qwenvl-compression.yml --uv --coordinator-addr <IP_COORDINATOR_ADDR>
```

+50 -0 examples/so100-remote/no_torque.yml

@@ -0,0 +1,50 @@
nodes:
- id: so100
path: dora-rustypot
build: pip install -e ../../node-hub/dora-rustypot
inputs:
tick: dora/timer/millis/33
#pose: pytorch-kinematics/action
outputs:
- pose
env:
PORT: /dev/ttyACM0
IDS: 1 2 3 4 5 6

- id: camera
build: pip install -e ../../node-hub/dora-pyrealsense
path: dora-pyrealsense
inputs:
tick: dora/timer/millis/33
outputs:
- image
- depth

- id: pytorch-kinematics
build: pip install -e ../../node-hub/dora-pytorch-kinematics
path: dora-pytorch-kinematics
inputs:
pose: so100/pose
outputs:
- pose
- action
env:
# Link to your installation of so100-urdf.
# https://huggingface.co/datasets/haixuantao/urdfs/resolve/main/so100/so100_urdf.zip
URDF_PATH: $HOME/Downloads/so100_urdf/so100.urdf
END_EFFECTOR_LINK: "Moving Jaw"
TRANSFORM: -0.2 -0.01 -0.57 0.7 0 0 0.7

- id: plot
build: pip install -e ../../node-hub/dora-rerun
path: dora-rerun
inputs:
jointstate_so100: so100/pose
camera/image: camera/image
camera/depth: camera/depth
env:
# Link to your installation of so100-urdf.
# https://huggingface.co/datasets/haixuantao/urdfs/resolve/main/so100/so100_urdf.zip
so100_urdf: $HOME/Downloads/so100_urdf/so100.urdf
so100_transform: -0.2 -0.01 -0.57 0.7 0 0 0.7
CAMERA_PITCH: -3.1415

+69 -0 examples/so100-remote/parse_bbox.py

@@ -0,0 +1,69 @@
"""TODO: Add docstring."""

import json
import os

import numpy as np
import pyarrow as pa
from dora import Node

node = Node()

IMAGE_RESIZE_RATIO = float(os.getenv("IMAGE_RESIZE_RATIO", "1.0"))


def extract_bboxes(json_text):
"""Extract bounding boxes from a JSON string with markdown markers and return them as a NumPy array.

Parameters
----------
json_text : str
JSON string containing bounding box data, including ```json markers.

Returns
-------
np.ndarray: NumPy array of bounding boxes.

"""
# Ensure all lines are stripped of whitespace and markers
lines = json_text.strip().splitlines()

# Filter out lines that are markdown markers
clean_lines = [line for line in lines if not line.strip().startswith("```")]

# Join the lines back into a single string
clean_text = "\n".join(clean_lines)
# Parse the cleaned JSON text
try:
data = json.loads(clean_text)

# Extract bounding boxes
bboxes = [item["bbox_2d"] for item in data]
labels = [item["label"] for item in data]

return np.array(bboxes), np.array(labels)
    except Exception:  # malformed JSON from the model
        return None, None


for event in node:
if event["type"] == "INPUT":
if len(event["value"]) == 0:
node.send_output("bbox_track", pa.array([]))
continue

text = event["value"][0].as_py()
metadata = event["metadata"]
image_id = event["metadata"]["image_id"]

bboxes, labels = extract_bboxes(text)
if bboxes is not None and len(bboxes) > 0:
bboxes = bboxes * int(1 / IMAGE_RESIZE_RATIO)
metadata["image_id"] = image_id
metadata["encoding"] = "xyxy"
node.send_output(
"bbox",
pa.array(bboxes.ravel()),
metadata,
)

+161 -0 examples/so100-remote/parse_pose.py

@@ -0,0 +1,161 @@
"""TODO: Add docstring."""

import time

import pyarrow as pa
from dora import Node

node = Node()
top_z = -0.43
low_z = -0.57

roll = 1.86
pitch = 1.43
yaw_open = 0.8
yaw_close = -0.5
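# Open-loop choreography (sketch of what follows): each waypoint is sent as an
# xyzrpy action and a fixed time.sleep gives the arm time to reach it before
# the next command; there is no feedback on whether the motion succeeded.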


def grab(target_x, target_y, low_z, top_z, roll, pitch, yaw_open, yaw_close, last_x, last_y):
    """Open above the target, descend, close the gripper, lift, and retract."""
node.send_output(
"action",
pa.array([target_x, target_y, top_z, roll, pitch, yaw_open]),
metadata={"encoding": "xyzrpy"},
)

time.sleep(0.6)

node.send_output(
"action",
pa.array([target_x, target_y, low_z, roll, pitch, yaw_open]),
metadata={"encoding": "xyzrpy"},
)
    time.sleep(0.2)

    node.send_output(
"action",
pa.array([target_x, target_y, low_z, roll, pitch, yaw_close]),
metadata={"encoding": "xyzrpy"},
)

time.sleep(0.4)

node.send_output(
"action",
pa.array([target_x, target_y, top_z, roll, pitch, yaw_close]),
metadata={"encoding": "xyzrpy"},
)

time.sleep(0.5)

node.send_output(
"action",
pa.array([0.05, 0.0, top_z, roll, pitch, yaw_close]),
metadata={"encoding": "xyzrpy"},
)

def place(place_x, place_y, place_z, top_z, roll, pitch, yaw_open, yaw_close, last_x, last_y):
    """Move above the drop point, descend, open the gripper, lift, and retract."""
node.send_output(
"action",
pa.array([place_x, place_y, top_z, roll, pitch, yaw_close]),
metadata={"encoding": "xyzrpy"},
)

time.sleep(0.6)

node.send_output(
"action",
pa.array([place_x, place_y, place_z, roll, pitch, yaw_close]),
metadata={"encoding": "xyzrpy"},
)

    time.sleep(0.2)

    node.send_output(
"action",
pa.array([place_x, place_y, place_z, roll, pitch, yaw_open]),
metadata={"encoding": "xyzrpy"},
)
    time.sleep(0.3)

    node.send_output(
"action",
pa.array([place_x, place_y, top_z, roll, pitch, yaw_open]),
metadata={"encoding": "xyzrpy"},
)
time.sleep(0.3)

node.send_output(
"action",
pa.array([0.05, 0.0, top_z, roll, pitch, yaw_open]),
metadata={"encoding": "xyzrpy"},
)
    time.sleep(0.6)

    node.send_output(
"action",
pa.array([0.05, 0.0, top_z, roll, pitch, yaw_open]),
metadata={"encoding": "xyzrpy"},
)

last_x = 0
last_y = 0

for event in node:
if event["type"] == "INPUT":
if event["id"] == "pose":
values = event["value"]
values = values.to_numpy()
print(values)
if len(values) == 0:
continue
x = values[0]
y = values[1]
z = values[2]
action = event["metadata"]["action"]

# Adjust z with the size of the gripper
z = z + 0.06
match action:
case "grab":
y = y + -0.01
grab(
x,
y,
z,
top_z,
roll,
pitch,
yaw_open,
yaw_close,
last_x,
last_y
)
case "release":
place(
x,
y,
z,
top_z,
roll,
pitch,
yaw_open,
yaw_close,
last_x,
last_y
)
last_x = -0.05
last_y = 0.04

+56 -0 examples/so100-remote/parse_whisper.py

@@ -0,0 +1,56 @@
"""TODO: Add docstring."""

import time

import pyarrow as pa
from dora import Node

node = Node()


last_prompt = ""
for event in node:
if event["type"] == "INPUT":
if event["id"] == "text":
text = event["value"][0].as_py().lower()

if "grab " in text:
text = f"Given the prompt: {text}. Output the bounding boxes for the given object"
node.send_output(
"text", pa.array([text]), {"image_id": "image", "action": "grab"}
)

elif "put " in text:
text = f"Given the prompt: {text}. Output the bounding boxes for the place to put the object"
node.send_output(
"text",
pa.array([text]),
{"image_id": "image", "action": "release"},
)

elif "make a hot dog" in text:
text = f"Given the prompt: grab the sausage. Output the bounding boxes for the given object"
node.send_output(
"text", pa.array([text]), {"image_id": "image", "action": "grab"}
)
time.sleep(4.0)

text = f"Given the prompt: put it in the black cooking grill. Output the bounding boxes for the given object"
node.send_output(
"text", pa.array([text]), {"image_id": "image", "action": "release"}
)
time.sleep(3.0)

text = f"Given the prompt: grab the sausage. Output the bounding boxes for the given object"
node.send_output(
"text", pa.array([text]), {"image_id": "image", "action": "grab"}
)
time.sleep(1.6)
text = f"Given the prompt: put it in the slice of bread. Output the bounding boxes for the given object"
node.send_output(
"text", pa.array([text]), {"image_id": "image", "action": "release"}
)

+180 -0 examples/so100-remote/qwenvl-compression.yml

@@ -0,0 +1,180 @@
nodes:
- id: so100
path: dora-rustypot
inputs:
tick: dora/timer/millis/33
pose:
source: pytorch-kinematics/action
queue_size: 100
outputs:
- pose
env:
PORT: /dev/ttyACM0
TORQUE: 3000
IDS: 1 2 3 4 5 6

- id: camera
build: pip install -e ../../node-hub/dora-pyrealsense
path: dora-pyrealsense
inputs:
tick: dora/timer/millis/150
outputs:
- image
- depth

- id: rav1e-local
path: dora-rav1e
build: cargo build -p dora-rav1e --release
inputs:
image: camera/image
outputs:
- image

- id: rav1e-depth
path: dora-rav1e
build: cargo build -p dora-rav1e --release
inputs:
depth: camera/depth
outputs:
- depth
env:
RAV1E_SPEED: 5

- id: dav1d
path: dora-dav1d
build: cargo build -p dora-dav1d --release
_unstable_deploy:
machine: gpu
inputs:
image: rav1e-local/image
depth: rav1e-depth/depth
outputs:
- image
- depth

- id: pytorch-kinematics
build: pip install node-hub/dora-pytorch-kinematics
path: dora-pytorch-kinematics
_unstable_deploy:
machine: gpu
inputs:
pose: so100/pose
action:
source: parse_pose/action
queue_size: 100
outputs:
- pose
- action
env:
URDF_PATH: so100.urdf
END_EFFECTOR_LINK: "Moving Jaw"
TRANSFORM: -0.2 -0.01 -0.57 0.7 0 0 0.7

- id: plot
build: pip install -e ../../node-hub/dora-rerun
path: dora-rerun
inputs:
#series_so100: so100/pose
# series_pose: pytorch-kinematics/pose
jointstate_so100: so100/pose
camera/image: camera/image
camera/depth: camera/depth
text_whisper: dora-distil-whisper/text
text_vlm: dora-qwenvl/text
camera/boxes2d: parse_bbox/bbox
camera/masks: sam2/masks
env:
so100_urdf: $HOME/Downloads/so100_urdf/so100.urdf
so100_transform: -0.2 -0.01 -0.57 0.7 0 0 0.7
CAMERA_PITCH: -3.1415

- id: dora-microphone
build: pip install node-hub/dora-microphone
path: dora-microphone
inputs:
tick: dora/timer/millis/2000
outputs:
- audio

- id: parse_whisper
path: parse_whisper.py
inputs:
text: dora-distil-whisper/text
outputs:
- text
env:
SPEED: 1.5

- id: dora-qwenvl
build: pip install node-hub/dora-qwen2-5-vl
path: dora-qwen2-5-vl
_unstable_deploy:
machine: gpu
inputs:
image: dav1d/image
text: parse_whisper/text
outputs:
- text
env:
DEFAULT_QUESTION: Output the bounding box of the suitcase.
IMAGE_RESIZE_RATIO: "1.0"

- id: parse_bbox
path: parse_bbox.py
inputs:
text: dora-qwenvl/text
outputs:
- bbox
env:
IMAGE_RESIZE_RATIO: "1.0"

- id: sam2
build: pip install node-hub/dora-sam2
path: dora-sam2
_unstable_deploy:
machine: gpu
inputs:
image: dav1d/image
boxes2d: parse_bbox/bbox
outputs:
- masks

- id: box_coordinates
build: pip install node-hub/dora-object-to-pose
path: dora-object-to-pose
_unstable_deploy:
machine: gpu
inputs:
depth: dav1d/depth
masks: sam2/masks
outputs:
- pose
env:
CAMERA_PITCH: -3.1415

- id: parse_pose
path: parse_pose.py
inputs:
pose: box_coordinates/pose
outputs:
- action

- id: dora-vad
build: pip install node-hub/dora-vad
path: dora-vad
inputs:
audio: dora-microphone/audio
outputs:
- audio

- id: dora-distil-whisper
build: pip install node-hub/dora-distil-whisper
path: dora-distil-whisper
_unstable_deploy:
machine: gpu
inputs:
input: dora-vad/audio
outputs:
- text
env:
TARGET_LANGUAGE: english

+153 -0 examples/so100-remote/qwenvl-remote.yml

@@ -0,0 +1,153 @@
nodes:
- id: so100
path: dora-rustypot
inputs:
tick: dora/timer/millis/33
pose:
source: pytorch-kinematics/action
queue_size: 100
outputs:
- pose
env:
PORT: /dev/ttyACM0
TORQUE: 2000
IDS: 1 2 3 4 5 6

- id: camera
build: pip install -e ../../node-hub/dora-pyrealsense
path: dora-pyrealsense
inputs:
tick: dora/timer/millis/100
outputs:
- image
- depth

- id: pytorch-kinematics
build: pip install node-hub/dora-pytorch-kinematics
path: dora-pytorch-kinematics
_unstable_deploy:
machine: gpu
inputs:
pose: so100/pose
action:
source: parse_pose/action
queue_size: 100
outputs:
- pose
- action
env:
URDF_PATH: so100.urdf
END_EFFECTOR_LINK: "Moving Jaw"
TRANSFORM: -0.2 -0.01 -0.57 0.7 0 0 0.7

- id: plot
build: pip install -e ../../node-hub/dora-rerun
path: dora-rerun
inputs:
#series_so100: so100/pose
# series_pose: pytorch-kinematics/pose
jointstate_so100: so100/pose
camera/image: camera/image
camera/depth: camera/depth
text_whisper: dora-distil-whisper/text
text_vlm: dora-qwenvl/text
camera/boxes2d: parse_bbox/bbox
camera/masks: sam2/masks
env:
# Link to your installation of so100-urdf.
# https://huggingface.co/datasets/haixuantao/urdfs/resolve/main/so100/so100_urdf.zip
so100_urdf: $HOME/Downloads/so100_urdf/so100.urdf
so100_transform: -0.2 -0.01 -0.57 0.7 0 0 0.7
so100_inference_transform: -0.2 -0.01 -0.57 0.7 0 0 0.7
CAMERA_PITCH: -3.1415

- id: dora-microphone
build: pip install node-hub/dora-microphone
path: dora-microphone
inputs:
tick: dora/timer/millis/2000
outputs:
- audio

- id: parse_whisper
path: parse_whisper.py
_unstable_deploy:
machine: gpu
inputs:
text: dora-distil-whisper/text
outputs:
- text

- id: dora-qwenvl
build: pip install node-hub/dora-qwen2-5-vl
path: dora-qwen2-5-vl
_unstable_deploy:
machine: gpu
inputs:
image: camera/image
text: parse_whisper/text
outputs:
- text
env:
DEFAULT_QUESTION: Output the bounding box of the suitcase.
IMAGE_RESIZE_RATIO: "1.0"

- id: parse_bbox
path: parse_bbox.py
inputs:
text: dora-qwenvl/text
outputs:
- bbox
env:
IMAGE_RESIZE_RATIO: "1.0"

- id: sam2
build: pip install node-hub/dora-sam2
path: dora-sam2
_unstable_deploy:
machine: gpu
inputs:
image: camera/image
boxes2d: parse_bbox/bbox
outputs:
- masks

- id: box_coordinates
build: pip install node-hub/dora-object-to-pose
path: dora-object-to-pose
_unstable_deploy:
machine: gpu
inputs:
depth: camera/depth
masks: sam2/masks
outputs:
- pose
env:
CAMERA_PITCH: -3.1415

- id: parse_pose
path: parse_pose.py
inputs:
pose: box_coordinates/pose
outputs:
- action

- id: dora-vad
build: pip install node-hub/dora-vad
path: dora-vad
inputs:
audio: dora-microphone/audio
outputs:
- audio

- id: dora-distil-whisper
build: pip install node-hub/dora-distil-whisper
path: dora-distil-whisper
_unstable_deploy:
machine: gpu
inputs:
input: dora-vad/audio
outputs:
- text
env:
TARGET_LANGUAGE: english

+142 -0 examples/so100-remote/qwenvl.yml

@@ -0,0 +1,142 @@
nodes:
- id: so100
path: dora-rustypot
inputs:
tick: dora/timer/millis/33
pose:
source: pytorch-kinematics/action
queue_size: 100
outputs:
- pose
env:
PORT: /dev/ttyACM0
TORQUE: 5000
IDS: 1 2 3 4 5 6

- id: camera
build: pip install -e ../../node-hub/dora-pyrealsense
path: dora-pyrealsense
inputs:
tick: dora/timer/millis/33
outputs:
- image
- depth

- id: pytorch-kinematics
build: pip install -e ../../node-hub/dora-pytorch-kinematics
path: dora-pytorch-kinematics
inputs:
pose: so100/pose
action:
source: parse_pose/action
queue_size: 100
outputs:
- pose
- action
env:
# Link to your installation of so100-urdf.
# https://huggingface.co/datasets/haixuantao/urdfs/resolve/main/so100/so100_urdf.zip
URDF_PATH: $HOME/Downloads/so100_urdf/so100.urdf
END_EFFECTOR_LINK: "Moving Jaw"
TRANSFORM: -0.2 -0.01 -0.57 0.7 0 0 0.7

- id: plot
build: pip install -e ../../node-hub/dora-rerun
path: dora-rerun
inputs:
#series_so100: so100/pose
# series_pose: pytorch-kinematics/pose
jointstate_so100: so100/pose
jointstate_so100_inference: pytorch-kinematics/action
camera/image: camera/image
camera/depth: camera/depth
text_whisper: dora-distil-whisper/text
text_vlm: dora-qwenvl/text
camera/boxes2d: parse_bbox/bbox
camera/masks: sam2/masks
env:
so100_urdf: $HOME/Downloads/so100_urdf/so100.urdf
so100_transform: -0.2 -0.01 -0.57 0.7 0 0 0.7
so100_inference_transform: -0.2 -0.01 -0.57 0.7 0 0 0.7
CAMERA_PITCH: -3.1415

- id: dora-microphone
build: pip install -e ../../node-hub/dora-microphone
path: dora-microphone
inputs:
tick: dora/timer/millis/2000
outputs:
- audio

- id: parse_whisper
path: parse_whisper.py
inputs:
text: dora-distil-whisper/text
outputs:
- text

- id: dora-qwenvl
build: pip install -e ../../node-hub/dora-qwen2-5-vl
path: dora-qwen2-5-vl
inputs:
image: camera/image
text: parse_whisper/text
outputs:
- text
env:
DEFAULT_QUESTION: Output the bounding box of the suitcase.
IMAGE_RESIZE_RATIO: "1.0"

- id: parse_bbox
path: parse_bbox.py
inputs:
text: dora-qwenvl/text
outputs:
- bbox
env:
IMAGE_RESIZE_RATIO: "1.0"

- id: sam2
build: pip install -e ../../node-hub/dora-sam2
path: dora-sam2
inputs:
image: camera/image
boxes2d: parse_bbox/bbox
outputs:
- masks

- id: box_coordinates
build: pip install -e ../../node-hub/dora-object-to-pose
path: dora-object-to-pose
inputs:
depth: camera/depth
masks: sam2/masks
outputs:
- pose
env:
CAMERA_PITCH: -3.1415

- id: parse_pose
path: parse_pose.py
inputs:
pose: box_coordinates/pose
outputs:
- action

- id: dora-vad
build: pip install -e ../../node-hub/dora-vad
path: dora-vad
inputs:
audio: dora-microphone/audio
outputs:
- audio

- id: dora-distil-whisper
build: pip install -e ../../node-hub/dora-distil-whisper
path: dora-distil-whisper
inputs:
input: dora-vad/audio
outputs:
- text
env:
TARGET_LANGUAGE: english

+51 -0 examples/tracker/facebook_cotracker.yml

@@ -0,0 +1,51 @@
nodes:
- id: camera
build: pip install -e ../../node-hub/opencv-video-capture
path: opencv-video-capture
inputs:
tick: dora/timer/millis/100
outputs:
- image
env:
CAPTURE_PATH: "0"
ENCODING: "rgb8"
IMAGE_WIDTH: "640"
IMAGE_HEIGHT: "480"

- id: object-detection
build: pip install -e ../../node-hub/dora-yolo
path: dora-yolo
inputs:
image: camera/image
outputs:
- bbox

- id: tracker
build: pip install -e ../../node-hub/dora-cotracker
path: dora-cotracker
inputs:
image: camera/image
boxes2d: object-detection/bbox
# points_to_track: input/points_to_track # uncomment this if using input node
outputs:
- tracked_image
- points
env:
INTERACTIVE_MODE: false

- id: plot
build: pip install -e ../../node-hub/dora-rerun
path: dora-rerun
inputs:
image: camera/image
tracked_image: tracker/tracked_image

# Replace with your own node that outputs tracking points
# (e.g., a YOLO detector or pose estimator); uncomment the block below if feeding points via a node.
# - id: point_source
# build: pip install your-node # Replace with your node's name
# path: your-point-source-node # Replace with your node's path
# inputs:
# image: camera/image # If your node needs image input
# outputs:
# - points_to_track # Must output points in required format

+63 -0 examples/tracker/parse_bbox.py

@@ -0,0 +1,63 @@
"""TODO: Add docstring."""

import json
import os

import numpy as np
import pyarrow as pa
from dora import Node

node = Node()

IMAGE_RESIZE_RATIO = float(os.getenv("IMAGE_RESIZE_RATIO", "1.0"))


def extract_bboxes(json_text):
"""Extract bounding boxes from a JSON string with markdown markers and return them as a NumPy array.

Parameters
----------
json_text : str
JSON string containing bounding box data, including ```json markers.

Returns
-------
np.ndarray: NumPy array of bounding boxes.

"""
# Ensure all lines are stripped of whitespace and markers
lines = json_text.strip().splitlines()

# Filter out lines that are markdown markers
clean_lines = [line for line in lines if not line.strip().startswith("```")]

# Join the lines back into a single string
clean_text = "\n".join(clean_lines)
# Parse the cleaned JSON text
try:
data = json.loads(clean_text)

# Extract bounding boxes
bboxes = [item["bbox_2d"] for item in data]
labels = [item["label"] for item in data]

return np.array(bboxes), np.array(labels)
    except Exception:  # malformed JSON from the model
        return None, None


for event in node:
if event["type"] == "INPUT":
text = event["value"][0].as_py()
image_id = event["metadata"]["image_id"]

bboxes, labels = extract_bboxes(text)
if bboxes is not None and len(bboxes) > 0:
bboxes = bboxes * int(1 / IMAGE_RESIZE_RATIO)

node.send_output(
"bbox",
pa.array(bboxes.ravel()),
metadata={"encoding": "xyxy", "image_id": image_id},
)

+67 -0 examples/tracker/qwenvl_cotracker.yml

@@ -0,0 +1,67 @@
nodes:
- id: camera
build: pip install -e ../../node-hub/opencv-video-capture
path: opencv-video-capture
inputs:
tick: dora/timer/millis/100
outputs:
- image
env:
CAPTURE_PATH: "0"
ENCODING: "rgb8"
IMAGE_WIDTH: "640"
IMAGE_HEIGHT: "480"

- id: dora-qwenvl
build: pip install -e ../../node-hub/dora-qwen2-5-vl
path: dora-qwen2-5-vl
inputs:
image: camera/image
text_1: dora/timer/millis/600
outputs:
- text
env:
DEFAULT_QUESTION: Output the bounding box of the eyes.
IMAGE_RESIZE_RATIO: "0.5"
# ACTIVATION_WORDS: grab pick give output take catch grabs picks gives output takes catches have
#SYSTEM_PROMPT: You're a robot.

- id: parse_bbox
path: parse_bbox.py
inputs:
text: dora-qwenvl/text
outputs:
- bbox
env:
IMAGE_RESIZE_RATIO: "0.5"

- id: tracker
build: pip install -e ../../node-hub/dora-cotracker
path: dora-cotracker
inputs:
image: camera/image
boxes2d: parse_bbox/bbox
# points_to_track: input/points_to_track # uncomment this if using input node
outputs:
- tracked_image
- points
env:
INTERACTIVE_MODE: false

- id: plot
build: pip install -e ../../node-hub/dora-rerun
path: dora-rerun
inputs:
image: camera/image
boxes2d: parse_bbox/bbox
tracked_image: tracker/tracked_image

# To feed tracking points from your own node instead (e.g., a YOLO detector
# or pose estimator), uncomment the block below and the points_to_track input above:
# - id: point_source
# build: pip install your-node # Replace with your node's name
# path: your-point-source-node # Replace with your node's path
# inputs:
# image: camera/image # If your node needs image input
# outputs:
# - points_to_track # Must output points in required format

+ 4
- 4
libraries/extensions/telemetry/metrics/Cargo.toml View File

@@ -10,12 +10,12 @@ repository.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
opentelemetry = { version = "0.28.0", features = ["metrics"] }
opentelemetry-otlp = { version = "0.28.0", features = [
opentelemetry = { version = "0.29.1", features = ["metrics"] }
opentelemetry-otlp = { version = "0.29.0", features = [
    "tonic",
    "metrics",
    "grpc-tonic",
] }
opentelemetry_sdk = { version = "0.28.0", features = ["rt-tokio", "metrics"] }
opentelemetry_sdk = { version = "0.29.0", features = ["rt-tokio", "metrics"] }
eyre = "0.6.12"
opentelemetry-system-metrics = { version = "0.3.1" }
opentelemetry-system-metrics = { version = "0.4.1" }

+ 2
- 2
libraries/extensions/telemetry/metrics/src/lib.rs View File

@@ -31,7 +31,7 @@ pub fn init_metrics() -> SdkMeterProvider {
    .build()
}

pub async fn init_meter_provider(meter_id: String) -> Result<SdkMeterProvider> {
pub async fn run_metrics_monitor(meter_id: String) -> Result<()> {
    let meter_provider = init_metrics();
    global::set_meter_provider(meter_provider.clone());
    let scope = InstrumentationScope::builder(meter_id)
@@ -40,5 +40,5 @@ pub async fn init_meter_provider(meter_id: String) -> Result<SdkMeterProvider> {
    let meter = global::meter_with_scope(scope);

    init_process_observer(meter).await.unwrap();
    Ok(meter_provider)
    Ok(())
}

+ 73
- 28
libraries/extensions/telemetry/tracing/src/lib.rs View File

@@ -11,38 +11,61 @@ use tracing_subscriber::{
    filter::FilterExt, prelude::__tracing_subscriber_SubscriberExt, EnvFilter, Layer,
};

use eyre::ContextCompat;
use tracing_subscriber::Registry;
pub mod telemetry;

/// Set up tracing with a default configuration.
///
/// This will set up a global subscriber that logs to stdout with a filter level of "warn".
///
/// Should **ONLY** be used in `DoraNode` implementations.
pub fn set_up_tracing(name: &str) -> eyre::Result<()> {
    set_up_tracing_opts(name, Some("warn"), None)
    TracingBuilder::new(name)
        .with_stdout("warn")
        .build()
        .wrap_err(format!(
            "failed to set tracing global subscriber for {name}"
        ))?;
    Ok(())
}

pub struct FileLogging {
    pub file_name: String,
    pub filter: LevelFilter,
#[must_use = "call `build` to finalize the tracing setup"]
pub struct TracingBuilder {
    name: String,
    layers: Vec<Box<dyn Layer<Registry> + Send + Sync>>,
}

pub fn set_up_tracing_opts(
    name: &str,
    stdout_filter: Option<impl AsRef<str>>,
    file: Option<FileLogging>,
) -> eyre::Result<()> {
    let mut layers = Vec::new();
impl TracingBuilder {
    pub fn new(name: impl Into<String>) -> Self {
        Self {
            name: name.into(),
            layers: Vec::new(),
        }
    }

    if let Some(filter) = stdout_filter {
    /// Add a layer that writes logs to [std::io::stdout] with the given filter.
    ///
    /// **DO NOT** use this in `DoraNode` implementations:
    /// it uses [std::io::stdout], which is synchronous
    /// and might block the logging thread.
    pub fn with_stdout(mut self, filter: impl AsRef<str>) -> Self {
        let parsed = EnvFilter::builder().parse_lossy(filter);
        // Filter logs using `RUST_LOG`. More useful for the CLI.
        let env_filter = EnvFilter::from_default_env().or(parsed);
        let layer = tracing_subscriber::fmt::layer()
            .compact()
            .with_writer(std::io::stdout)
            .with_filter(env_filter);
        layers.push(layer.boxed());
        self.layers.push(layer.boxed());
        self
    }

    if let Some(file) = file {
        let FileLogging { file_name, filter } = file;
    /// Add a layer that writes logs to a file with the given name and filter.
    pub fn with_file(
        mut self,
        file_name: impl Into<String>,
        filter: LevelFilter,
    ) -> eyre::Result<Self> {
        let file_name = file_name.into();
        let out_dir = Path::new("out");
        std::fs::create_dir_all(out_dir).context("failed to create `out` directory")?;
        let path = out_dir.join(file_name).with_extension("txt");
@@ -51,26 +74,48 @@ pub fn set_up_tracing_opts(
            .append(true)
            .open(path)
            .context("failed to create log file")?;
        // Filter logs using `RUST_LOG`. More useful for the CLI.
        let layer = tracing_subscriber::fmt::layer()
            .with_ansi(false)
            .with_writer(file)
            .with_filter(filter);
        layers.push(layer.boxed());
        self.layers.push(layer.boxed());
        Ok(self)
    }

    if let Some(endpoint) = std::env::var_os("DORA_JAEGER_TRACING") {
        let endpoint = endpoint
            .to_str()
            .wrap_err("Could not parse env variable: DORA_JAEGER_TRACING")?;
        let tracer = crate::telemetry::init_jaeger_tracing(name, endpoint)
    pub fn with_jaeger_tracing(mut self) -> eyre::Result<Self> {
        let endpoint = std::env::var("DORA_JAEGER_TRACING")
            .wrap_err("DORA_JAEGER_TRACING environment variable not set")?;
        let tracer = crate::telemetry::init_jaeger_tracing(&self.name, &endpoint)
            .wrap_err("Could not instantiate tracing")?;
        let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);
        layers.push(telemetry.boxed());
        self.layers.push(telemetry.boxed());
        Ok(self)
    }

    pub fn add_layer<L>(mut self, layer: L) -> Self
    where
        L: Layer<Registry> + Send + Sync + 'static,
    {
        self.layers.push(layer.boxed());
        self
    }

    let registry = Registry::default().with(layers);
    tracing::subscriber::set_global_default(registry).context(format!(
        "failed to set tracing global subscriber for {name}"
    ))
    pub fn with_layers<I, L>(mut self, layers: I) -> Self
    where
        I: IntoIterator<Item = L>,
        L: Layer<Registry> + Send + Sync + 'static,
    {
        for layer in layers {
            self.layers.push(layer.boxed());
        }
        self
    }

    pub fn build(self) -> eyre::Result<()> {
        let registry = Registry::default().with(self.layers);
        tracing::subscriber::set_global_default(registry).context(format!(
            "failed to set tracing global subscriber for {}",
            self.name
        ))
    }
}

+ 1
- 1
node-hub/dora-argotranslate/pyproject.toml View File

@@ -1,6 +1,6 @@
[project]
name = "dora-argotranslate"
version = "0.3.10"
version = "0.3.11"
description = "Dora Node for Text translating using Argostranslate"
authors = [
    { name = "Haixuan Xavier Tao", email = "tao.xavier@outlook.com" },


+ 221
- 0
node-hub/dora-cotracker/README.md View File

@@ -0,0 +1,221 @@
# dora-cotracker

A Dora node that implements real-time object tracking using Facebook's CoTracker model. The node supports both interactive point selection via clicking and programmatic point input through Dora's messaging system.

## Features

- Real-time object tracking using CoTracker
- Support for multiple tracking points
- Interactive point selection via mouse clicks
- Programmatic point input through Dora messages
- Visualization of tracked points with unique identifiers

## Getting Started

### Installation

Install using uv:

```bash
uv venv -p 3.11 --seed
uv pip install -e .
```

## Demo Video

Watch a demonstration of the dora-cotracker node in action:

[![Dora CoTracker Demo](https://img.youtube.com/vi/1VmC1BNq6J0/0.jpg)](https://youtu.be/1VmC1BNq6J0)

The video shows:
- Setting up the node
- Interactive point selection
- Real-time tracking performance

### Basic Usage

1. Create a YAML configuration file (e.g., `demo.yml`):

```yaml
nodes:
  - id: camera
    build: pip install opencv-video-capture
    path: opencv-video-capture
    inputs:
      tick: dora/timer/millis/100
    outputs:
      - image
    env:
      CAPTURE_PATH: "0"
      ENCODING: "rgb8"
      IMAGE_WIDTH: "640"
      IMAGE_HEIGHT: "480"

  - id: tracker
    build: pip install -e dora-cotracker
    path: dora-cotracker
    inputs:
      image: camera/image
      # points_to_track: point_source/points_to_track # uncomment when wiring a point-source node
    outputs:
      - tracked_image
      - points

  - id: display
    build: pip install dora-rerun
    path: dora-rerun
    inputs:
      image: camera/image
      tracked_image: tracker/tracked_image
```

*Note*: this demo only uses an OpenCV camera capture as its input source. See below to add your own node to the workflow and pass tracking points directly.

2. Run the demo:

```bash
dora run demo.yml --uv
```

## Usage Examples

### 1. Interactive Point Selection
Click points directly in the "Raw Feed" window to start tracking them:
- Left-click to add tracking points
- Points will be tracked automatically across frames
- Each point is assigned a unique identifier (C0, C1, etc. for clicked points and I0, I1, etc. for input points)

### 2. Dynamic Point Integration
The node can receive tracking points from other models or nodes in your pipeline. Common use cases include:

- Tracking YOLO detection centroids
- Following pose estimation keypoints
- Monitoring segmentation mask centers
- Custom object detection points

The following example shows how to send tracking points through Dora messages using a custom input node:

```python
import numpy as np
import pyarrow as pa
from dora import Node

class PointInputNode:
    def __init__(self):
        self.node = Node("point-input")

    def send_points(self, points):
        """Send points to the tracker.

        Args:
            points: Nx2 array of (x, y) coordinates
        """
        points = np.array(points, dtype=np.float32)
        self.node.send_output(
            "points_to_track",
            pa.array(points.ravel()),
            {
                "num_points": len(points),
                "dtype": "float32",
                "shape": (len(points), 2),
            },
        )

    def run(self):
        # Example: track 3 points
        points = np.array([
            [320, 240],  # Center
            [160, 120],  # Top-left
            [480, 360],  # Bottom-right
        ])
        self.send_points(points)
```



To connect your existing node that outputs tracking points with the CoTracker node, add the following to your YAML configuration:

```yaml
nodes:
  # Your existing point source node (e.g., YOLO detector, pose estimator, etc.)
  - id: point_source
    build: pip install your-node # Replace with your node's name
    path: your-point-source-node # Replace with your node's path
    inputs:
      image: camera/image # If your node needs image input
    outputs:
      - points_to_track # Must output points in required format

  # CoTracker node configuration
  - id: tracker
    build: pip install dora-cotracker
    path: dora-cotracker
    inputs:
      image: camera/image
      points_to_track: point_source/points_to_track # Connect to your point source
    outputs:
      - tracked_image
      - points

  # Optional visualization
  - id: display
    build: pip install dora-rerun
    path: dora-rerun
    inputs:
      image: camera/image
      tracked_image: tracker/tracked_image
```

Your point source node must output points in the following format:
- Topic name: `points_to_track`
- Data: Flattened numpy array of x,y coordinates
- Metadata:
```python
{
    "num_points": len(points),  # Number of points
    "dtype": "float32",         # Data type
    "shape": (N, 2),            # N points, 2 coordinates each
}
```

Example point source implementations:
- YOLO detection centroids
- Pose estimation keypoints
- Face landmark detectors
- Custom object detectors

For dynamic updates, send new points whenever your source node processes a new frame. The tracker will maintain temporal consistency between updates.
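
For example, a source node might re-send its freshest detections on every incoming frame. A minimal sketch (`detect_points` is a hypothetical stand-in for your detector; the image layout follows the camera node's `rgb8` output above):

```python
import numpy as np
import pyarrow as pa
from dora import Node


def detect_points(frame):
    """Hypothetical detector: return an Nx2 float32 array of (x, y) pixels."""
    h, w = frame.shape[:2]
    return np.array([[w / 2, h / 2]], dtype=np.float32)  # image center as a stand-in


node = Node()
for event in node:
    if event["type"] == "INPUT" and event["id"] == "image":
        meta = event["metadata"]
        frame = event["value"].to_numpy().reshape((meta["height"], meta["width"], 3))
        points = detect_points(frame)
        node.send_output(
            "points_to_track",
            pa.array(points.ravel()),
            {
                "num_points": len(points),
                "dtype": "float32",
                "shape": (len(points), 2),
            },
        )
```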
## API Reference

### Input Topics
- `image`: Input video stream (RGB format)
- `points_to_track`: Points to track
- Format: Flattened array of x,y coordinates
- Metadata:
- `num_points`: Number of points
- `dtype`: "float32"
- `shape`: (N, 2) where N is number of points

### Output Topics
- `tracked_image`: Visualization with tracked points
- `points`: Current positions of tracked points
- Same format as input points
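
For reference, a minimal downstream consumer of the tracker's `points` output (a sketch, assuming the flattened layout described above):

```python
from dora import Node

node = Node()

for event in node:
    if event["type"] == "INPUT" and event["id"] == "points":
        # Flattened [x0, y0, x1, y1, ...]; reshape into an (N, 2) array.
        pts = event["value"].to_numpy().reshape((-1, 2))
        print(f"received {len(pts)} tracked points")
```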

## Development

Format code with ruff:
```bash
uv pip install ruff
uv run ruff check . --fix
```

Run tests:
```bash
uv pip install pytest
uv run pytest
```

## License

dora-cotracker's code is released under the MIT License.

+ 40
- 0
node-hub/dora-cotracker/demo.yml View File

@@ -0,0 +1,40 @@
nodes:
- id: camera
build: pip install opencv-video-capture
path: opencv-video-capture
inputs:
tick: dora/timer/millis/100
outputs:
- image
env:
CAPTURE_PATH: "0"
ENCODING: "rgb8"
IMAGE_WIDTH: "640"
IMAGE_HEIGHT: "480"

- id: tracker
build: pip install -e .
path: dora-cotracker
inputs:
image: camera/image
# points_to_track: point_source/points_to_track # uncomment when using the point-source node below
outputs:
- tracked_image
- points

- id: plot
build: pip install dora-rerun
path: dora-rerun
inputs:
image: camera/image
tracked_image: tracker/tracked_image

# To feed tracking points from your own node instead (e.g., a YOLO detector
# or pose estimator), uncomment the block below and the points_to_track input above:
# - id: point_source
# build: pip install your-node # Replace with your node's name
# path: your-point-source-node # Replace with your node's path
# inputs:
# image: camera/image # If your node needs image input
# outputs:
# - points_to_track # Must output points in required format

+ 11
- 0
node-hub/dora-cotracker/dora_cotracker/__init__.py View File

@@ -0,0 +1,11 @@
"""Expose the package README as the module docstring."""

import os

# Define the path to the README file relative to the package directory
readme_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "README.md")

# Read the content of the README file
try:
with open(readme_path, "r", encoding="utf-8") as f:
__doc__ = f.read()
except FileNotFoundError:
__doc__ = "README file not found."

+ 5
- 0
node-hub/dora-cotracker/dora_cotracker/__main__.py View File

@@ -0,0 +1,5 @@
"""Command-line entry point for dora-cotracker."""

from .main import main


if __name__ == "__main__":
main()

+ 212
- 0
node-hub/dora-cotracker/dora_cotracker/main.py View File

@@ -0,0 +1,212 @@
import os
from collections import deque

import cv2
import numpy as np
import pyarrow as pa
import torch
from dora import Node

INTERACTIVE_MODE = os.getenv("INTERACTIVE_MODE", "false").lower() == "true"


class VideoTrackingNode:
def __init__(self):
self.node = Node("video-tracking-node")
# Initialize CoTracker
self.device = "cuda" if torch.cuda.is_available() else "cpu"
self.model = torch.hub.load("facebookresearch/co-tracker", "cotracker3_online")
self.model = self.model.to(self.device)
self.model.eval()
self.model.step = 8
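# The online model consumes sliding windows of 2 * step frames (buffered below).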
self.buffer_size = self.model.step * 2
self.window_frames = deque(maxlen=self.buffer_size)
self.is_first_step = True
self.accept_new_points = True
self.clicked_points = []
self.input_points = []
self.input_masks = []

def mouse_callback(self, event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
self.clicked_points.append([x, y])
self.is_first_step = True
# print(f"Clicked point added at: ({x}, {y})")

def process_tracking(self, frame):
"""Process frame for tracking"""
if len(self.window_frames) == self.buffer_size:
all_points = self.input_points + self.clicked_points

if not all_points:
return None, None
video_chunk = torch.tensor(
np.stack(list(self.window_frames)), device=self.device
).float()
video_chunk = video_chunk / 255.0
# Reshape to [B,T,C,H,W]
video_chunk = video_chunk.permute(0, 3, 1, 2)[None]
query_points = torch.tensor(all_points, device=self.device).float()
time_dim = torch.zeros(len(all_points), 1, device=self.device)
queries = torch.cat([time_dim, query_points], dim=1).unsqueeze(0)
# Track points
pred_tracks, pred_visibility = self.model(
video_chunk,
queries=queries,
is_first_step=self.is_first_step,
grid_size=0,
add_support_grid=False,
)
self.is_first_step = False

if pred_tracks is not None and pred_visibility is not None:
self.accept_new_points = True
tracks = pred_tracks[0, -1].cpu().numpy()
visibility = pred_visibility[0, -1].cpu().numpy()
visible_tracks = []
for pt, vis in zip(tracks, visibility):
if vis > 0.5:
visible_tracks.append([int(pt[0]), int(pt[1])])
visible_tracks = np.array(visible_tracks, dtype=np.float32)

frame_viz = frame.copy()
num_input_stream = len(self.input_points)
# Draw points received via the points/boxes2d inputs in green
for i, (pt, vis) in enumerate(
zip(tracks[:num_input_stream], visibility[:num_input_stream])
):
if vis > 0.5:
x, y = int(pt[0]), int(pt[1])
cv2.circle(
frame_viz, (x, y), radius=3, color=(0, 255, 0), thickness=-1
)
cv2.putText(
frame_viz,
f"I{i}",
(x + 5, y - 5),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
(0, 255, 0),
1,
)

# Draw clicked points in a second color
for i, (pt, vis) in enumerate(
zip(tracks[num_input_stream:], visibility[num_input_stream:])
):
if vis > 0.5:
x, y = int(pt[0]), int(pt[1])
cv2.circle(
frame_viz, (x, y), radius=3, color=(0, 0, 255), thickness=-1
)
cv2.putText(
frame_viz,
f"C{i}",
(x + 5, y - 5),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
(0, 0, 255),
1,
)

# Send tracked points
if len(visible_tracks) > 0:
self.node.send_output(
"points",
pa.array(visible_tracks.ravel()),
{
"num_points": len(visible_tracks),
"dtype": "float32",
"shape": (len(visible_tracks), 2),
"width": frame.shape[1],
"height": frame.shape[0],
},
)

return frame, frame_viz

return None, None

def run(self):
"""Main run loop"""
if INTERACTIVE_MODE:
cv2.namedWindow("Interactive Feed to track point", cv2.WINDOW_NORMAL)
cv2.setMouseCallback("Interactive Feed to track point", self.mouse_callback)

for event in self.node:
if event["type"] == "INPUT":
if event["id"] == "image":
metadata = event["metadata"]
frame = (
event["value"]
.to_numpy()
.reshape((metadata["height"], metadata["width"], 3))
)
# Add frame to tracking window
self.window_frames.append(frame)
original_frame, tracked_frame = self.process_tracking(frame)
if original_frame is not None and tracked_frame is not None:
self.node.send_output(
"tracked_image", pa.array(tracked_frame.ravel()), metadata
)

if INTERACTIVE_MODE:
display_frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
cv2.imshow("Interactive Feed to track point", display_frame)
cv2.waitKey(1)

elif event["id"] == "points":
if not self.accept_new_points:
continue
# Handle points from input_stream node
metadata = event["metadata"]
points_array = event["value"].to_numpy()
self.input_points = points_array.reshape((-1, 2)).tolist()
self.accept_new_points = False
self.is_first_step = True
elif event["id"] == "boxes2d":
if not self.accept_new_points:
continue
if len(event["value"]) == 0:
self.input_points = []
self.is_first_step = True
continue

# Handle bounding boxes from an upstream detector
metadata = event["metadata"]
if isinstance(event["value"], pa.StructArray):
boxes2d = (
event["value"]
.get("bbox")
.values.to_numpy()
.reshape((-1, 4))
)
_labels = (
event["value"]
.get("labels")
.values.to_numpy(zero_copy_only=False)
)
else:
boxes2d = event["value"].to_numpy().reshape((-1, 4))
_labels = None

# Seed three tracking points per box along its vertical center line
# (x at 50% of the width; y at 40%, 50%, and 60% of the height).
self.input_points = [
    [
        int(x_min + (x_max - x_min) * 2 / 4),
        int(y_min + (y_max - y_min) * i / 10),
    ]
    for i in range(4, 7)
    for x_min, y_min, x_max, y_max in boxes2d
]

self.is_first_step = True
self.accept_new_points = False


def main():
tracker = VideoTrackingNode()
tracker.run()


if __name__ == "__main__":
main()

+ 32
- 0
node-hub/dora-cotracker/pyproject.toml View File

@@ -0,0 +1,32 @@
[project]
name = "dora-cotracker"
version = "0.1.0"
authors = [{ name = "Shashwat Patil", email = "shashwatpatil974@gmail.com" }]
description = "A Dora node implementing real-time object tracking using Facebook's CoTracker model"
license = "CC-BY-1.0"
readme = "README.md"
requires-python = ">=3.10"

dependencies = [
"dora-rs>=0.3.9",
"torch>=2.0.0",
"numpy>=1.24.0",
"opencv-python>=4.8.0",
"pyarrow>=14.0.1",
"cotracker @ git+https://github.com/facebookresearch/co-tracker.git",
]

[dependency-groups]
dev = ["pytest >=8.1.1", "ruff >=0.9.1"]

[project.scripts]
dora-cotracker = "dora_cotracker.main:main"

[tool.ruff.lint]
extend-select = [
"PERF", # Performance
"RET", # Return statements
"RSE", # Runtime errors
"NPY", # NumPy
"N", # Naming
]

+ 9
- 0
node-hub/dora-cotracker/tests/test_dora_cotracker.py View File

@@ -0,0 +1,9 @@
import pytest


def test_import_main():
from dora_cotracker.main import main

# Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow.
with pytest.raises(RuntimeError):
main()

+ 32
- 0
node-hub/dora-dav1d/Cargo.toml View File

@@ -0,0 +1,32 @@
[package]
name = "dora-dav1d"
edition = "2021"
license = "BSD-2-Clause"
version.workspace = true

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[features]
default = []
python = ["pyo3"]

[dependencies]
dav1d = "0.10"
bitstream-io = "2.0"
log = "0.4"
structopt = "0.3"
dora-node-api = { workspace = true, features = ["tracing"] }
eyre = "0.6.8"
bytemuck = "1.7.0"
pyo3 = { workspace = true, features = [
"extension-module",
"abi3",
"eyre",
"generate-import-lib",
], optional = true }


[lib]
name = "dora_dav1d"
path = "src/lib.rs"
crate-type = ["lib", "cdylib"]

+ 26
- 0
node-hub/dora-dav1d/pyproject.toml View File

@@ -0,0 +1,26 @@
[build-system]
requires = ["maturin>=0.13.2"]
build-backend = "maturin"

[project]
name = "dora-dav1d"
dynamic = ["version"]
license = { text = "BSD-2-Clause" }
requires-python = ">=3.8"

scripts = { "dora-dav1d" = "dora_dav1d:py_main" }

[tool.maturin]
features = ["python", "pyo3/extension-module"]

[tool.ruff.lint]
extend-select = [
"D", # pydocstyle
"UP", # Ruff's UP rule
"PERF", # Ruff's PERF rule
"RET", # Ruff's RET rule
"RSE", # Ruff's RSE rule
"NPY", # Ruff's NPY rule
"N", # Ruff's N rule
"I", # Ruff's I rule
]

+ 219
- 0
node-hub/dora-dav1d/src/lib.rs View File

@@ -0,0 +1,219 @@
use std::env::var;

use dav1d::Settings;
use dora_node_api::{arrow::array::UInt8Array, DoraNode, Event, IntoArrow};
use eyre::{Context, Result};
use log::warn;

fn yuv420_to_bgr(
y_plane: &[u8],
u_plane: &[u8],
v_plane: &[u8],
width: u32,
height: u32,
) -> Vec<u8> {
let width = width as usize;
let height = height as usize;
let mut rgb_data = vec![0u8; width * height * 3]; // Output BGR data buffer (3 bytes per pixel)

for j in 0..height {
for i in 0..width {
let y_idx = j * width + i; // Index in Y plane
let uv_idx = (j / 2) * (width / 2) + (i / 2); // Index in U/V planes

let y = y_plane[y_idx] as f32;
let u = u_plane[uv_idx] as f32 - 128.0;
let v = v_plane[uv_idx] as f32 - 128.0;

// Convert YUV to RGB using BT.601 standard formula
let r = (y + 1.402 * v).clamp(0.0, 255.0) as u8;
let g = (y - 0.344136 * u - 0.714136 * v).clamp(0.0, 255.0) as u8;
let b = (y + 1.772 * u).clamp(0.0, 255.0) as u8;

// Set the RGB values in the output buffer
let rgb_idx = y_idx * 3;
rgb_data[rgb_idx] = b;
rgb_data[rgb_idx + 1] = g;
rgb_data[rgb_idx + 2] = r;
}
}

rgb_data
}

pub fn lib_main() -> Result<()> {
let mut settings = Settings::new();
// settings.set_n_threads(16);
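// A max frame delay of 1 keeps latency low: the decoder outputs each frame as soon as possible.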
settings.set_max_frame_delay(1);
let mut dec =
dav1d::Decoder::with_settings(&settings).expect("failed to create decoder instance");

let (mut node, mut events) =
DoraNode::init_from_env().context("Could not initialize dora node")?;

let output_encoding = var("ENCODING").unwrap_or("bgr8".to_string());

loop {
match events.recv() {
Some(Event::Input {
id,
data,
mut metadata,
}) => {
if let Some(data) = data.as_any().downcast_ref::<UInt8Array>() {
let data = data.values().clone();
let encoding = metadata
.parameters
.get("encoding")
.and_then(|p| match p {
dora_node_api::Parameter::String(s) => Some(s),
_ => None,
})
.map(|s| s.as_str())
.unwrap_or("av1");
if encoding != "av1" {
warn!("Unsupported encoding {}", encoding);
continue;
}
match dec.send_data(data, None, None, None) {
Err(e) => {
warn!("Error sending data to the decoder: {}", e);
}
Ok(()) => {
if let Ok(p) = dec.get_picture() {
match p.pixel_layout() {
dav1d::PixelLayout::I420 => {
let y = p.plane(dav1d::PlanarImageComponent::Y);
let u = p.plane(dav1d::PlanarImageComponent::U);
let v = p.plane(dav1d::PlanarImageComponent::V);
match output_encoding.as_str() {
"yuv420" => {
let mut y = y.to_vec();
let mut u = u.to_vec();
let mut v = v.to_vec();
y.append(&mut u);
y.append(&mut v);
let arrow = y.into_arrow();
metadata.parameters.insert(
"encoding".to_string(),
dora_node_api::Parameter::String(
"yuv420".to_string(),
),
);
metadata.parameters.insert(
"width".to_string(),
dora_node_api::Parameter::Integer(
p.width() as i64
),
);
metadata.parameters.insert(
"height".to_string(),
dora_node_api::Parameter::Integer(
p.height() as i64
),
);

node.send_output(id, metadata.parameters, arrow)
.unwrap();
}
"bgr8" => {
let y = yuv420_to_bgr(
&y,
&u,
&v,
p.width(),
p.height(),
);
let arrow = y.into_arrow();
metadata.parameters.insert(
"encoding".to_string(),
dora_node_api::Parameter::String(
"bgr8".to_string(),
),
);
node.send_output(id, metadata.parameters, arrow)
.unwrap();
}
_ => {
warn!(
"Unsupported output encoding {}",
output_encoding
);
continue;
}
}
}
dav1d::PixelLayout::I400 => {
let y = p.plane(dav1d::PlanarImageComponent::Y);
match p.bit_depth() {
8 => {
let y = y.to_vec();
let arrow = y.into_arrow();
metadata.parameters.insert(
"encoding".to_string(),
dora_node_api::Parameter::String(
"mono8".to_string(),
),
);
node.send_output(id, metadata.parameters, arrow)
.unwrap();
}
10 | 12 => {
let vec16: Vec<u16> =
bytemuck::cast_slice(&y).to_vec();
let arrow = vec16.into_arrow();
metadata.parameters.insert(
"encoding".to_string(),
dora_node_api::Parameter::String(
"mono16".to_string(),
),
);
node.send_output(id, metadata.parameters, arrow)
.unwrap();
}
_ => {
warn!("Unsupported bit depth {}", p.bit_depth());
continue;
}
}
}
_ => {
warn!("Unsupported pixel layout");
continue;
}
};
}
}
}
} else {
warn!("Unsupported data type {}", data.data_type());
continue;
}
}
None => break,
Some(_) => break,
}
}
Ok(())
}

#[cfg(feature = "python")]
use pyo3::{
pyfunction, pymodule,
types::{PyModule, PyModuleMethods},
wrap_pyfunction, Bound, PyResult, Python,
};

#[cfg(feature = "python")]
#[pyfunction]
fn py_main(_py: Python) -> eyre::Result<()> {
lib_main()
}

#[cfg(feature = "python")]
#[pymodule]
fn dora_dav1d(_py: Python, m: Bound<'_, PyModule>) -> PyResult<()> {
m.add_function(wrap_pyfunction!(py_main, &m)?)?;
m.add("__version__", env!("CARGO_PKG_VERSION"))?;
Ok(())
}

+ 3
- 0
node-hub/dora-dav1d/src/main.rs View File

@@ -0,0 +1,3 @@
fn main() -> eyre::Result<()> {
dora_dav1d::lib_main()
}

+ 5
- 3
node-hub/dora-distil-whisper/pyproject.toml View File

@@ -1,6 +1,6 @@
[project]
name = "dora-distil-whisper"
version = "0.3.10"
version = "0.3.11"
authors = [
    { name = "Haixuan Xavier Tao", email = "tao.xavier@outlook.com" },
    { name = "Enzo Le Van", email = "dev@enzo-le-van.fr" },
@@ -8,7 +8,7 @@ authors = [
description = "Dora dora-distil-whisper"
license = { text = "MIT" }
readme = "README.md"
requires-python = ">=3.8"
requires-python = ">=3.9"

dependencies = [
    "dora-rs >= 0.3.9",
@@ -16,7 +16,9 @@ dependencies = [
    "pyarrow >= 5.0.0",
    "transformers >= 4.0.0",
    "accelerate >= 0.29.2",
    "torch >= 2.2.0",
    "torch >= 2.7.0",
    "torchvision >= 0.22",
    "torchaudio >= 2.7.0",
    "modelscope >= 1.18.1",
    "mlx-whisper >= 0.4.1; sys_platform == 'darwin'",
]


+ 1
- 1
node-hub/dora-echo/pyproject.toml View File

@@ -1,6 +1,6 @@
[project]
name = "dora-echo"
version = "0.3.10"
version = "0.3.11"
authors = [
    { name = "Haixuan Xavier Tao", email = "tao.xavier@outlook.com" },
    { name = "Enzo Le Van", email = "dev@enzo-le-van.fr" },


+ 15
- 14
node-hub/dora-internvl/pyproject.toml View File

@@ -1,9 +1,9 @@
[project]
name = "dora-internvl"
version = "0.3.10"
version = "0.3.11"
authors = [
    { name = "Haixuan Xavier Tao", email = "tao.xavier@outlook.com" },
    { name = "Enzo Le Van", email = "dev@enzo-le-van.fr" },
]
description = "Dora Node for VLM"
license = { text = "MIT" }
@@ -12,17 +12,18 @@ requires-python = ">=3.8"

dependencies = [
    "dora-rs >= 0.3.9",
    "numpy < 2.0.0",
    "torch >= 2.2.0",
    "torchvision >= 0.17",
    "torch >= 2.7.0",
    "torchvision >= 0.22",
    "torchaudio >= 2.7.0",
    "transformers >= 4.11.3",
    "pillow >= 10.0.0",
    "bitsandbytes >= 0.41.0",
    "einops >= 0.6.1",
    "einops-exts >= 0.0.4",
    "timm >= 0.9.12",
    "sentencepiece >= 0.1.99",
]

[dependency-groups]


+ 85
- 39
node-hub/dora-ios-lidar/dora_ios_lidar/main.py View File

@@ -1,5 +1,6 @@
"""TODO: Add docstring.""" """TODO: Add docstring."""


import os
from threading import Event from threading import Event


import cv2 import cv2
@@ -7,6 +8,11 @@ import numpy as np
import pyarrow as pa import pyarrow as pa
from dora import Node from dora import Node
from record3d import Record3DStream from record3d import Record3DStream
from scipy.spatial.transform import Rotation

image_width = os.getenv("IMAGE_WIDTH")
image_height = os.getenv("IMAGE_HEIGHT")
ROTATE = os.getenv("ROTATE")




class DemoApp: class DemoApp:
@@ -54,6 +60,21 @@ class DemoApp:
            [[coeffs.fx, 0, coeffs.tx], [0, coeffs.fy, coeffs.ty], [0, 0, 1]],
        )

    def get_camera_pose(self):
        """Get the camera pose."""
        pose = self.session.get_camera_pose()
        rot = Rotation.from_quat([pose.qx, pose.qy, pose.qz, pose.qw])
        euler = rot.as_euler("xyz", degrees=False)

        return [
            pose.tx,
            pose.ty,
            pose.tz,
            pose.qx,
            euler[1],
            euler[2],
        ]

    def start_processing_stream(self):
        """TODO: Add docstring."""
        node = Node()
@@ -61,47 +82,72 @@ class DemoApp:
        for event in node:
            if self.stop:
                break
            if event["type"] == "INPUT":
                if event["id"] == "TICK":
                    self.event.wait()  # Wait for new frame to arrive

                    # Copy the newly arrived RGBD frame
                    depth = self.session.get_depth_frame()
                    rgb = self.session.get_rgb_frame()
                    intrinsic_mat = self.get_intrinsic_mat_from_coeffs(
                        self.session.get_intrinsic_mat(),
                    )

                    if depth.shape != rgb.shape:
                        rgb = cv2.resize(rgb, (depth.shape[1], depth.shape[0]))

                    node.send_output(
                        "image",
                        pa.array(rgb.ravel()),
                        metadata={
                            "encoding": "rgb8",
                            "width": rgb.shape[1],
                            "height": rgb.shape[0],
                        },
                    )

                    node.send_output(
                        "depth",
                        pa.array(depth.ravel().astype(np.float64())),
                        metadata={
                            "width": depth.shape[1],
                            "height": depth.shape[0],
                            "encoding": "CV_64F",
                            "focal": [
                                int(intrinsic_mat[0, 0]),
                                int(intrinsic_mat[1, 1]),
                            ],
                            "resolution": [
                                int(intrinsic_mat[0, 2]),
                                int(intrinsic_mat[1, 2]),
                            ],
                        },
                    )
            if event["type"] == "INPUT":
                self.event.wait()  # Wait for new frame to arrive

                # Copy the newly arrived RGBD frame
                depth = self.session.get_depth_frame()
                rgb = self.session.get_rgb_frame()
                intrinsic_mat = self.get_intrinsic_mat_from_coeffs(
                    self.session.get_intrinsic_mat(),
                )
                # pose = self.get_camera_pose()

                if depth.shape != rgb.shape:
                    rgb = cv2.resize(rgb, (depth.shape[1], depth.shape[0]))
                if image_width is not None and image_height is not None:
                    f_0 = intrinsic_mat[0, 0] * (int(image_height) / rgb.shape[0])
                    f_1 = intrinsic_mat[1, 1] * (int(image_width) / rgb.shape[1])
                    r_0 = intrinsic_mat[0, 2] * (int(image_height) / rgb.shape[0])
                    r_1 = intrinsic_mat[1, 2] * (int(image_width) / rgb.shape[1])
                    if ROTATE == "ROTATE_90_CLOCKWISE":
                        rgb = cv2.rotate(rgb, cv2.ROTATE_90_CLOCKWISE)
                        depth = cv2.rotate(depth, cv2.ROTATE_90_CLOCKWISE)
                    rgb = cv2.resize(rgb, (int(image_width), int(image_height)))
                    depth = cv2.resize(
                        depth,
                        (int(image_width), int(image_height)),
                        interpolation=cv2.INTER_NEAREST,
                    )
                else:
                    f_0 = intrinsic_mat[0, 0]
                    f_1 = intrinsic_mat[1, 1]
                    r_0 = intrinsic_mat[0, 2]
                    r_1 = intrinsic_mat[1, 2]

                node.send_output(
                    "image",
                    pa.array(rgb.ravel()),
                    metadata={
                        "encoding": "rgb8",
                        "width": rgb.shape[1],
                        "height": rgb.shape[0],
                    },
                )

                depth = (np.array(depth) * 1_000).astype(np.uint16)
                depth = np.clip(depth, 0, 4095)  # Clip depth to uint12
                node.send_output(
                    "depth",
                    pa.array(depth.ravel()),
                    metadata={
                        "width": depth.shape[1],
                        "height": depth.shape[0],
                        "encoding": "mono16",
                        "focal": [
                            int(f_0),
                            int(f_1),
                        ],
                        "resolution": [
                            int(r_0),
                            int(r_1),
                        ],
                        # "roll": pose[3],
                        # "pitch": pose[4],  # Adding 90 degrees to pitch
                        # "yaw": pose[5],
                    },
                )

            self.event.clear()




+ 2
- 2
node-hub/dora-ios-lidar/pyproject.toml View File

@@ -1,13 +1,13 @@
[project]
name = "dora-ios-lidar"
version = "0.3.10"
version = "0.3.11"
authors = [{ name = "Your Name", email = "email@email.com" }]
description = "dora-ios-lidar"
license = { text = "MIT" }
readme = "README.md"
requires-python = ">=3.8"

dependencies = ["dora-rs >= 0.3.9", "opencv-python>=4.11.0.86", "record3d>=1.4"]
dependencies = ["dora-rs >= 0.3.9", "opencv-python>=4.11.0.86", "record3d>=1.4", "scipy"]

[dependency-groups]
dev = ["pytest >=8.1.1", "ruff >=0.9.1"]


+ 1
- 1
node-hub/dora-keyboard/pyproject.toml View File

@@ -1,6 +1,6 @@
[project]
name = "dora-keyboard"
version = "0.3.10"
version = "0.3.11"
authors = [
    { name = "Haixuan Xavier Tao", email = "tao.xavier@outlook.com" },
    { name = "Enzo Le Van", email = "dev@enzo-le-van.fr" },


+ 41
- 5
node-hub/dora-kit-car/README.md View File

@@ -1,10 +1,16 @@
# Chongyou Car Control
# dora-kit-car control

## Introduction

Control of the movement of the trolley by receiving texts
Dora Kit Car is a Dora node for controlling a differential-drive mobile robot: it can move forward/backward and turn left/right. Developed in Rust with Python API support.

## Usage
## Highlights

- Compatible with the ROS geometry_msgs/Twist.msg format, utilizing only:
  - linear.x (positive: forward movement, negative: backward movement)
  - angular.z (positive: left turn, negative: right turn)

## Raw Message Definition

Accepts an array of six f64's

@@ -14,6 +20,36 @@ see [https://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/Twist.html](https://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/Twist.html)

## Environment

The default serial port is `/dev/ttyUSB0`
Set the environment variable `SERIAL_PORT` to specify the serial port of the car device; `/dev/ttyUSB0` is the default value.

## Demo Video

[![Dora Kit Car Video](https://yt3.ggpht.com/92FGXQL59VsiXim13EJQek4IB7CRI-9SjmW3LhH8PFY16oBXqKUvkKhg5UdzLiGCOmoSuTvdpQxIuw=s640-rw-nd-v1)](https://youtu.be/B7zGHtRUZSo)

## Getting Started

```yaml
nodes:
- id: keyboard-listener # Run on car
build: pip install dora-keyboard
path: dora-keyboard
inputs:
tick: dora/timer/millis/10
outputs:
- twist # for example [2.0,0.0,0.0,0.0,0.0,1.0]

- id: car
build: pip install dora-kit-car
path: dora-kit-car
inputs:
keyboard: keyboard-listener/twist
env:
SERIAL_PORT: /dev/ttyUSB0

```
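
A custom node can also publish the twist array directly. A minimal sketch (assumes its `twist` output is wired to the `car` node as above; following the ROS Twist convention, linear.x is in m/s and angular.z in rad/s):

```python
import pyarrow as pa
from dora import Node

node = Node()

# [linear.x, linear.y, linear.z, angular.x, angular.y, angular.z]:
# dora-kit-car only reads linear.x (forward/backward) and angular.z (turn).
twist = [0.2, 0.0, 0.0, 0.0, 0.0, 0.5]
node.send_output("twist", pa.array(twist, type=pa.float64()))
```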

## License

The MIT License (MIT)


Added definition of default serial port number. Can additionally define the environment variable `SERIAL_PORT`
Copyright (c) 2024-present, Leon

+ 1
- 1
node-hub/dora-kokoro-tts/pyproject.toml View File

@@ -1,6 +1,6 @@
[project]
name = "dora-kokoro-tts"
version = "0.3.10"
version = "0.3.11"
authors = [{ name = "Your Name", email = "email@email.com" }]
description = "dora-kokoro-tts"
license = { text = "MIT" }


+ 0
- 3
node-hub/dora-llama-cpp-python/pyproject.toml View File

@@ -9,9 +9,6 @@ requires-python = ">=3.9"


dependencies = [
    "dora-rs >= 0.3.9",
    "torch == 2.4.0",
    "torchvision >= 0.19",
    "torchaudio >= 2.1.0",
    "opencv-python >= 4.1.1",
    "modelscope >= 1.18.1",
    "huggingface-hub>=0.29.0",


+ 3
- 2
node-hub/dora-magma/pyproject.toml View File

@@ -13,8 +13,9 @@ authors = [{ name = "Munish Mummadi", email = "moneymindedmunish1@gmail.com" }]
dependencies = [
    "dora-rs >= 0.3.9",
    "numpy < 2",
    "torch >= 2.4.0",
    "torchvision >= 0.19",
    "torch >= 2.7.0",
    "torchvision >= 0.22",
    "torchaudio >= 2.7.0",
    "transformers>=4.45",
    "opencv-python >= 4.1.1",
    "accelerate>=1.5.1",


+ 40
- 0
node-hub/dora-mediapipe/README.md View File

@@ -0,0 +1,40 @@
# dora-mediapipe

## Getting started

- Install it with uv:

```bash
uv venv -p 3.11 --seed
uv pip install -e .
```

## Contribution Guide

- Format with [ruff](https://docs.astral.sh/ruff/):

```bash
uv pip install ruff
uv run ruff check . --fix
```

- Lint with ruff:

```bash
uv run ruff check .
```

- Test with [pytest](https://github.com/pytest-dev/pytest)

```bash
uv pip install pytest
uv run pytest . # Test
```

## YAML Specification

## Examples
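
A minimal sketch of a downstream consumer, assuming the `points2d` output emitted by `dora_mediapipe/main.py` (a flattened `[x0, y0, x1, y1, ...]` array of pixel coordinates):

```python
from dora import Node

node = Node()

for event in node:
    if event["type"] == "INPUT" and event["id"] == "points2d":
        # One (x, y) pair per detected pose landmark.
        landmarks = event["value"].to_numpy().reshape((-1, 2))
        print(f"got {len(landmarks)} pose landmarks")
```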

## License

dora-mediapipe's code is released under the MIT License.

+ 13
- 0
node-hub/dora-mediapipe/dora_mediapipe/__init__.py View File

@@ -0,0 +1,13 @@
"""TODO: Add docstring."""

import os

# Define the path to the README file relative to the package directory
readme_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "README.md")

# Read the content of the README file
try:
with open(readme_path, encoding="utf-8") as f:
__doc__ = f.read()
except FileNotFoundError:
__doc__ = "README file not found."

+ 6
- 0
node-hub/dora-mediapipe/dora_mediapipe/__main__.py View File

@@ -0,0 +1,6 @@
"""TODO: Add docstring."""

from .main import main

if __name__ == "__main__":
main()

+ 136
- 0
node-hub/dora-mediapipe/dora_mediapipe/main.py View File

@@ -0,0 +1,136 @@
"""TODO: Add docstring."""

import cv2
import mediapipe as mp
import numpy as np
import pyarrow as pa
from dora import Node

# Initialize MediaPipe Pose
mp_pose = mp.solutions.pose
pose = mp_pose.Pose()
mp_draw = mp.solutions.drawing_utils


def get_3d_coordinates(landmark, depth_frame, w, h, resolution, focal_length):
"""Convert 2D landmark coordinates to 3D coordinates."""
cx, cy = int(landmark.x * w), int(landmark.y * h)
if 0 < cx < w and 0 < cy < h:
depth = depth_frame[cy, cx] / 1_000.0
if depth > 0:
fx, fy = focal_length
ppx, ppy = resolution
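# Pinhole back-projection: (pixel - principal point) * depth / focal length.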
x = (cy - ppy) * depth / fy
y = (cx - ppx) * depth / fx

# Convert to right-handed coordinate system
return [x, -y, depth]
return [0, 0, 0]


def get_image(event: dict) -> np.ndarray:
"""Convert the image from the event to a numpy array.

Args:
event (dict): The event containing the image data.

"""
storage = event["value"]
metadata = event["metadata"]
encoding = metadata["encoding"]
width = metadata["width"]
height = metadata["height"]

if (
encoding == "bgr8"
or encoding == "rgb8"
or encoding in ["jpeg", "jpg", "jpe", "bmp", "webp", "png"]
):
channels = 3
storage_type = np.uint8
else:
raise RuntimeError(f"Unsupported image encoding: {encoding}")

if encoding == "bgr8":
frame = (
storage.to_numpy().astype(storage_type).reshape((height, width, channels))
)
frame = frame[:, :, ::-1] # OpenCV image (BGR to RGB)
elif encoding == "rgb8":
frame = (
storage.to_numpy().astype(storage_type).reshape((height, width, channels))
)
elif encoding in ["jpeg", "jpg", "jpe", "bmp", "webp", "png"]:
storage = storage.to_numpy()
frame = cv2.imdecode(storage, cv2.IMREAD_COLOR)
frame = frame[:, :, ::-1] # OpenCV image (BGR to RGB)
else:
raise RuntimeError(f"Unsupported image encoding: {encoding}")
return frame


def main():
"""TODO: Add docstring."""
node = Node()
depth = None
focal_length = None
resolution = None

for event in node:
if event["type"] == "INPUT":
event_id = event["id"]
if "image" in event_id:
rgb_image = get_image(event)
width = rgb_image.shape[1]
height = rgb_image.shape[0]
pose_results = pose.process(rgb_image)
if pose_results.pose_landmarks:
values = pose_results.pose_landmarks.landmark
values = np.array(
[
[landmark.x * width, landmark.y * height]
for landmark in pose_results.pose_landmarks.landmark
]
)
# Note: the points2d output must be declared in the dataflow YAML.
node.send_output(
output_id="points2d",
data=pa.array(values.ravel()),
metadata={},
)
if depth is not None:
values = np.array(
[
get_3d_coordinates(
landmark,
depth,
width,
height,
resolution,
focal_length,
)
for landmark in pose_results.pose_landmarks.landmark
]
)
# Note: the points3d output must be declared in the dataflow YAML.
node.send_output(
output_id="points3d",
data=pa.array(values.ravel()),
metadata={},
)

else:
print("No pose landmarks detected.")
elif "depth" in event_id:
metadata = event["metadata"]
_encoding = metadata["encoding"]
width = metadata["width"]
height = metadata["height"]
focal_length = metadata["focal_length"]
resolution = metadata["resolution"]

depth = event["value"].to_numpy().reshape((height, width))


if __name__ == "__main__":
main()

+ 25
- 0
node-hub/dora-mediapipe/pyproject.toml View File

@@ -0,0 +1,25 @@
[project]
name = "dora-mediapipe"
version = "0.0.0"
authors = [{ name = "Your Name", email = "email@email.com" }]
description = "dora-mediapipe"
license = { text = "MIT" }
readme = "README.md"
requires-python = ">=3.8"

dependencies = [
"dora-rs >= 0.3.9",
"mediapipe>=0.10.14",
]

[dependency-groups]
dev = ["pytest >=8.1.1", "ruff >=0.9.1"]

[project.scripts]
dora-mediapipe = "dora_mediapipe.main:main"

[tool.ruff.lint]
extend-select = [
"D", # pydocstyle
"UP"
]

+ 13
- 0
node-hub/dora-mediapipe/tests/test_dora_mediapipe.py View File

@@ -0,0 +1,13 @@
"""Test module for dora_mediapipe package."""

import pytest


def test_import_main():
"""Test importing and running the main function."""
from dora_mediapipe.main import main

# Check that everything is working, and catch Dora RuntimeError
# as we're not running in a Dora dataflow.
with pytest.raises(RuntimeError):
main()

+ 2252
- 0
node-hub/dora-mediapipe/uv.lock
File diff suppressed because it is too large
View File


+ 1
- 1
node-hub/dora-microphone/pyproject.toml View File

@@ -1,6 +1,6 @@
[project]
name = "dora-microphone"
version = "0.3.10"
version = "0.3.11"
authors = [
    { name = "Haixuan Xavier Tao", email = "tao.xavier@outlook.com" },
    { name = "Enzo Le Van", email = "dev@enzo-le-van.fr" },


+ 1
- 1
node-hub/dora-object-to-pose/Cargo.toml View File

@@ -1,6 +1,6 @@
[package]
name = "dora-object-to-pose"
version = "0.3.10"
version = "0.3.11"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html


+ 0
- 2
node-hub/dora-object-to-pose/pyproject.toml View File

@@ -8,8 +8,6 @@ dynamic = ["version"]
license = { text = "MIT" } license = { text = "MIT" }
requires-python = ">=3.8" requires-python = ">=3.8"


dependencies = ["maturin>=1.8.2"]

scripts = { "dora-object-to-pose" = "dora_object_to_pose:py_main" } scripts = { "dora-object-to-pose" = "dora_object_to_pose:py_main" }


[tool.maturin] [tool.maturin]


+ 56
- 45
node-hub/dora-object-to-pose/src/lib.rs View File

@@ -1,7 +1,7 @@
use core::f32;
use dora_node_api::{
    arrow::{
        array::{AsArray, Float64Array, UInt8Array},
        array::{AsArray, UInt16Array, UInt8Array},
        datatypes::{Float32Type, Int64Type},
    },
    dora_core::config::DataId,
@@ -11,49 +11,58 @@ use eyre::Result;
use std::collections::HashMap;

fn points_to_pose(points: &[(f32, f32, f32)]) -> Vec<f32> {
    let (_x, _y, _z, sum_xy, sum_x2, sum_y2, n, x_min, x_max, y_min, y_max, z_min, z_max) =
        points.iter().fold(
    let (
        _sum_x,
        _sum_y,
        sum_z,
        sum_xy,
        sum_x2,
        sum_y2,
        n,
        x_min,
        x_max,
        y_min,
        y_max,
        _z_min,
        _z_max,
    ) = points.iter().fold(
        (
            0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, -10.0, 10.0, -10.0, 10., -10.0,
        ),
        |(
            acc_x,
            acc_y,
            acc_z,
            acc_xy,
            acc_x2,
            acc_y2,
            acc_n,
            acc_x_min,
            acc_x_max,
            acc_y_min,
            acc_y_max,
            acc_z_min,
            acc_z_max,
        ),
         (x, y, z)| {
            (
                acc_x + x,
                acc_y + y,
                acc_z + z,
                acc_xy + x * y,
                acc_x2 + x * x,
                acc_y2 + y * y,
                acc_n + 1.,
                f32::min(acc_x_min, *x),
                f32::max(acc_x_max, *x),
                f32::min(acc_y_min, *y),
                f32::max(acc_y_max, *y),
                f32::min(acc_z_min, *z),
                f32::max(acc_z_max, *z),
            )
        },
    );
    let (mean_x, mean_y, mean_z) = (
        (x_max + x_min) / 2.,
        (y_max + y_min) / 2.,
        (z_max + z_min) / 2.,
    );
    let (mean_x, mean_y, mean_z) = ((x_max + x_min) / 2., (y_max + y_min) / 2., sum_z / n);


    // Compute covariance and standard deviations
    let cov = sum_xy / n - mean_x * mean_y;
@@ -116,7 +125,8 @@ pub fn lib_main() -> Result<()> {
                    } else {
                        vec![640, 480]
                    };
                    let buffer: &Float64Array = data.as_any().downcast_ref().unwrap();
                    let buffer: &UInt16Array = data.as_any().downcast_ref().unwrap();

                    depth_frame = Some(buffer.clone());
                }
                "masks" => {
@@ -150,11 +160,12 @@ pub fn lib_main() -> Result<()> {
                            let v = i as f32 / width as f32; // Calculate y-coordinate (v)

                            if let Some(z) = z {
                                let z = z as f32;
                                let z = (z as f32) / 1000.;
                                // Skip points that have empty depth or are too far away
                                if z == 0. || z > 20.0 {
                                    return;
                                }

                                if data[i] {
                                    let y = (u - resolution[0] as f32) * z
                                        / focal_length[0] as f32;
@@ -215,7 +226,7 @@ pub fn lib_main() -> Result<()> {
                            let v = i as f32 / width as f32; // Calculate y-coordinate (v)

                            if let Some(z) = z {
                                let z = z as f32;
                                let z = (z as f32) / 1000.;
                                // Skip points that have empty depth or are too far away
                                if z == 0. || z > 5.0 {
                                    return;


+ 1
- 1
node-hub/dora-openai-server/pyproject.toml View File

@@ -1,6 +1,6 @@
[project]
name = "dora-openai-server"
version = "0.3.10"
version = "0.3.11"
authors = [
    { name = "Haixuan Xavier Tao", email = "tao.xavier@outlook.com" },
    { name = "Enzo Le Van", email = "dev@enzo-le-van.fr" },


+ 14
- 12
node-hub/dora-opus/pyproject.toml View File

@@ -1,25 +1,27 @@
[project]
name = "dora-opus"
version = "0.3.10"
version = "0.3.11"
description = "Dora Node for Text translating using Opus"
authors = [
    { name = "Haixuan Xavier Tao", email = "tao.xavier@outlook.com" },
    { name = "Enzo Le Van", email = "dev@enzo-le-van.fr" },
    { name = "Félix Huang", email = "felix.huang.net@gmail.com" },
]

license = { text = "MIT" }
readme = "README.md"
requires-python = ">=3.8"
requires-python = ">=3.9"

dependencies = [
    "dora-rs >= 0.3.9",
    "numpy < 2.0.0",
    "transformers >= 4.45",
    "modelscope >= 1.18.1",
    "sentencepiece >= 0.1.99",
    "torch >= 2.2.0",
    "torch >= 2.7.0",
    "torchvision >= 0.22",
    "torchaudio >= 2.7.0",
    "sacremoses>=0.1.1",
]

[dependency-groups]


+ 4
- 5
node-hub/dora-opus/tests/test_translate.py View File

@@ -1,12 +1,11 @@
"""TODO: Add docstring.""" """TODO: Add docstring."""


import pytest



def test_import_main(): def test_import_main():
"""TODO: Add docstring.""" """TODO: Add docstring."""
from dora_opus.main import main
pass # OPUS is no longer maintained in favor of dora-phi4
# from dora_opus.main import main


# Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow. # Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow.
with pytest.raises(RuntimeError):
main()
# nwith pytest.raises(RuntimeError):
# nmain()

+ 1175
- 0
node-hub/dora-opus/uv.lock
File diff suppressed because it is too large
View File


+ 2
- 0
node-hub/dora-outtetts/README.md View File

@@ -1,5 +1,7 @@
# dora-outtetts

> dora-outtetts is no longer maintained in favor of dora-kokorotts

## Getting started

- Install it with pip:


+ 4
- 14
node-hub/dora-outtetts/dora_outtetts/tests/test_main.py View File

@@ -2,26 +2,16 @@


import os

import pytest

from dora_outtetts.main import load_interface, main

CI = os.getenv("CI", "false") in ["True", "true"]


def test_import_main():
    """TODO: Add docstring."""
    with pytest.raises(RuntimeError):
        main([])
    pass  # Outetts is no longer maintained in favor of dora-kokorotts
    # with pytest.raises(RuntimeError):
    #     main([])


def test_load_interface():
    """TODO: Add docstring."""
    try:
        interface = load_interface()
    except RuntimeError:
        # Error raised by MPS out of memory.
        if CI:
            interface = "ok"

    assert interface is not None
    pass

+ 1
- 1
node-hub/dora-outtetts/pyproject.toml View File

@@ -1,6 +1,6 @@
[project]
name = "dora-outtetts"
version = "0.3.10"
version = "0.3.11"
authors = []
description = "dora-outtetts"
license = { text = "MIT" }


+ 9
- 11
node-hub/dora-parler/pyproject.toml View File

@@ -1,9 +1,9 @@
[project]
name = "dora-parler"
version = "0.3.10"
version = "0.3.11"
authors = [
    { name = "Haixuan Xavier Tao", email = "tao.xavier@outlook.com" },
    { name = "Enzo Le Van", email = "dev@enzo-le-van.fr" },
]
description = "Dora Node for Text to speech with dora Parler-TTS"
license = { text = "MIT" }
@@ -11,14 +11,12 @@ readme = "README.md"
requires-python = ">=3.8"

dependencies = [
    "dora-rs >= 0.3.9",
    "numpy < 2.0.0",
    "torch >= 2.2.0",
    "torchaudio >= 2.2.2",
    "sentencepiece >= 0.1.99",
    "pyaudio >= 0.2.14",
    "modelscope >= 1.18.1",
    "transformers >=4.48.0,<=4.48.0",
]

[tool.uv.sources]


+ 1
- 1
node-hub/dora-phi4/pyproject.toml View File

@@ -1,6 +1,6 @@
[project]
name = "dora-phi4"
version = "0.0.0"
version = "0.3.11"
authors = [{ name = "Somay", email = "ssomay2002@gmail.com" }]
description = "DORA node for Phi-4 multimodal model"
license = { text = "MIT" }


+ 1
- 1
node-hub/dora-piper/pyproject.toml View File

@@ -1,6 +1,6 @@
[project]
name = "dora-piper"
version = "0.3.10"
version = "0.3.11"
authors = [{ name = "Haixuan Xavier Tao", email = "tao.xavier@outlook.com" }]
description = "Dora Node for using Agilex piper"
license = { text = "MIT" }


Some files were not shown because too many files changed in this diff
