
Fixed D100 issues

Mati-ur-rehman-017, 10 months ago
commit 3332c0a1ce (tags/test-git)
100 changed files with 333 additions and 2 deletions
  1. +3 -0 .vscode/settings.json
  2. +2 -0 apis/python/node/dora/cuda.py
  3. +15 -0 apis/python/node/generate_stubs.py
  4. +2 -0 binaries/cli/src/template/python/__node-name__/__node_name__/__init__.py
  5. +2 -0 binaries/cli/src/template/python/__node-name__/__node_name__/__main__.py
  6. +3 -0 binaries/cli/src/template/python/__node-name__/__node_name__/main.py
  7. +2 -0 binaries/cli/src/template/python/operator/operator-template.py
  8. +2 -0 examples/cuda-benchmark/demo_receiver.py
  9. +2 -0 examples/cuda-benchmark/demo_sender.py
  10. +3 -1 examples/cuda-benchmark/helper.py
  11. +2 -0 examples/cuda-benchmark/receiver.py
  12. +2 -0 examples/cuda-benchmark/sender.py
  13. +4 -0 examples/openai-server/openai_api_client.py
  14. +6 -0 examples/piper/convert.py
  15. +2 -0 examples/piper/dummy_inference.py
  16. +2 -0 examples/piper/dummy_inference_2.py
  17. +2 -0 examples/piper/post_process_action.py
  18. +3 -0 examples/piper/record.py
  19. +2 -0 examples/piper/replay.py
  20. +4 -0 examples/python-operator-dataflow/file_saver_op.py
  21. +2 -0 examples/python-operator-dataflow/keyboard_op.py
  22. +5 -0 examples/python-operator-dataflow/llm_op.py
  23. +3 -0 examples/python-operator-dataflow/microphone_op.py
  24. +3 -0 examples/python-operator-dataflow/object_detection.py
  25. +4 -0 examples/python-operator-dataflow/plot.py
  26. +6 -0 examples/python-operator-dataflow/sentence_transformers_op.py
  27. +2 -0 examples/python-operator-dataflow/utils.py
  28. +5 -0 examples/python-operator-dataflow/webcam.py
  29. +3 -0 examples/python-operator-dataflow/whisper_op.py
  30. +2 -0 examples/python-ros2-dataflow/control_node.py
  31. +2 -0 examples/python-ros2-dataflow/random_turtle.py
  32. +2 -0 examples/reachy2/parse_bbox.py
  33. +2 -0 examples/reachy2/parse_bbox_minimal.py
  34. +6 -1 examples/reachy2/pick_place.py
  35. +4 -0 examples/reachy2/state_machine.py
  36. +4 -0 examples/translation/pretty_print.py
  37. +2 -0 libraries/extensions/ros2-bridge/python/test_utils.py
  38. +2 -0 node-hub/dora-argotranslate/dora_argotranslate/__init__.py
  39. +3 -0 node-hub/dora-argotranslate/dora_argotranslate/main.py
  40. +3 -0 node-hub/dora-argotranslate/tests/test_translate.py
  41. +2 -0 node-hub/dora-distil-whisper/dora_distil_whisper/__init__.py
  42. +5 -0 node-hub/dora-distil-whisper/dora_distil_whisper/main.py
  43. +3 -0 node-hub/dora-distil-whisper/tests/test_distil_whisper.py
  44. +2 -0 node-hub/dora-echo/dora_echo/__init__.py
  45. +3 -0 node-hub/dora-echo/dora_echo/main.py
  46. +3 -0 node-hub/dora-echo/tests/test_dora_echo.py
  47. +2 -0 node-hub/dora-internvl/dora_internvl/__init__.py
  48. +7 -0 node-hub/dora-internvl/dora_internvl/main.py
  49. +3 -0 node-hub/dora-internvl/tests/test_dora_internvl.py
  50. +2 -0 node-hub/dora-ios-lidar/dora_ios_lidar/__init__.py
  51. +2 -0 node-hub/dora-ios-lidar/dora_ios_lidar/__main__.py
  52. +10 -0 node-hub/dora-ios-lidar/dora_ios_lidar/main.py
  53. +3 -0 node-hub/dora-ios-lidar/tests/test_dora_ios_lidar.py
  54. +2 -0 node-hub/dora-keyboard/dora_keyboard/__init__.py
  55. +3 -0 node-hub/dora-keyboard/dora_keyboard/main.py
  56. +3 -0 node-hub/dora-keyboard/tests/test_keyboard.py
  57. +2 -0 node-hub/dora-kokoro-tts/dora_kokoro_tts/__init__.py
  58. +2 -0 node-hub/dora-kokoro-tts/dora_kokoro_tts/__main__.py
  59. +3 -0 node-hub/dora-kokoro-tts/dora_kokoro_tts/main.py
  60. +3 -0 node-hub/dora-kokoro-tts/tests/test_dora_kokoro_tts.py
  61. +2 -0 node-hub/dora-microphone/dora_microphone/__init__.py
  62. +4 -0 node-hub/dora-microphone/dora_microphone/main.py
  63. +3 -0 node-hub/dora-microphone/tests/test_microphone.py
  64. +2 -0 node-hub/dora-openai-server/dora_openai_server/__init__.py
  65. +12 -0 node-hub/dora-openai-server/dora_openai_server/main.py
  66. +3 -0 node-hub/dora-openai-server/tests/test_dora_openai_server.py
  67. +2 -0 node-hub/dora-opus/dora_opus/__init__.py
  68. +4 -0 node-hub/dora-opus/dora_opus/main.py
  69. +3 -0 node-hub/dora-opus/tests/test_translate.py
  70. +2 -0 node-hub/dora-outtetts/dora_outtetts/__init__.py
  71. +2 -0 node-hub/dora-outtetts/dora_outtetts/__main__.py
  72. +5 -0 node-hub/dora-outtetts/dora_outtetts/main.py
  73. +4 -0 node-hub/dora-outtetts/dora_outtetts/tests/test_main.py
  74. +2 -0 node-hub/dora-parler/dora_parler/__init__.py
  75. +10 -0 node-hub/dora-parler/dora_parler/main.py
  76. +3 -0 node-hub/dora-parler/tests/test_parler_tts.py
  77. +2 -0 node-hub/dora-piper/dora_piper/__init__.py
  78. +3 -0 node-hub/dora-piper/dora_piper/main.py
  79. +3 -0 node-hub/dora-piper/tests/test_piper.py
  80. +2 -0 node-hub/dora-pyaudio/dora_pyaudio/__init__.py
  81. +2 -0 node-hub/dora-pyaudio/dora_pyaudio/__main__.py
  82. +2 -0 node-hub/dora-pyaudio/dora_pyaudio/main.py
  83. +3 -0 node-hub/dora-pyaudio/tests/test_dora_pyaudio.py
  84. +2 -0 node-hub/dora-pyorbbecksdk/dora_pyorbbecksdk/__init__.py
  85. +13 -0 node-hub/dora-pyorbbecksdk/dora_pyorbbecksdk/main.py
  86. +3 -0 node-hub/dora-pyorbbecksdk/tests/test_pyorbbecksdk.py
  87. +2 -0 node-hub/dora-pyrealsense/dora_pyrealsense/__init__.py
  88. +3 -0 node-hub/dora-pyrealsense/dora_pyrealsense/main.py
  89. +3 -0 node-hub/dora-pyrealsense/tests/test_dora_pyrealsense.py
  90. +2 -0 node-hub/dora-qwen/dora_qwen/__init__.py
  91. +2 -0 node-hub/dora-qwen/dora_qwen/__main__.py
  92. +7 -0 node-hub/dora-qwen/dora_qwen/main.py
  93. +3 -0 node-hub/dora-qwen/tests/test_dora_qwen.py
  94. +2 -0 node-hub/dora-qwen2-5-vl/dora_qwen2_5_vl/__init__.py
  95. +3 -0 node-hub/dora-qwen2-5-vl/dora_qwen2_5_vl/main.py
  96. +3 -0 node-hub/dora-qwen2-5-vl/tests/test_dora_qwenvl.py
  97. +2 -0 node-hub/dora-qwenvl/dora_qwenvl/__init__.py
  98. +3 -0 node-hub/dora-qwenvl/dora_qwenvl/main.py
  99. +3 -0 node-hub/dora-qwenvl/tests/test_dora_qwenvl.py
  100. +2 -0 node-hub/dora-rdt-1b/dora_rdt_1b/__init__.py
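
For context, D100 is the pydocstyle rule (exposed through Ruff's D ruleset) for a missing docstring in a public module; the sibling D1xx rules flag missing docstrings on public classes, functions, and __init__ methods. The fix repeated in every diff below is the same one-line pattern: a placeholder docstring inserted as the first statement of the module, class, or function. A minimal sketch of the pattern, using a hypothetical module rather than a file from this commit:

"""TODO: Add docstring."""  # D100 fix: module docstring as the very first statement

import os


class Config:
    """TODO: Add docstring."""  # D101 fix: docstring in a public class

    def __init__(self):
        """TODO: Add docstring."""  # D107 fix: docstring in __init__
        self.readme_path = os.path.join(os.path.dirname(__file__), "README.md")


def main():
    """TODO: Add docstring."""  # D103 fix: docstring in a public function
    print(Config().readme_path)


if __name__ == "__main__":
    main()

Assuming the project enables Ruff's pydocstyle rules, a command such as ruff check --select D100 . lists the offending modules; the TODO placeholders can later be replaced with real docstrings.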

+3 -0 .vscode/settings.json

@@ -0,0 +1,3 @@
{
"cmake.sourceDirectory": "/home/granger/coding/dora/examples/cmake-dataflow"
}

+2 -0 apis/python/node/dora/cuda.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import pyarrow as pa

# Make sure to install torch with cuda


+15 -0 apis/python/node/generate_stubs.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import argparse
import ast
import importlib
@@ -11,6 +13,7 @@ from typing import Any, Dict, List, Optional, Set, Tuple, Union


def path_to_type(*elements: str) -> ast.AST:
"""TODO: Add docstring."""
base: ast.AST = ast.Name(id=elements[0], ctx=ast.Load())
for e in elements[1:]:
base = ast.Attribute(value=base, attr=e, ctx=ast.Load())
@@ -66,6 +69,7 @@ BUILTINS: Dict[str, Union[None, Tuple[List[ast.AST], ast.AST]]] = {


def module_stubs(module: Any) -> ast.Module:
"""TODO: Add docstring."""
types_to_import = {"typing"}
classes = []
functions = []
@@ -100,6 +104,7 @@ def module_stubs(module: Any) -> ast.Module:
def class_stubs(
cls_name: str, cls_def: Any, element_path: List[str], types_to_import: Set[str],
) -> ast.ClassDef:
"""TODO: Add docstring."""
attributes: List[ast.AST] = []
methods: List[ast.AST] = []
magic_methods: List[ast.AST] = []
@@ -200,6 +205,7 @@ def data_descriptor_stub(
element_path: List[str],
types_to_import: Set[str],
) -> Union[Tuple[ast.AnnAssign, ast.Expr], Tuple[ast.AnnAssign]]:
"""TODO: Add docstring."""
annotation = None
doc_comment = None

@@ -231,6 +237,7 @@ def function_stub(
*,
in_class: bool,
) -> ast.FunctionDef:
"""TODO: Add docstring."""
body: List[ast.AST] = []
doc = inspect.getdoc(fn_def)
if doc is not None:
@@ -261,6 +268,7 @@ def arguments_stub(
element_path: List[str],
types_to_import: Set[str],
) -> ast.arguments:
"""TODO: Add docstring."""
real_parameters: Mapping[str, inspect.Parameter] = inspect.signature(
callable_def,
).parameters
@@ -360,6 +368,7 @@ def arguments_stub(
def returns_stub(
callable_name: str, doc: str, element_path: List[str], types_to_import: Set[str],
) -> Optional[ast.AST]:
"""TODO: Add docstring."""
m = re.findall(r"^ *:rtype: *([^\n]*) *$", doc, re.MULTILINE)
if len(m) == 0:
builtin = BUILTINS.get(callable_name)
@@ -379,6 +388,7 @@ def returns_stub(
def convert_type_from_doc(
type_str: str, element_path: List[str], types_to_import: Set[str],
) -> ast.AST:
"""TODO: Add docstring."""
type_str = type_str.strip()
return parse_type_to_ast(type_str, element_path, types_to_import)

@@ -387,6 +397,7 @@ def parse_type_to_ast(
type_str: str, element_path: List[str], types_to_import: Set[str],
) -> ast.AST:
# let's tokenize
"""TODO: Add docstring."""
tokens = []
current_token = ""
for c in type_str:
@@ -416,6 +427,7 @@ def parse_type_to_ast(
# then it's easy
def parse_sequence(sequence: List[Any]) -> ast.AST:
# we split based on "or"
"""TODO: Add docstring."""
or_groups: List[List[str]] = [[]]
print(sequence)
# TODO: Fix sequence
@@ -467,6 +479,7 @@ def parse_type_to_ast(
def concatenated_path_to_type(
path: str, element_path: List[str], types_to_import: Set[str],
) -> ast.AST:
"""TODO: Add docstring."""
parts = path.split(".")
if any(not p for p in parts):
raise ValueError(
@@ -478,6 +491,7 @@ def concatenated_path_to_type(


def build_doc_comment(doc: str) -> Optional[ast.Expr]:
"""TODO: Add docstring."""
lines = [line.strip() for line in doc.split("\n")]
clean_lines = []
for line in lines:
@@ -489,6 +503,7 @@ def build_doc_comment(doc: str) -> Optional[ast.Expr]:


def format_with_ruff(file: str) -> None:
"""TODO: Add docstring."""
subprocess.check_call(["python", "-m", "ruff", "format", file])




+2 -0 binaries/cli/src/template/python/__node-name__/__node_name__/__init__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

# Define the path to the README file relative to the package directory


+2 -0 binaries/cli/src/template/python/__node-name__/__node_name__/__main__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

from .main import main

if __name__ == "__main__":


+3 -0 binaries/cli/src/template/python/__node-name__/__node_name__/main.py

@@ -1,8 +1,11 @@
"""TODO: Add docstring."""

import pyarrow as pa
from dora import Node


def main():
"""TODO: Add docstring."""
node = Node()

for event in node:


+2 -0 binaries/cli/src/template/python/operator/operator-template.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

from dora import DoraStatus




+2 -0 examples/cuda-benchmark/demo_receiver.py

@@ -1,4 +1,6 @@
#!/usr/bin/env python
"""TODO: Add docstring."""


import os
import time


+2 -0 examples/cuda-benchmark/demo_sender.py

@@ -1,4 +1,6 @@
#!/usr/bin/env python
"""TODO: Add docstring."""


import os
import time


+3 -1 examples/cuda-benchmark/helper.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import csv
import os
import platform
@@ -27,7 +29,7 @@ LOG_HEADER = [


def record_results(name, current_size, latencies):
"""TODO: Add docstring."""
avg_latency = np.array(latencies).mean()

# Calculate Q1 (25th percentile), median (50th percentile), and Q3 (75th percentile)


+2 -0 examples/cuda-benchmark/receiver.py

@@ -1,4 +1,6 @@
#!/usr/bin/env python
"""TODO: Add docstring."""


import os
import time


+2 -0 examples/cuda-benchmark/sender.py

@@ -1,4 +1,6 @@
#!/usr/bin/env python
"""TODO: Add docstring."""


import os
import time


+4 -0 examples/openai-server/openai_api_client.py

@@ -1,9 +1,12 @@
"""TODO: Add docstring."""

from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="dummy_api_key")


def test_list_models():
"""TODO: Add docstring."""
try:
models = client.models.list()
print("Available models:")
@@ -14,6 +17,7 @@ def test_list_models():


def test_chat_completion(user_input):
"""TODO: Add docstring."""
try:
response = client.chat.completions.create(
model="gpt-3.5-turbo",


+6 -0 examples/piper/convert.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import numpy as np
from scipy.spatial.transform import Rotation as R

@@ -34,12 +36,14 @@ def convert_rotation_matrix_to_euler(rotmat):


def normalize_vector(v):
"""TODO: Add docstring."""
v_mag = np.linalg.norm(v, axis=-1, keepdims=True)
v_mag = np.maximum(v_mag, 1e-8)
return v / v_mag


def cross_product(u, v):
"""TODO: Add docstring."""
i = u[:, 1] * v[:, 2] - u[:, 2] * v[:, 1]
j = u[:, 2] * v[:, 0] - u[:, 0] * v[:, 2]
k = u[:, 0] * v[:, 1] - u[:, 1] * v[:, 0]
@@ -49,6 +53,7 @@ def cross_product(u, v):


def compute_rotation_matrix_from_ortho6d(ortho6d):
"""TODO: Add docstring."""
x_raw = ortho6d[:, 0:3]
y_raw = ortho6d[:, 3:6]

@@ -69,5 +74,6 @@ def compute_ortho6d_from_rotation_matrix(matrix):
# rotation matrix: [ | , |, | ]
# [ a1, a2, a3]
# [ | , |, | ]
"""TODO: Add docstring."""
ortho6d = matrix[:, :, :2].transpose(0, 2, 1).reshape(matrix.shape[0], -1)
return ortho6d

+2 -0 examples/piper/dummy_inference.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

from dora import Node

node = Node()


+2 -0 examples/piper/dummy_inference_2.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import h5py
from dora import Node



+2 -0 examples/piper/post_process_action.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

from dora import Node

node = Node()


+3 -0 examples/piper/record.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import datetime
import os

@@ -112,6 +114,7 @@ if not os.path.exists(DATA_DIR):


def save_data(data_dict, dataset_path, data_size):
"""TODO: Add docstring."""
with h5py.File(dataset_path + ".hdf5", "w", rdcc_nbytes=1024**2 * 2) as root:
root.attrs["sim"] = False
root.attrs["compress"] = False


+2 -0 examples/piper/replay.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

import h5py


+4 -0 examples/python-operator-dataflow/file_saver_op.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import pyarrow as pa
from dora import DoraStatus

@@ -6,6 +8,7 @@ class Operator:
"""Inferring object from images."""

def __init__(self):
"""TODO: Add docstring."""
self.last_file = ""
self.last_path = ""
self.last_netadata = None
@@ -15,6 +18,7 @@ class Operator:
dora_event,
send_output,
) -> DoraStatus:
"""TODO: Add docstring."""
if dora_event["type"] == "INPUT" and dora_event["id"] == "file":
input = dora_event["value"][0].as_py()



+2 -0 examples/python-operator-dataflow/keyboard_op.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import pyarrow as pa
from dora import Node
from pynput import keyboard


+5 -0 examples/python-operator-dataflow/llm_op.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import json
import os
import re
@@ -183,12 +185,14 @@ def replace_code_in_source(source_code, replacement_block: str):


class Operator:
"""TODO: Add docstring."""

def on_event(
self,
dora_event,
send_output,
) -> DoraStatus:
"""TODO: Add docstring."""
if dora_event["type"] == "INPUT" and dora_event["id"] == "code_modifier":
input = dora_event["value"][0].as_py()

@@ -261,6 +265,7 @@ class Operator:

# Generate output
# prompt = PROMPT_TEMPLATE.format(system_message=system_message, prompt=prompt))
"""TODO: Add docstring."""
input = tokenizer(prompt, return_tensors="pt")
input_ids = input.input_ids.cuda()



+3 -0 examples/python-operator-dataflow/microphone_op.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import numpy as np
import pyarrow as pa
import sounddevice as sd
@@ -16,6 +18,7 @@ class Operator:
dora_event,
send_output,
) -> DoraStatus:
"""TODO: Add docstring."""
if dora_event["type"] == "INPUT":
audio_data = sd.rec(
int(SAMPLE_RATE * MAX_DURATION),


+3 -0 examples/python-operator-dataflow/object_detection.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import numpy as np
import pyarrow as pa
from dora import DoraStatus
@@ -18,6 +20,7 @@ class Operator:
dora_event,
send_output,
) -> DoraStatus:
"""TODO: Add docstring."""
if dora_event["type"] == "INPUT":
frame = (
dora_event["value"].to_numpy().reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))


+4 -0 examples/python-operator-dataflow/plot.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

import cv2
@@ -16,6 +18,7 @@ class Operator:
"""Plot image and bounding box."""

def __init__(self):
"""TODO: Add docstring."""
self.bboxs = []
self.buffer = ""
self.submitted = []
@@ -26,6 +29,7 @@ class Operator:
dora_event,
send_output,
):
"""TODO: Add docstring."""
if dora_event["type"] == "INPUT":
id = dora_event["id"]
value = dora_event["value"]


+6 -0 examples/python-operator-dataflow/sentence_transformers_op.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os
import sys

@@ -15,6 +17,7 @@ SHOULD_BE_INCLUDED = [

## Get all python files path in given directory
def get_all_functions(path):
"""TODO: Add docstring."""
raw = []
paths = []
for root, dirs, files in os.walk(path):
@@ -34,6 +37,7 @@ def get_all_functions(path):


def search(query_embedding, corpus_embeddings, paths, raw, k=5, file_extension=None):
"""TODO: Add docstring."""
cos_scores = util.cos_sim(query_embedding, corpus_embeddings)[0]
top_results = torch.topk(cos_scores, k=min(k, len(cos_scores)), sorted=True)
out = []
@@ -47,6 +51,7 @@ class Operator:

def __init__(self):
## TODO: Add a initialisation step
"""TODO: Add docstring."""
self.model = SentenceTransformer("BAAI/bge-large-en-v1.5")
self.encoding = []
# file directory
@@ -61,6 +66,7 @@ class Operator:
dora_event,
send_output,
) -> DoraStatus:
"""TODO: Add docstring."""
if dora_event["type"] == "INPUT":
if dora_event["id"] == "query":
values = dora_event["value"].to_pylist()


+2 -0 examples/python-operator-dataflow/utils.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

LABELS = [
"person",
"bicycle",


+5 -0 examples/python-operator-dataflow/webcam.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os
import time

@@ -18,6 +20,7 @@ class Operator:
"""Sending image from webcam to the dataflow."""

def __init__(self):
"""TODO: Add docstring."""
self.video_capture = cv2.VideoCapture(CAMERA_INDEX)
self.start_time = time.time()
self.video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, CAMERA_WIDTH)
@@ -29,6 +32,7 @@ class Operator:
dora_event: str,
send_output,
) -> DoraStatus:
"""TODO: Add docstring."""
event_type = dora_event["type"]
if event_type == "INPUT":
ret, frame = self.video_capture.read()
@@ -67,4 +71,5 @@ class Operator:
return DoraStatus.STOP

def __del__(self):
"""TODO: Add docstring."""
self.video_capture.release()

+3 -0 examples/python-operator-dataflow/whisper_op.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import pyarrow as pa
import whisper
from dora import DoraStatus
@@ -13,6 +15,7 @@ class Operator:
dora_event,
send_output,
) -> DoraStatus:
"""TODO: Add docstring."""
if dora_event["type"] == "INPUT":
audio = dora_event["value"].to_numpy()
audio = whisper.pad_or_trim(audio)


+2 -0 examples/python-ros2-dataflow/control_node.py

@@ -1,4 +1,6 @@
#!/usr/bin/env python
"""TODO: Add docstring."""


import random



+2 -0 examples/python-ros2-dataflow/random_turtle.py

@@ -1,4 +1,6 @@
#!/usr/bin/env python
"""TODO: Add docstring."""


from dora import Node, Ros2Context, Ros2NodeOptions, Ros2QosPolicies



+2 -0 examples/reachy2/parse_bbox.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import json
import os



+2 -0 examples/reachy2/parse_bbox_minimal.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import json
import os



+6 -1 examples/reachy2/pick_place.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

# State Machine
import json
import os
@@ -114,6 +116,7 @@ def extract_bboxes(json_text) -> (np.ndarray, np.ndarray):


def handle_speech(last_text):
"""TODO: Add docstring."""
global stop
words = last_text.lower().split()
if len(ACTIVATION_WORDS) > 0 and any(word in ACTIVATION_WORDS for word in words):
@@ -137,7 +140,7 @@ def handle_speech(last_text):


def wait_for_event(id, timeout=None, cache={}):
"""TODO: Add docstring."""
while True:
event = node.next(timeout=timeout)
if event is None:
@@ -156,6 +159,7 @@ def wait_for_event(id, timeout=None, cache={}):


def wait_for_events(ids: list[str], timeout=None, cache={}):
"""TODO: Add docstring."""
response = {}
while True:
event = node.next(timeout=timeout)
@@ -176,6 +180,7 @@ def wait_for_events(ids: list[str], timeout=None, cache={}):


def get_prompt():
"""TODO: Add docstring."""
text = wait_for_event(id="text", timeout=0.3)
if text is None:
return None


+4 -0 examples/reachy2/state_machine.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

# State Machine
import os

@@ -76,6 +78,7 @@ l_release_closed_pose = [


def wait_for_event(id, timeout=None):
"""TODO: Add docstring."""
while True:
event = node.next(timeout=timeout)
if event["type"] == "INPUT":
@@ -87,6 +90,7 @@ def wait_for_event(id, timeout=None):


def wait_for_events(ids: list[str], timeout=None):
"""TODO: Add docstring."""
response = {}
while True:
event = node.next(timeout=timeout)


+4 -0 examples/translation/pretty_print.py

@@ -1,14 +1,18 @@
"""TODO: Add docstring."""

import os
import shutil


def clear_screen():
# Clear the screen based on the operating system
"""TODO: Add docstring."""
os.system("cls" if os.name == "nt" else "clear")


def print_centered(texts):
# Get terminal size
"""TODO: Add docstring."""
terminal_size = shutil.get_terminal_size()

# Print newlines to move cursor to the middle vertically


+2 -0 libraries/extensions/ros2-bridge/python/test_utils.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import numpy as np
import pyarrow as pa



+2 -0 node-hub/dora-argotranslate/dora_argotranslate/__init__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

# Define the path to the README file relative to the package directory


+3 -0 node-hub/dora-argotranslate/dora_argotranslate/main.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

os.environ["ARGOS_DEVICE_TYPE"] = "auto"
@@ -22,6 +24,7 @@ argostranslate.package.install_from_path(package_to_install.download())


def main():
"""TODO: Add docstring."""
node = Node()
while True:
event = node.next()


+3 -0 node-hub/dora-argotranslate/tests/test_translate.py

@@ -1,7 +1,10 @@
"""TODO: Add docstring."""

import pytest


def test_import_main():
"""TODO: Add docstring."""
from dora_argotranslate.main import main

# Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow.


+2 -0 node-hub/dora-distil-whisper/dora_distil_whisper/__init__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

# Define the path to the README file relative to the package directory


+5 -0 node-hub/dora-distil-whisper/dora_distil_whisper/main.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os
import sys
from pathlib import Path
@@ -12,6 +14,7 @@ TRANSLATE = bool(os.getenv("TRANSLATE", "False") in ["True", "true"])


def load_model():
"""TODO: Add docstring."""
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline

MODEL_NAME_OR_PATH = os.getenv("MODEL_NAME_OR_PATH", DEFAULT_PATH)
@@ -70,6 +73,7 @@ BAD_SENTENCES = [


def cut_repetition(text, min_repeat_length=4, max_repeat_length=50):
"""TODO: Add docstring."""
if len(text) == 0:
return text
# Check if the text is primarily Chinese (you may need to adjust this threshold)
@@ -103,6 +107,7 @@ def cut_repetition(text, min_repeat_length=4, max_repeat_length=50):


def main():
"""TODO: Add docstring."""
node = Node()

# For macos use mlx:


+3 -0 node-hub/dora-distil-whisper/tests/test_distil_whisper.py

@@ -1,7 +1,10 @@
"""TODO: Add docstring."""

import pytest


def test_import_main():
"""TODO: Add docstring."""
from dora_distil_whisper.main import main

# Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow.


+2 -0 node-hub/dora-echo/dora_echo/__init__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

# Define the path to the README file relative to the package directory


+3 -0 node-hub/dora-echo/dora_echo/main.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import argparse
import os

@@ -8,6 +10,7 @@ RUNNER_CI = True if os.getenv("CI") == "true" else False

def main():
# Handle dynamic nodes, ask for the name of the node in the dataflow, and the same values as the ENV variables.
"""TODO: Add docstring."""
parser = argparse.ArgumentParser(description="Simple arrow sender")

parser.add_argument(


+3 -0 node-hub/dora-echo/tests/test_dora_echo.py

@@ -1,7 +1,10 @@
"""TODO: Add docstring."""

import pytest


def test_import_main():
"""TODO: Add docstring."""
from dora_echo.main import main

# Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow.


+2 -0 node-hub/dora-internvl/dora_internvl/__init__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

# Define the path to the README file relative to the package directory


+7 -0 node-hub/dora-internvl/dora_internvl/main.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

import numpy as np
@@ -14,6 +16,7 @@ IMAGENET_STD = (0.229, 0.224, 0.225)


def build_transform(input_size):
"""TODO: Add docstring."""
MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
transform = T.Compose(
[
@@ -27,6 +30,7 @@ def build_transform(input_size):


def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
"""TODO: Add docstring."""
best_ratio_diff = float("inf")
best_ratio = (1, 1)
area = width * height
@@ -45,6 +49,7 @@ def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_
def dynamic_preprocess(
image, min_num=1, max_num=12, image_size=448, use_thumbnail=False,
):
"""TODO: Add docstring."""
orig_width, orig_height = image.size
aspect_ratio = orig_width / orig_height

@@ -89,6 +94,7 @@ def dynamic_preprocess(


def load_image(image_array: np.array, input_size=448, max_num=12):
"""TODO: Add docstring."""
image = Image.fromarray(image_array).convert("RGB")
transform = build_transform(input_size=input_size)
images = dynamic_preprocess(
@@ -101,6 +107,7 @@ def load_image(image_array: np.array, input_size=448, max_num=12):

def main():
# Handle dynamic nodes, ask for the name of the node in the dataflow, and the same values as the ENV variables.
"""TODO: Add docstring."""
model_path = os.getenv("MODEL", "OpenGVLab/InternVL2-1B")
device = "cuda:0" if torch.cuda.is_available() else "cpu"



+3 -0 node-hub/dora-internvl/tests/test_dora_internvl.py

@@ -1,7 +1,10 @@
"""TODO: Add docstring."""

import pytest


def test_import_main():
"""TODO: Add docstring."""
from dora_internvl.main import main

# Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow.


+2 -0 node-hub/dora-ios-lidar/dora_ios_lidar/__init__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

# Define the path to the README file relative to the package directory


+2 -0 node-hub/dora-ios-lidar/dora_ios_lidar/__main__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

from .main import main

if __name__ == "__main__":


+10 -0 node-hub/dora-ios-lidar/dora_ios_lidar/main.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

from threading import Event

import cv2
@@ -8,7 +10,10 @@ from record3d import Record3DStream


class DemoApp:
"""TODO: Add docstring."""

def __init__(self):
"""TODO: Add docstring."""
self.event = Event()
self.session = None
self.DEVICE_TYPE__TRUEDEPTH = 0
@@ -20,10 +25,12 @@ class DemoApp:
self.event.set() # Notify the main thread to stop waiting and process new frame.

def on_stream_stopped(self):
"""TODO: Add docstring."""
self.stop = True
print("Stream stopped")

def connect_to_device(self, dev_idx):
"""TODO: Add docstring."""
print("Searching for devices")
devs = Record3DStream.get_connected_devices()
print(f"{len(devs)} device(s) found")
@@ -42,11 +49,13 @@ class DemoApp:
self.session.connect(dev) # Initiate connection and start capturing

def get_intrinsic_mat_from_coeffs(self, coeffs):
"""TODO: Add docstring."""
return np.array(
[[coeffs.fx, 0, coeffs.tx], [0, coeffs.fy, coeffs.ty], [0, 0, 1]],
)

def start_processing_stream(self):
"""TODO: Add docstring."""
node = Node()

for event in node:
@@ -98,6 +107,7 @@ class DemoApp:


def main():
"""TODO: Add docstring."""
app = DemoApp()
app.connect_to_device(dev_idx=0)
app.start_processing_stream()


+3 -0 node-hub/dora-ios-lidar/tests/test_dora_ios_lidar.py

@@ -1,7 +1,10 @@
"""TODO: Add docstring."""

import pytest


def test_import_main():
"""TODO: Add docstring."""
from dora_ios_lidar.main import main

# Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow.


+2 -0 node-hub/dora-keyboard/dora_keyboard/__init__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

# Define the path to the README file relative to the package directory


+3 -0 node-hub/dora-keyboard/dora_keyboard/main.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import pyarrow as pa
from dora import Node
from pynput import keyboard
@@ -5,6 +7,7 @@ from pynput.keyboard import Events


def main():
"""TODO: Add docstring."""
node = Node()

always_none = node.next(timeout=0.001) is None


+3 -0 node-hub/dora-keyboard/tests/test_keyboard.py

@@ -1,7 +1,10 @@
"""TODO: Add docstring."""

import pytest


def test_import_main():
"""TODO: Add docstring."""
from dora_keyboard.main import main

# Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow.


+2 -0 node-hub/dora-kokoro-tts/dora_kokoro_tts/__init__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

# Define the path to the README file relative to the package directory


+2 -0 node-hub/dora-kokoro-tts/dora_kokoro_tts/__main__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

from .main import main

if __name__ == "__main__":


+3 -0 node-hub/dora-kokoro-tts/dora_kokoro_tts/main.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import pyarrow as pa
from dora import Node
from kokoro import KPipeline
@@ -6,6 +8,7 @@ pipeline = KPipeline(lang_code="a") # <= make sure lang_code matches voice


def main():
"""TODO: Add docstring."""
node = Node()

for event in node:


+3 -0 node-hub/dora-kokoro-tts/tests/test_dora_kokoro_tts.py

@@ -1,7 +1,10 @@
"""TODO: Add docstring."""

import pytest


def test_import_main():
"""TODO: Add docstring."""
from dora_kokoro_tts.main import main

# Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow.


+2 -0 node-hub/dora-microphone/dora_microphone/__init__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

# Define the path to the README file relative to the package directory


+4 -0 node-hub/dora-microphone/dora_microphone/main.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os
import time as tm

@@ -12,6 +14,7 @@ SAMPLE_RATE = int(os.getenv("SAMPLE_RATE", "16000"))

def main():
# Initialize buffer and recording flag
"""TODO: Add docstring."""
buffer = []
start_recording_time = tm.time()
node = Node()
@@ -20,6 +23,7 @@ def main():
finished = False

def callback(indata, frames, time, status):
"""TODO: Add docstring."""
nonlocal buffer, node, start_recording_time, finished

if tm.time() - start_recording_time > MAX_DURATION:


+3 -0 node-hub/dora-microphone/tests/test_microphone.py

@@ -1,7 +1,10 @@
"""TODO: Add docstring."""

import pytest


def test_import_main():
"""TODO: Add docstring."""
from dora_microphone.main import main

# Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow.


+2 -0 node-hub/dora-openai-server/dora_openai_server/__init__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

# Define the path to the README file relative to the package directory


+12 -0 node-hub/dora-openai-server/dora_openai_server/main.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import ast
import asyncio
from typing import List, Optional
@@ -13,11 +15,15 @@ app = FastAPI()


class ChatCompletionMessage(BaseModel):
"""TODO: Add docstring."""

role: str
content: str


class ChatCompletionRequest(BaseModel):
"""TODO: Add docstring."""

model: str
messages: List[ChatCompletionMessage]
temperature: Optional[float] = 1.0
@@ -25,6 +31,8 @@ class ChatCompletionRequest(BaseModel):


class ChatCompletionResponse(BaseModel):
"""TODO: Add docstring."""

id: str
object: str
created: int
@@ -38,6 +46,7 @@ node = Node() # provide the name to connect to the dataflow if dynamic node

@app.post("/v1/chat/completions")
async def create_chat_completion(request: ChatCompletionRequest):
"""TODO: Add docstring."""
data = next(
(msg.content for msg in request.messages if msg.role == "user"),
"No user message found.",
@@ -95,6 +104,7 @@ async def create_chat_completion(request: ChatCompletionRequest):

@app.get("/v1/models")
async def list_models():
"""TODO: Add docstring."""
return {
"object": "list",
"data": [
@@ -109,6 +119,7 @@ async def list_models():


async def run_fastapi():
"""TODO: Add docstring."""
config = uvicorn.Config(app, host="0.0.0.0", port=8000, log_level="info")
server = uvicorn.Server(config)

@@ -121,6 +132,7 @@ async def run_fastapi():


def main():
"""TODO: Add docstring."""
asyncio.run(run_fastapi())




+3 -0 node-hub/dora-openai-server/tests/test_dora_openai_server.py

@@ -1,2 +1,5 @@
"""TODO: Add docstring."""

def test_import_main():
"""TODO: Add docstring."""
pass

+2 -0 node-hub/dora-opus/dora_opus/__init__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

# Define the path to the README file relative to the package directory


+4 -0 node-hub/dora-opus/dora_opus/main.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os
from pathlib import Path

@@ -26,6 +28,7 @@ model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME_OR_PATH)

def cut_repetition(text, min_repeat_length=4, max_repeat_length=50):
# Check if the text is primarily Chinese (you may need to adjust this threshold)
"""TODO: Add docstring."""
if sum(1 for char in text if "\u4e00" <= char <= "\u9fff") / len(text) > 0.5:
# Chinese text processing
for repeat_length in range(
@@ -54,6 +57,7 @@ def cut_repetition(text, min_repeat_length=4, max_repeat_length=50):


def main():
"""TODO: Add docstring."""
node = Node()
while True:
event = node.next()


+3 -0 node-hub/dora-opus/tests/test_translate.py

@@ -1,7 +1,10 @@
"""TODO: Add docstring."""

import pytest


def test_import_main():
"""TODO: Add docstring."""
from dora_opus.main import main

# Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow.


+2 -0 node-hub/dora-outtetts/dora_outtetts/__init__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

# Define the path to the README file relative to the package directory


+2 -0 node-hub/dora-outtetts/dora_outtetts/__main__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

from .main import main

if __name__ == "__main__":


+5 -0 node-hub/dora-outtetts/dora_outtetts/main.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import argparse # Add argparse import
import os
import pathlib
@@ -15,6 +17,7 @@ torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32


def load_interface():
"""TODO: Add docstring."""
if os.getenv("INTERFACE", "HF") == "HF":
model_config = outetts.HFModelConfig_v1(
model_path="OuteAI/OuteTTS-0.2-500M",
@@ -39,6 +42,7 @@ def load_interface():


def create_speaker(interface, path):
"""TODO: Add docstring."""
speaker = interface.create_speaker(
audio_path=path,
# If transcript is not provided, it will be automatically transcribed using Whisper
@@ -53,6 +57,7 @@ def create_speaker(interface, path):

def main(arg_list: list[str] | None = None):
# Parse cli args
"""TODO: Add docstring."""
parser = argparse.ArgumentParser(description="Dora Outetts Node")
parser.add_argument("--create-speaker", type=str, help="Path to audio file")
parser.add_argument("--test", action="store_true", help="Run tests")


+4 -0 node-hub/dora-outtetts/dora_outtetts/tests/test_main.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

import pytest
@@ -7,11 +9,13 @@ CI = os.getenv("CI", "false") in ["True", "true"]


def test_import_main():
"""TODO: Add docstring."""
with pytest.raises(RuntimeError):
main([])


def test_load_interface():
"""TODO: Add docstring."""
try:
interface = load_interface()
except RuntimeError:


+2 -0 node-hub/dora-parler/dora_parler/__init__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

# Define the path to the README file relative to the package directory


+10 -0 node-hub/dora-parler/dora_parler/main.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os
import time
from pathlib import Path
@@ -58,6 +60,7 @@ stream = p.open(format=pyaudio.paInt16, channels=1, rate=sampling_rate, output=T


def play_audio(audio_array):
"""TODO: Add docstring."""
if np.issubdtype(audio_array.dtype, np.floating):
max_val = np.max(np.abs(audio_array))
audio_array = (audio_array / max_val) * 32767
@@ -67,16 +70,21 @@ def play_audio(audio_array):


class InterruptStoppingCriteria(StoppingCriteria):
"""TODO: Add docstring."""

def __init__(self):
"""TODO: Add docstring."""
super().__init__()
self.stop_signal = False

def __call__(
self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs,
) -> bool:
"""TODO: Add docstring."""
return self.stop_signal

def stop(self):
"""TODO: Add docstring."""
self.stop_signal = True


@@ -86,6 +94,7 @@ def generate_base(
description=default_description,
play_steps_in_s=0.5,
):
"""TODO: Add docstring."""
prev_time = time.time()
play_steps = int(frame_rate * play_steps_in_s)
inputs = tokenizer(description, return_tensors="pt").to(device)
@@ -133,6 +142,7 @@ def generate_base(


def main():
"""TODO: Add docstring."""
generate_base(None, "Ready !", default_description, 0.5)
node = Node()
while True:


+3 -0 node-hub/dora-parler/tests/test_parler_tts.py

@@ -1,7 +1,10 @@
"""TODO: Add docstring."""

import pytest


def test_import_main():
"""TODO: Add docstring."""
from dora_parler.main import main

# Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow.


+2 -0 node-hub/dora-piper/dora_piper/__init__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

# Define the path to the README file relative to the package directory


+3 -0 node-hub/dora-piper/dora_piper/main.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os
import time

@@ -43,6 +45,7 @@ def enable_fun(piper: C_PiperInterface):


def main():
"""TODO: Add docstring."""
elapsed_time = time.time()
CAN_BUS = os.getenv("CAN_BUS", "")
piper = C_PiperInterface(CAN_BUS)


+3 -0 node-hub/dora-piper/tests/test_piper.py

@@ -1,4 +1,7 @@
"""TODO: Add docstring."""

def test_import_main():
"""TODO: Add docstring."""
from piper_sdk import C_PiperInterface

## Test piper installation


+2 -0 node-hub/dora-pyaudio/dora_pyaudio/__init__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

# Define the path to the README file relative to the package directory


+2 -0 node-hub/dora-pyaudio/dora_pyaudio/__main__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

from .main import main

if __name__ == "__main__":


+2 -0 node-hub/dora-pyaudio/dora_pyaudio/main.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

import numpy as np


+3 -0 node-hub/dora-pyaudio/tests/test_dora_pyaudio.py

@@ -1,7 +1,10 @@
"""TODO: Add docstring."""

import pytest


def test_import_main():
"""TODO: Add docstring."""
from dora_pyaudio.main import main

# Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow.


+2 -0 node-hub/dora-pyorbbecksdk/dora_pyorbbecksdk/__init__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

# Define the path to the README file relative to the package directory


+13 -0 node-hub/dora-pyorbbecksdk/dora_pyorbbecksdk/main.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

# ******************************************************************************
# Copyright (c) 2023 Orbbec 3D Technology, Inc
#
@@ -40,11 +42,15 @@ except ImportError as err:


class TemporalFilter:
"""TODO: Add docstring."""

def __init__(self, alpha):
"""TODO: Add docstring."""
self.alpha = alpha
self.previous_frame = None

def process(self, frame):
"""TODO: Add docstring."""
if self.previous_frame is None:
result = frame
else:
@@ -56,18 +62,21 @@ class TemporalFilter:


def yuyv_to_bgr(frame: np.ndarray, width: int, height: int) -> np.ndarray:
"""TODO: Add docstring."""
yuyv = frame.reshape((height, width, 2))
bgr_image = cv2.cvtColor(yuyv, cv2.COLOR_YUV2BGR_YUY2)
return bgr_image


def uyvy_to_bgr(frame: np.ndarray, width: int, height: int) -> np.ndarray:
"""TODO: Add docstring."""
uyvy = frame.reshape((height, width, 2))
bgr_image = cv2.cvtColor(uyvy, cv2.COLOR_YUV2BGR_UYVY)
return bgr_image


def i420_to_bgr(frame: np.ndarray, width: int, height: int) -> np.ndarray:
"""TODO: Add docstring."""
y = frame[0:height, :]
u = frame[height : height + height // 4].reshape(height // 2, width // 2)
v = frame[height + height // 4 :].reshape(height // 2, width // 2)
@@ -77,6 +86,7 @@ def i420_to_bgr(frame: np.ndarray, width: int, height: int) -> np.ndarray:


def nv21_to_bgr(frame: np.ndarray, width: int, height: int) -> np.ndarray:
"""TODO: Add docstring."""
y = frame[0:height, :]
uv = frame[height : height + height // 2].reshape(height // 2, width)
yuv_image = cv2.merge([y, uv])
@@ -85,6 +95,7 @@ def nv21_to_bgr(frame: np.ndarray, width: int, height: int) -> np.ndarray:


def nv12_to_bgr(frame: np.ndarray, width: int, height: int) -> np.ndarray:
"""TODO: Add docstring."""
y = frame[0:height, :]
uv = frame[height : height + height // 2].reshape(height // 2, width)
yuv_image = cv2.merge([y, uv])
@@ -93,6 +104,7 @@ def nv12_to_bgr(frame: np.ndarray, width: int, height: int) -> np.ndarray:


def frame_to_bgr_image(frame: VideoFrame):
"""TODO: Add docstring."""
width = frame.get_width()
height = frame.get_height()
color_format = frame.get_format()
@@ -135,6 +147,7 @@ DEVICE_INDEX = int(os.getenv("DEVICE_INDEX", "0"))


def main():
"""TODO: Add docstring."""
node = Node()
config = Config()
ctx = Context()


+3 -0 node-hub/dora-pyorbbecksdk/tests/test_pyorbbecksdk.py

@@ -1,4 +1,7 @@
"""TODO: Add docstring."""

def test_import_main():
"""TODO: Add docstring."""
return # Remove this if you want to test pyorbbecksdk installation
# import pyorbbecksdk



+2 -0 node-hub/dora-pyrealsense/dora_pyrealsense/__init__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

# Define the path to the README file relative to the package directory


+3 -0 node-hub/dora-pyrealsense/dora_pyrealsense/main.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os
import time

@@ -11,6 +13,7 @@ RUNNER_CI = True if os.getenv("CI") == "true" else False


def main():
"""TODO: Add docstring."""
FLIP = os.getenv("FLIP", "")
DEVICE_SERIAL = os.getenv("DEVICE_SERIAL", "")
image_height = int(os.getenv("IMAGE_HEIGHT", "480"))


+3 -0 node-hub/dora-pyrealsense/tests/test_dora_pyrealsense.py

@@ -1,7 +1,10 @@
"""TODO: Add docstring."""

import pytest


def test_import_main():
"""TODO: Add docstring."""
from dora_pyrealsense.main import main

# Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow.


+2 -0 node-hub/dora-qwen/dora_qwen/__init__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

# Define the path to the README file relative to the package directory


+2 -0 node-hub/dora-qwen/dora_qwen/__main__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

from .main import main

if __name__ == "__main__":


+7 -0 node-hub/dora-qwen/dora_qwen/main.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os
import sys

@@ -12,6 +14,7 @@ SYSTEM_PROMPT = os.getenv(


def get_model_gguf():
"""TODO: Add docstring."""
from llama_cpp import Llama

llm = Llama.from_pretrained(
@@ -21,6 +24,7 @@ def get_model_gguf():


def get_model_darwin():
"""TODO: Add docstring."""
from mlx_lm import load

model, tokenizer = load("mlx-community/Qwen2.5-0.5B-Instruct-8bit")
@@ -28,6 +32,7 @@ def get_model_darwin():


def get_model_huggingface():
"""TODO: Add docstring."""
model_name = "Qwen/Qwen2.5-0.5B-Instruct"

model = AutoModelForCausalLM.from_pretrained(
@@ -41,6 +46,7 @@ ACTIVATION_WORDS = os.getenv("ACTIVATION_WORDS", "what how who where you").split


def generate_hf(model, tokenizer, prompt: str, history) -> str:
"""TODO: Add docstring."""
history += [{"role": "user", "content": prompt}]
text = tokenizer.apply_chat_template(
history, tokenize=False, add_generation_prompt=True,
@@ -57,6 +63,7 @@ def generate_hf(model, tokenizer, prompt: str, history) -> str:


def main():
"""TODO: Add docstring."""
history = []
# If OS is not Darwin, use Huggingface model
if sys.platform != "":


+3 -0 node-hub/dora-qwen/tests/test_dora_qwen.py

@@ -1,7 +1,10 @@
"""TODO: Add docstring."""

import pytest


def test_import_main():
"""TODO: Add docstring."""
from dora_qwen.main import main

# Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow.


+2 -0 node-hub/dora-qwen2-5-vl/dora_qwen2_5_vl/__init__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

# Define the path to the README file relative to the package directory


+3 -0 node-hub/dora-qwen2-5-vl/dora_qwen2_5_vl/main.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os
from pathlib import Path

@@ -135,6 +137,7 @@ def generate(frames: dict, question, history, past_key_values=None, image_id=Non


def main():
"""TODO: Add docstring."""
pa.array([]) # initialize pyarrow array
node = Node()



+3 -0 node-hub/dora-qwen2-5-vl/tests/test_dora_qwenvl.py

@@ -1,7 +1,10 @@
"""TODO: Add docstring."""

import pytest


def test_import_main():
"""TODO: Add docstring."""
from dora_qwen2_5_vl.main import main

# Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow.


+2 -0 node-hub/dora-qwenvl/dora_qwenvl/__init__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

# Define the path to the README file relative to the package directory


+3 -0 node-hub/dora-qwenvl/dora_qwenvl/main.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os
from pathlib import Path

@@ -106,6 +108,7 @@ def generate(frames: dict, question):


def main():
"""TODO: Add docstring."""
pa.array([]) # initialize pyarrow array
node = Node()



+3 -0 node-hub/dora-qwenvl/tests/test_dora_qwenvl.py

@@ -1,7 +1,10 @@
"""TODO: Add docstring."""

import pytest


def test_import_main():
"""TODO: Add docstring."""
from dora_qwenvl.main import main

# Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow.


+2 -0 node-hub/dora-rdt-1b/dora_rdt_1b/__init__.py

@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os
import sys
from pathlib import Path


Some files were not shown because too many files changed in this diff
