
Replace pylint with ruff

tags/v0.3.9-rc1
haixuantao 1 year ago
parent commit 7a0654a1f8
44 changed files with 134 additions and 150 deletions
  1. .github/workflows/ci.yml (+2, -3)
  2. .github/workflows/node-hub-ci-cd.yml (+1, -1)
  3. .github/workflows/node_hub_test.sh (+1, -2)
  4. binaries/cli/src/template/python/__node-name__/README.md (+4, -4)
  5. binaries/cli/src/template/python/__node-name__/pyproject.toml (+1, -2)
  6. binaries/cli/src/template/python/mod.rs (+2, -2)
  7. examples/piper/dummy_inference_2.py (+0, -1)
  8. examples/piper/replay.py (+0, -1)
  9. examples/python-operator-dataflow/plot.py (+0, -1)
  10. node-hub/dora-argotranslate/dora_argotranslate/main.py (+2, -2)
  11. node-hub/dora-distil-whisper/tests/test_distil_whisper.py (+0, -1)
  12. node-hub/dora-echo/dora_echo/main.py (+1, -1)
  13. node-hub/dora-internvl/dora_internvl/main.py (+2, -1)
  14. node-hub/dora-keyboard/dora_keyboard/main.py (+2, -2)
  15. node-hub/dora-microphone/dora_microphone/main.py (+5, -5)
  16. node-hub/dora-openai-server/dora_openai_server/main.py (+6, -5)
  17. node-hub/dora-openai-server/pyproject.toml (+3, -0)
  18. node-hub/dora-opus/dora_opus/main.py (+4, -4)
  19. node-hub/dora-outtetts/README.md (+4, -4)
  20. node-hub/dora-outtetts/dora_outtetts/__main__.py (+0, -1)
  21. node-hub/dora-outtetts/dora_outtetts/main.py (+5, -4)
  22. node-hub/dora-outtetts/dora_outtetts/tests/test_main.py (+1, -3)
  23. node-hub/dora-outtetts/pyproject.toml (+1, -2)
  24. node-hub/dora-parler/dora_parler/main.py (+7, -9)
  25. node-hub/dora-piper/dora_piper/main.py (+5, -5)
  26. node-hub/dora-pyaudio/README.md (+4, -4)
  27. node-hub/dora-pyaudio/pyproject.toml (+2, -2)
  28. node-hub/dora-pyorbbecksdk/dora_pyorbbecksdk/main.py (+15, -11)
  29. node-hub/dora-pyrealsense/dora_pyrealsense/main.py (+1, -3)
  30. node-hub/dora-pyrealsense/tests/test_dora_pyrealsense.py (+0, -1)
  31. node-hub/dora-qwenvl/dora_qwenvl/main.py (+8, -8)
  32. node-hub/dora-rdt-1b/dora_rdt_1b/RoboticsDiffusionTransformer (+1, -1)
  33. node-hub/dora-rdt-1b/dora_rdt_1b/main.py (+11, -12)
  34. node-hub/dora-rdt-1b/pyproject.toml (+4, -4)
  35. node-hub/dora-rdt-1b/tests/test_dora_rdt_1b.py (+8, -9)
  36. node-hub/dora-ugv/dora_ugv/main.py (+3, -2)
  37. node-hub/dora-vad/dora_vad/main.py (+5, -5)
  38. node-hub/dora-yolo/dora_yolo/main.py (+1, -2)
  39. node-hub/llama-factory-recorder/llama_factory_recorder/main.py (+8, -7)
  40. node-hub/opencv-plot/opencv_plot/main.py (+0, -3)
  41. node-hub/opencv-video-capture/opencv_video_capture/main.py (+0, -2)
  42. node-hub/pyarrow-assert/pyarrow_assert/main.py (+1, -2)
  43. node-hub/pyarrow-sender/pyarrow_sender/main.py (+1, -3)
  44. node-hub/terminal-input/terminal_input/main.py (+2, -3)

.github/workflows/ci.yml (+2, -3)

@@ -310,15 +310,14 @@ jobs:
mv .venv/Scripts .venv/bin # venv is placed under `Scripts` on Windows
fi
source .venv/bin/activate
pip3 install maturin black pylint pytest
pip3 install maturin ruff pytest
maturin build -m apis/python/node/Cargo.toml
pip3 install target/wheels/*
dora new test_python_project --lang python --internal-create-with-path-dependencies
cd test_python_project

# Check Compliancy
black . --check
pylint --disable=C,R **/*.py
ruff . --check
pip install -e ./*/
pytest



.github/workflows/node-hub-ci-cd.yml (+1, -1)

@@ -86,7 +86,7 @@ jobs:
run: |
curl -sSL https://install.python-poetry.org | python3 -
echo "$HOME/.local/bin" >> $GITHUB_PATH
pip install black pylint pytest
pip install ruff pytest

- name: Set up Rust
if: runner.os == 'Linux' || github.event_name == 'workflow_dispatch' || (github.event_name == 'release' && startsWith(github.ref, 'refs/tags/'))


.github/workflows/node_hub_test.sh (+1, -2)

@@ -43,8 +43,7 @@ else
if [ -f "$dir/pyproject.toml" ]; then
echo "Running linting and tests for Python project in $dir..."
pip install .
poetry run black --check .
poetry run pylint --disable=C,R --ignored-modules=cv2,pyrealsense2 **/*.py
ruff check .
poetry run pytest
fi
fi


binaries/cli/src/template/python/__node-name__/README.md (+4, -4)

@@ -10,16 +10,16 @@ pip install -e .

## Contribution Guide

- Format with [black](https://github.com/psf/black):
- Format with [ruff](https://docs.astral.sh/ruff/):

```bash
black . # Format
ruff check . --fix
```

- Lint with [pylint](https://github.com/pylint-dev/pylint):
- Lint with ruff:

```bash
pylint --disable=C,R --ignored-modules=cv2 . # Lint
ruff check .
```

- Test with [pytest](https://github.com/pytest-dev/pytest)


binaries/cli/src/template/python/__node-name__/pyproject.toml (+1, -2)

@@ -17,8 +17,7 @@ python = "^3.7"

[tool.poetry.dev-dependencies]
pytest = ">= 6.3.4"
pylint = ">= 3.3.2"
black = ">= 22.10"
ruff = ">= 0.9.1"

[tool.poetry.scripts]
__node-name__ = "__node_name__.main:main"


binaries/cli/src/template/python/mod.rs (+2, -2)

@@ -91,8 +91,8 @@ fn create_custom_node(
);
println!(" cd {}", Path::new(".").join(&root).display());
println!(" pip install -e . # Install",);
println!(" black . # Format");
println!(" pylint --disable=C,R . # Lint",);
println!(" ruff check . --fix # Format");
println!(" ruff check . # Lint",);
println!(" pytest . # Test");

Ok(())


examples/piper/dummy_inference_2.py (+0, -1)

@@ -1,7 +1,6 @@
from dora import Node


import numpy as np
import h5py

f = h5py.File("data/episode_0.hdf5", "r")


examples/piper/replay.py (+0, -1)

@@ -1,7 +1,6 @@
from dora import Node


import numpy as np
import h5py
import os



examples/python-operator-dataflow/plot.py (+0, -1)

@@ -1,6 +1,5 @@
import os
import cv2
import time

from dora import DoraStatus
from utils import LABELS


node-hub/dora-argotranslate/dora_argotranslate/main.py (+2, -2)

@@ -2,10 +2,10 @@ import os

os.environ["ARGOS_DEVICE_TYPE"] = "auto"

from dora import Node
import pyarrow as pa
import argostranslate.package
import argostranslate.translate
import pyarrow as pa
from dora import Node

from_code = os.getenv("SOURCE_LANGUAGE", "fr")
to_code = os.getenv("TARGET_LANGUAGE", "en")


node-hub/dora-distil-whisper/tests/test_distil_whisper.py (+0, -1)

@@ -2,7 +2,6 @@ import pytest


def test_import_main():

from dora_distil_whisper.main import main

# Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow.


node-hub/dora-echo/dora_echo/main.py (+1, -1)

@@ -1,12 +1,12 @@
import argparse
import os

from dora import Node

RUNNER_CI = True if os.getenv("CI") == "true" else False


def main():

# Handle dynamic nodes, ask for the name of the node in the dataflow, and the same values as the ENV variables.
parser = argparse.ArgumentParser(description="Simple arrow sender")



node-hub/dora-internvl/dora_internvl/main.py (+2, -1)

@@ -1,9 +1,10 @@
import os
from dora import Node
import numpy as np
import pyarrow as pa
import torch
import torchvision.transforms as T
from dora import Node
from PIL import Image
from torchvision.transforms.functional import InterpolationMode
from transformers import AutoModel, AutoTokenizer


node-hub/dora-keyboard/dora_keyboard/main.py (+2, -2)

@@ -1,7 +1,7 @@
from pynput import keyboard
from pynput.keyboard import Events
import pyarrow as pa
from dora import Node
from pynput import keyboard
from pynput.keyboard import Events


def main():


node-hub/dora-microphone/dora_microphone/main.py (+5, -5)

@@ -1,9 +1,9 @@
import sounddevice as sd
import numpy as np
import pyarrow as pa
import time as tm
import os
import time as tm

import numpy as np
import pyarrow as pa
import sounddevice as sd
from dora import Node

MAX_DURATION = float(os.getenv("MAX_DURATION", "0.1"))
@@ -19,7 +19,7 @@ def main():
always_none = node.next(timeout=0.001) is None
finished = False

# pylint: disable=unused-argument
# noqa
def callback(indata, frames, time, status):
nonlocal buffer, node, start_recording_time, finished
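
In the hunk above, the `# pylint: disable=unused-argument` comment becomes a bare `# noqa`, Ruff's (flake8-style) suppression marker. A minimal sketch of the narrower per-rule form, under the assumption that the ARG (flake8-unused-arguments) rules are enabled; the rule code and the function body are illustrative, not part of the commit:

```python
import numpy as np


# A bare "# noqa" on a line silences every Ruff diagnostic reported there;
# adding a code such as ARG001 (unused function argument) limits the
# suppression to that one rule.
def callback(indata, frames, time, status):  # noqa: ARG001
    # sounddevice callbacks must accept this exact four-argument signature,
    # even when only the audio buffer is used.
    return np.copy(indata)
```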



node-hub/dora-openai-server/dora_openai_server/main.py (+6, -5)

@@ -1,11 +1,12 @@
from fastapi import FastAPI
from pydantic import BaseModel
import ast
import asyncio
from typing import List, Optional

import pyarrow as pa
import uvicorn
from dora import Node
import asyncio
import pyarrow as pa
import ast
from fastapi import FastAPI
from pydantic import BaseModel

DORA_RESPONSE_TIMEOUT = 10
app = FastAPI()


node-hub/dora-openai-server/pyproject.toml (+3, -0)

@@ -28,3 +28,6 @@ dora-openai-server = "dora_openai_server.main:main"
[build-system]
requires = ["poetry-core>=1.8.0"]
build-backend = "poetry.core.masonry.api"

[tool.ruff.lint]
extend-select = ["I"]
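
`extend-select = ["I"]` adds Ruff's isort-compatible import-sorting rules ("I") on top of the default rule set; it is the same kind of import reordering applied throughout the Python hunks in this commit. A small sketch of the effect, reusing the dora-microphone imports shown above (standard library first, then third-party, each group alphabetized, as `ruff check --fix` would leave them):

```python
# Before the change these imports appeared in arbitrary order
# (sounddevice, numpy, pyarrow, time, os, dora). With the "I" rules,
# `ruff check --fix` regroups them: standard library first, then
# third-party packages, each block sorted alphabetically.
import os
import time as tm

import numpy as np
import pyarrow as pa
import sounddevice as sd
from dora import Node
```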

node-hub/dora-opus/dora_opus/main.py (+4, -4)

@@ -1,10 +1,10 @@
import os
from pathlib import Path
from dora import Node
import pyarrow as pa
import numpy as np
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

import numpy as np
import pyarrow as pa
from dora import Node
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

from_code = os.getenv("SOURCE_LANGUAGE", "zh")
to_code = os.getenv("TARGET_LANGUAGE", "en")


node-hub/dora-outtetts/README.md (+4, -4)

@@ -10,16 +10,16 @@ pip install -e .

## Contribution Guide

- Format with [black](https://github.com/psf/black):
- Format with [ruff](https://docs.astral.sh/ruff/):

```bash
black . # Format
ruff check . --fix
```

- Lint with [pylint](https://github.com/pylint-dev/pylint):
- Lint with ruff:

```bash
pylint --disable=C,R --ignored-modules=cv2 . # Lint
ruff check .
```

- Test with [pytest](https://github.com/pytest-dev/pytest)


node-hub/dora-outtetts/dora_outtetts/__main__.py (+0, -1)

@@ -1,5 +1,4 @@
from .main import main


if __name__ == "__main__":
main()

node-hub/dora-outtetts/dora_outtetts/main.py (+5, -4)

@@ -1,10 +1,11 @@
from dora import Node
import outetts
import argparse # Add argparse import
import pathlib
import os
import torch
import pathlib

import outetts
import pyarrow as pa
import torch
from dora import Node

PATH_SPEAKER = os.getenv("PATH_SPEAKER", "speaker.json")



node-hub/dora-outtetts/dora_outtetts/tests/test_main.py (+1, -3)

@@ -1,7 +1,5 @@
import pytest

from dora_outtetts.main import load_interface
from dora_outtetts.main import main
from dora_outtetts.main import load_interface, main


def test_import_main():


node-hub/dora-outtetts/pyproject.toml (+1, -2)

@@ -21,8 +21,7 @@ outetts = "^0.2.3"

[tool.poetry.dev-dependencies]
pytest = ">= 6.3.4"
pylint = ">= 3.3.2"
black = ">= 22.10"
ruff = ">= 0.9.1"

[tool.poetry.scripts]
dora-outtetts = "dora_outtetts.main:main"


node-hub/dora-parler/dora_parler/main.py (+7, -9)

@@ -1,19 +1,19 @@
from threading import Thread
from dora import Node
import os
import time
from pathlib import Path
from threading import Thread

import numpy as np
import torch
import time
import pyaudio

import torch
from dora import Node
from parler_tts import ParlerTTSForConditionalGeneration, ParlerTTSStreamer
from transformers import (
AutoTokenizer,
AutoFeatureExtractor,
set_seed,
AutoTokenizer,
StoppingCriteria,
StoppingCriteriaList,
set_seed,
)

device = "cuda:0" # if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
@@ -58,7 +58,6 @@ stream = p.open(format=pyaudio.paInt16, channels=1, rate=sampling_rate, output=T


def play_audio(audio_array):

if np.issubdtype(audio_array.dtype, np.floating):
max_val = np.max(np.abs(audio_array))
audio_array = (audio_array / max_val) * 32767
@@ -109,7 +108,6 @@ def generate_base(
thread.start()

for new_audio in streamer:

current_time = time.time()

print(f"Time between iterations: {round(current_time - prev_time, 2)} seconds")


node-hub/dora-piper/dora_piper/main.py (+5, -5)

@@ -1,10 +1,11 @@
from piper_sdk import C_PiperInterface
from dora import Node
import pyarrow as pa
import numpy as np
import os
import time

import numpy as np
import pyarrow as pa
from dora import Node
from piper_sdk import C_PiperInterface

TEACH_MODE = os.getenv("TEACH_MODE", "False") in ["True", "true"]


@@ -144,7 +145,6 @@ def main():
)

elif event["type"] == "STOP":

if not TEACH_MODE:
piper.MotionCtrl_2(0x01, 0x01, 50, 0x00)
piper.JointCtrl(0, 0, 0, 0, 0, 0)


node-hub/dora-pyaudio/README.md (+4, -4)

@@ -22,16 +22,16 @@ pip install -e .

## Contribution Guide

- Format with [black](https://github.com/psf/black):
- Format with [ruff](https://docs.astral.sh/ruff/):

```bash
black . # Format
ruff check . --fix
```

- Lint with [pylint](https://github.com/pylint-dev/pylint):
- Lint with ruff:

```bash
pylint --disable=C,R --ignored-modules=cv2 . # Lint
ruff check .
```

- Test with [pytest](https://github.com/pytest-dev/pytest)


node-hub/dora-pyaudio/pyproject.toml (+2, -2)

@@ -18,8 +18,8 @@ pyaudio = ">= 0.1.0"

[tool.poetry.dev-dependencies]
pytest = ">= 6.3.4"
pylint = ">= 2.5.2"
black = ">= 22.10"
ruff = ">= 0.9.1"

[tool.poetry.scripts]
dora-pyaudio = "dora_pyaudio.main:main"


node-hub/dora-pyorbbecksdk/dora_pyorbbecksdk/main.py (+15, -11)

@@ -13,18 +13,25 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import cv2
import os

import cv2
import numpy as np
import pyarrow as pa
from dora import Node

try:
from pyorbbecsdk import Context
from pyorbbecsdk import Config
from pyorbbecsdk import OBError
from pyorbbecsdk import OBSensorType, OBFormat
from pyorbbecsdk import Pipeline, FrameSet
from pyorbbecsdk import VideoStreamProfile
from pyorbbecsdk import VideoFrame
from pyorbbecsdk import (
Config,
Context,
FrameSet,
OBError,
OBFormat,
OBSensorType,
Pipeline,
VideoFrame,
VideoStreamProfile,
)
except ImportError as err:
print(
"Please install pyorbbecsdk first by following the instruction at: https://github.com/orbbec/pyorbbecsdk"
@@ -120,9 +127,6 @@ def frame_to_bgr_image(frame: VideoFrame):
return image


from dora import Node
import pyarrow as pa

ESC_KEY = 27
MIN_DEPTH_METERS = 0.01
MAX_DEPTH_METERS = 15.0
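
The same import-sorting pass collapses the seven separate `from pyorbbecsdk import ...` statements into a single parenthesized import, while keeping the guarded-import pattern that points users at the SDK's install instructions. A condensed sketch of that pattern, with only a subset of the names from the hunk above:

```python
try:
    # One parenthesized, alphabetized import replaces the repeated
    # "from pyorbbecsdk import ..." lines.
    from pyorbbecsdk import (
        Config,
        Context,
        Pipeline,
    )
except ImportError as err:
    # Fail with an actionable hint instead of a bare ImportError.
    print(
        "Please install pyorbbecsdk first by following the instruction at: "
        "https://github.com/orbbec/pyorbbecsdk"
    )
    raise err
```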


node-hub/dora-pyrealsense/dora_pyrealsense/main.py (+1, -3)

@@ -4,11 +4,10 @@ import time
import cv2
import numpy as np
import pyarrow as pa
import pyrealsense2 as rs
from dora import Node

RUNNER_CI = True if os.getenv("CI") == "true" else False
import pyrealsense2 as rs


def main():
@@ -53,7 +52,6 @@ def main():
pa.array([]) # initialize pyarrow array

for event in node:

# Run this example in the CI for 10 seconds only.
if RUNNER_CI and time.time() - start_time > 10:
break


node-hub/dora-pyrealsense/tests/test_dora_pyrealsense.py (+0, -1)

@@ -2,7 +2,6 @@ import pytest


def test_import_main():

from dora_pyrealsense.main import main

# Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow.


node-hub/dora-qwenvl/dora_qwenvl/main.py (+8, -8)

@@ -1,13 +1,14 @@
import os
from dora import Node
import torch
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info
from pathlib import Path

import cv2
import numpy as np
import pyarrow as pa
import torch
from dora import Node
from PIL import Image
from pathlib import Path
import cv2
from qwen_vl_utils import process_vision_info
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

DEFAULT_PATH = "Qwen/Qwen2-VL-2B-Instruct"

@@ -27,7 +28,7 @@ ADAPTER_PATH = os.getenv("ADAPTER_PATH", "")

# Check if flash_attn is installed
try:
import flash_attn as _
import flash_attn as _ # noqa

model = Qwen2VLForConditionalGeneration.from_pretrained(
MODEL_NAME_OR_PATH,
@@ -118,7 +119,6 @@ def main():
event_type = event["type"]

if event_type == "INPUT":

event_id = event["id"]

if "image" in event_id:


node-hub/dora-rdt-1b/dora_rdt_1b/RoboticsDiffusionTransformer (+1, -1)

@@ -1 +1 @@
Subproject commit 198374ea8c4a2ec2ddae86c35448d21aa9756f37
Subproject commit b2889e65cfe62571ced3ce88f00e7d80b41fee69

node-hub/dora-rdt-1b/dora_rdt_1b/main.py (+11, -12)

@@ -1,17 +1,18 @@
# install dependencies as shown in the README here https://github.com/alik-git/RoboticsDiffusionTransformer?tab=readme-ov-file#installation
import yaml
import torch
import os
from pathlib import Path

import cv2
import numpy as np
import pyarrow as pa
import torch
import yaml
from dora import Node
from PIL import Image

from dora_rdt_1b.RoboticsDiffusionTransformer.configs.state_vec import (
STATE_VEC_IDX_MAPPING,
)
from dora import Node
import cv2
import pyarrow as pa
import os
from pathlib import Path

VISION_DEFAULT_PATH = "robotics-diffusion-transformer/rdt-1b"
ROBOTIC_MODEL_NAME_OR_PATH = os.getenv(
@@ -174,15 +175,15 @@ def get_states(proprio):
)

state_elem_mask[:, STATE_INDICES] = True
states, state_elem_mask = states.to(DEVICE, dtype=DTYPE), state_elem_mask.to(
DEVICE, dtype=DTYPE
states, state_elem_mask = (
states.to(DEVICE, dtype=DTYPE),
state_elem_mask.to(DEVICE, dtype=DTYPE),
)
states = states[:, -1:, :] # only use the last state
return states, state_elem_mask, STATE_INDICES


def main():

rdt = get_policy()
lang_embeddings = get_language_embeddings()
vision_encoder, image_processor = get_vision_model()
@@ -195,11 +196,9 @@ def main():
frames = {}
joints = {}
with torch.no_grad():

for event in node:
event_type = event["type"]
if event_type == "INPUT":

event_id = event["id"]

if "image" in event_id:


node-hub/dora-rdt-1b/pyproject.toml (+4, -4)

@@ -29,12 +29,9 @@ huggingface_hub = "0.23.5"
# flash_attn = "^2.6.1" # Install using: pip install -U flash-attn --no-build-isolation


[tool.pylint.MASTER]
ignore-paths = '^dora_rdt_1b/RoboticsDiffusionTransformer.*$'

[tool.poetry.dev-dependencies]
pytest = "^8.3.4"
pylint = "^3.3.2"
ruff = ">= 0.9.1"

[tool.black]
extend-exclude = 'dora_rdt_1b/RoboticsDiffusionTransformer'
@@ -46,3 +43,6 @@ dora-rdt-1b = "dora_rdt_1b.main:main"
[build-system]
requires = ["poetry-core>=1.8.0"]
build-backend = "poetry.core.masonry.api"

[tool.ruff]
exclude = ["dora_rdt_1b/RoboticsDiffusionTransformer"]

node-hub/dora-rdt-1b/tests/test_dora_rdt_1b.py (+8, -9)

@@ -1,10 +1,10 @@
import os

import numpy as np
import pytest
import torch
import numpy as np
from PIL import Image
from torchvision import transforms
import os


CI = os.environ.get("CI")

@@ -20,8 +20,8 @@ def test_import_main():
# Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow.
# with pytest.raises(RuntimeError):
# main()
import dora_rdt_1b.RoboticsDiffusionTransformer as _
import dora_rdt_1b as _
import dora_rdt_1b.RoboticsDiffusionTransformer as _ # noqa
import dora_rdt_1b as _ # noqa


def test_download_policy():
@@ -44,7 +44,6 @@ def test_download_vision_model():


def test_download_language_embeddings():

## in the future we should add this test within CI
if CI:
return
@@ -55,7 +54,6 @@ def test_download_language_embeddings():


def test_load_dummy_image():

from dora_rdt_1b.main import config

# Load pretrained model (in HF style)
@@ -187,8 +185,9 @@ def test_dummy_states():
]

state_elem_mask[:, STATE_INDICES] = True
states, state_elem_mask = states.to(DEVICE, dtype=DTYPE), state_elem_mask.to(
DEVICE, dtype=DTYPE
states, state_elem_mask = (
states.to(DEVICE, dtype=DTYPE),
state_elem_mask.to(DEVICE, dtype=DTYPE),
)
states = states[:, -1:, :] # only use the last state
pytest.states = states


node-hub/dora-ugv/dora_ugv/main.py (+3, -2)

@@ -7,10 +7,11 @@ except ImportError as err:
)
raise err

from dora import Node
import pyarrow as pa
import os

import pyarrow as pa
from dora import Node


def main():
# Create an instance of HunterRobot


node-hub/dora-vad/dora_vad/main.py (+5, -5)

@@ -1,9 +1,10 @@
from dora import Node
import pyarrow as pa
import numpy as np
import os
from silero_vad import load_silero_vad, get_speech_timestamps

import numpy as np
import pyarrow as pa
import torch
from dora import Node
from silero_vad import get_speech_timestamps, load_silero_vad

model = load_silero_vad()
MIN_SILENCE_DURATION_MS = int(os.getenv("MIN_SILENCE_DURATION_MS", "200"))
@@ -38,7 +39,6 @@ def main():
len(speech_timestamps) > 0
and len(last_audios) > MIN_AUDIO_SAMPLING_DURAION_S
):

# Check if the audio is not cut at the end. And only return if there is a long time spent
if speech_timestamps[-1]["end"] == len(audio):
continue


node-hub/dora-yolo/dora_yolo/main.py (+1, -2)

@@ -3,9 +3,8 @@ import os

import numpy as np
import pyarrow as pa
from ultralytics import YOLO

from dora import Node
from ultralytics import YOLO


def main():


node-hub/llama-factory-recorder/llama_factory_recorder/main.py (+8, -7)

@@ -1,11 +1,12 @@
import os
import json
from dora import Node
import os
from pathlib import Path

import cv2
import numpy as np
import pyarrow as pa
from dora import Node
from PIL import Image
from pathlib import Path
import cv2

DEFAULT_QUESTION = os.getenv(
"DEFAULT_QUESTION",
@@ -94,9 +95,9 @@ def main():
pa.array([]) # initialize pyarrow array
node = Node()

assert os.getenv(
"LLAMA_FACTORY_ROOT_PATH"
), "LLAMA_FACTORY_ROOT_PATH is not set, Either git clone the repo or set the environment variable"
assert os.getenv("LLAMA_FACTORY_ROOT_PATH"), (
"LLAMA_FACTORY_ROOT_PATH is not set, Either git clone the repo or set the environment variable"
)
llama_factory_root_path = Path(os.getenv("LLAMA_FACTORY_ROOT_PATH")) / "data"

entry_name = os.getenv("ENTRY_NAME", "dora_demo")
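
The assert hunk above is a pure formatting change: Ruff's formatter (like recent Black releases) keeps the condition on one line and wraps the long failure message in parentheses instead of splitting the call across lines. A minimal sketch of the two layouts; running it without `LLAMA_FACTORY_ROOT_PATH` set simply trips the assertion, just as in the node itself:

```python
import os

# Old layout: the condition was split across lines, leaving the message
# dangling after the closing parenthesis.
#
#     assert os.getenv(
#         "LLAMA_FACTORY_ROOT_PATH"
#     ), "LLAMA_FACTORY_ROOT_PATH is not set, ..."
#
# New layout: condition on one line, parenthesized message on its own lines.
assert os.getenv("LLAMA_FACTORY_ROOT_PATH"), (
    "LLAMA_FACTORY_ROOT_PATH is not set, Either git clone the repo or set the environment variable"
)
```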


node-hub/opencv-plot/opencv_plot/main.py (+0, -3)

@@ -4,7 +4,6 @@ import os
import cv2
import numpy as np
import pyarrow as pa

from dora import Node

RUNNER_CI = True if os.getenv("CI") == "true" else False
@@ -76,7 +75,6 @@ def yuv420p_to_bgr_opencv(yuv_array, width, height):


def main():

# Handle dynamic nodes, ask for the name of the node in the dataflow, and the same values as the ENV variables.
parser = argparse.ArgumentParser(
description="OpenCV Plotter: This node is used to plot text and bounding boxes on an image."
@@ -168,7 +166,6 @@ def main():
plot.frame = cv2.imdecode(storage, cv2.IMREAD_COLOR)

elif encoding == "yuv420":

storage = storage.to_numpy()

# Convert back to BGR results in more saturated image.


node-hub/opencv-video-capture/opencv_video_capture/main.py (+0, -2)

@@ -5,7 +5,6 @@ import time
import cv2
import numpy as np
import pyarrow as pa

from dora import Node

RUNNER_CI = True if os.getenv("CI") == "true" else False
@@ -77,7 +76,6 @@ def main():
pa.array([]) # initialize pyarrow array

for event in node:

# Run this example in the CI for 10 seconds only.
if RUNNER_CI and time.time() - start_time > 10:
break


node-hub/pyarrow-assert/pyarrow_assert/main.py (+1, -2)

@@ -1,6 +1,6 @@
import argparse
import os
import ast
import os

import pyarrow as pa
from dora import Node
@@ -9,7 +9,6 @@ RUNNER_CI = True if os.getenv("CI") == "true" else False


def main():

# Handle dynamic nodes, ask for the name of the node in the dataflow, and the same values as the ENV variables.
parser = argparse.ArgumentParser(description="Simple arrow sender")



node-hub/pyarrow-sender/pyarrow_sender/main.py (+1, -3)

@@ -1,16 +1,14 @@
import argparse
import os
import ast
import os

import pyarrow as pa

from dora import Node

RUNNER_CI = True if os.getenv("CI") == "true" else False


def main():

# Handle dynamic nodes, ask for the name of the node in the dataflow, and the same values as the ENV variables.
parser = argparse.ArgumentParser(description="Simple arrow sender")



node-hub/terminal-input/terminal_input/main.py (+2, -3)

@@ -1,16 +1,15 @@
import argparse
import os
import ast
import os
import time
import pyarrow as pa

import pyarrow as pa
from dora import Node

RUNNER_CI = True if os.getenv("CI") == "true" else False


def main():

# Handle dynamic nodes, ask for the name of the node in the dataflow, and the same values as the ENV variables.
parser = argparse.ArgumentParser(description="Simple arrow sender")


