diff --git a/apis/python/node/dora/__init__.pyi b/apis/python/node/dora/__init__.pyi index a6dcb659..9e35f62f 100644 --- a/apis/python/node/dora/__init__.pyi +++ b/apis/python/node/dora/__init__.pyi @@ -1,86 +1,96 @@ -import dora -import pyarrow import typing +import pyarrow + +import dora + @typing.final class Enum: """Generic enumeration. -Derive from this class to define new enumerations.""" + Derive from this class to define new enumerations. + """ + __members__: mappingproxy = ... @typing.final class Node: """The custom node API lets you integrate `dora` into your application. -It allows you to retrieve input and send output in any fashion you want. + It allows you to retrieve input and send output in any fashion you want. -Use with: + Use with: -```python -from dora import Node + ```python + from dora import Node -node = Node() -```""" + node = Node() + ``` + """ def __init__(self, node_id: str=None) -> None: """The custom node API lets you integrate `dora` into your application. -It allows you to retrieve input and send output in any fashion you want. + It allows you to retrieve input and send output in any fashion you want. -Use with: + Use with: -```python -from dora import Node + ```python + from dora import Node -node = Node() -```""" + node = Node() + ``` + """ def dataflow_descriptor(self) -> dict: """Returns the full dataflow descriptor that this node is part of. -This method returns the parsed dataflow YAML file.""" + This method returns the parsed dataflow YAML file. + """ def dataflow_id(self) -> str: """Returns the dataflow id.""" def merge_external_events(self, subscription: dora.Ros2Subscription) -> None: """Merge an external event stream with dora main loop. -This currently only work with ROS2.""" + This currently only work with ROS2. + """ def next(self, timeout: float=None) -> dict: """`.next()` gives you the next input that the node has received. -It blocks until the next event becomes available. -You can use timeout in seconds to return if no input is available. -It will return `None` when all senders has been dropped. + It blocks until the next event becomes available. + You can use timeout in seconds to return if no input is available. + It will return `None` when all senders has been dropped. -```python -event = node.next() -``` + ```python + event = node.next() + ``` -You can also iterate over the event stream with a loop + You can also iterate over the event stream with a loop -```python -for event in node: -match event["type"]: -case "INPUT": -match event["id"]: -case "image": -```""" + ```python + for event in node: + match event["type"]: + case "INPUT": + match event["id"]: + case "image": + ``` + """ def send_output(self, output_id: str, data: pyarrow.Array, metadata: dict=None) -> None: """`send_output` send data from the node. -```python -Args: -output_id: str, -data: pyarrow.Array, -metadata: Option[Dict], -``` + ```python + Args: + output_id: str, + data: pyarrow.Array, + metadata: Option[Dict], + ``` -ex: + ex: -```python -node.send_output("string", b"string", {"open_telemetry_context": "7632e76"}) -```""" + ```python + node.send_output("string", b"string", {"open_telemetry_context": "7632e76"}) + ``` + """ def __iter__(self) -> typing.Any: """Implement iter(self).""" @@ -92,63 +102,66 @@ node.send_output("string", b"string", {"open_telemetry_context": "7632e76"}) class Ros2Context: """ROS2 Context holding all messages definition for receiving and sending messages to ROS2. 
-By default, Ros2Context will use env `AMENT_PREFIX_PATH` to search for message definition. + By default, Ros2Context will use env `AMENT_PREFIX_PATH` to search for message definition. -AMENT_PREFIX_PATH folder structure should be the following: + AMENT_PREFIX_PATH folder structure should be the following: -- For messages: /msg/.msg -- For services: /srv/.srv + - For messages: /msg/.msg + - For services: /srv/.srv -You can also use `ros_paths` if you don't want to use env variable. + You can also use `ros_paths` if you don't want to use env variable. -warning:: -dora Ros2 bridge functionality is considered **unstable**. It may be changed -at any point without it being considered a breaking change. + warning:: + dora Ros2 bridge functionality is considered **unstable**. It may be changed + at any point without it being considered a breaking change. -```python -context = Ros2Context() -```""" + ```python + context = Ros2Context() + ``` + """ def __init__(self, ros_paths: typing.List[str]=None) -> None: """ROS2 Context holding all messages definition for receiving and sending messages to ROS2. -By default, Ros2Context will use env `AMENT_PREFIX_PATH` to search for message definition. + By default, Ros2Context will use env `AMENT_PREFIX_PATH` to search for message definition. -AMENT_PREFIX_PATH folder structure should be the following: + AMENT_PREFIX_PATH folder structure should be the following: -- For messages: /msg/.msg -- For services: /srv/.srv + - For messages: /msg/.msg + - For services: /srv/.srv -You can also use `ros_paths` if you don't want to use env variable. + You can also use `ros_paths` if you don't want to use env variable. -warning:: -dora Ros2 bridge functionality is considered **unstable**. It may be changed -at any point without it being considered a breaking change. + warning:: + dora Ros2 bridge functionality is considered **unstable**. It may be changed + at any point without it being considered a breaking change. -```python -context = Ros2Context() -```""" + ```python + context = Ros2Context() + ``` + """ def new_node(self, name: str, namespace: str, options: dora.Ros2NodeOptions) -> dora.Ros2Node: """Create a new ROS2 node -```python -ros2_node = ros2_context.new_node( -"turtle_teleop", -"/ros2_demo", -Ros2NodeOptions(rosout=True), -) -``` + ```python + ros2_node = ros2_context.new_node( + "turtle_teleop", + "/ros2_demo", + Ros2NodeOptions(rosout=True), + ) + ``` -warning:: -dora Ros2 bridge functionality is considered **unstable**. It may be changed -at any point without it being considered a breaking change.""" + warning:: + dora Ros2 bridge functionality is considered **unstable**. It may be changed + at any point without it being considered a breaking change. + """ @typing.final class Ros2Durability: """DDS 2.2.3.4 DURABILITY""" - def __eq__(self, value: typing.Any) -> bool: + def __eq__(self, value: object) -> bool: """Return self==value.""" def __ge__(self, value: typing.Any) -> bool: @@ -166,11 +179,9 @@ class Ros2Durability: def __lt__(self, value: typing.Any) -> bool: """Return self bool: + def __ne__(self, value: object) -> bool: """Return self!=value.""" - def __repr__(self) -> str: - """Return repr(self).""" Persistent: Ros2Durability = ... Transient: Ros2Durability = ... TransientLocal: Ros2Durability = ... 
@@ -180,7 +191,7 @@ class Ros2Durability: class Ros2Liveliness: """DDS 2.2.3.11 LIVELINESS""" - def __eq__(self, value: typing.Any) -> bool: + def __eq__(self, value: object) -> bool: """Return self==value.""" def __ge__(self, value: typing.Any) -> bool: @@ -198,11 +209,9 @@ class Ros2Liveliness: def __lt__(self, value: typing.Any) -> bool: """Return self bool: + def __ne__(self, value: object) -> bool: """Return self!=value.""" - def __repr__(self) -> str: - """Return repr(self).""" Automatic: Ros2Liveliness = ... ManualByParticipant: Ros2Liveliness = ... ManualByTopic: Ros2Liveliness = ... @@ -211,41 +220,46 @@ class Ros2Liveliness: class Ros2Node: """ROS2 Node -warnings:: -- dora Ros2 bridge functionality is considered **unstable**. It may be changed -at any point without it being considered a breaking change. -- There's a known issue about ROS2 nodes not being discoverable by ROS2 -See: https://github.com/jhelovuo/ros2-client/issues/4""" + warnings:: + - dora Ros2 bridge functionality is considered **unstable**. It may be changed + at any point without it being considered a breaking change. + - There's a known issue about ROS2 nodes not being discoverable by ROS2 + See: https://github.com/jhelovuo/ros2-client/issues/4 + """ def create_publisher(self, topic: dora.Ros2Topic, qos: dora.Ros2QosPolicies=None) -> dora.Ros2Publisher: """Create a ROS2 publisher -```python -pose_publisher = ros2_node.create_publisher(turtle_pose_topic) -``` -warnings: -- dora Ros2 bridge functionality is considered **unstable**. It may be changed -at any point without it being considered a breaking change.""" + ```python + pose_publisher = ros2_node.create_publisher(turtle_pose_topic) + ``` + warnings: + - dora Ros2 bridge functionality is considered **unstable**. It may be changed + at any point without it being considered a breaking change. + """ def create_subscription(self, topic: dora.Ros2Topic, qos: dora.Ros2QosPolicies=None) -> dora.Ros2Subscription: """Create a ROS2 subscription -```python -pose_reader = ros2_node.create_subscription(turtle_pose_topic) -``` + ```python + pose_reader = ros2_node.create_subscription(turtle_pose_topic) + ``` + + Warnings: + - dora Ros2 bridge functionality is considered **unstable**. It may be changed + at any point without it being considered a breaking change. -warnings: -- dora Ros2 bridge functionality is considered **unstable**. It may be changed -at any point without it being considered a breaking change.""" + """ def create_topic(self, name: str, message_type: str, qos: dora.Ros2QosPolicies) -> dora.Ros2Topic: """Create a ROS2 topic to connect to. -```python -turtle_twist_topic = ros2_node.create_topic( -"/turtle1/cmd_vel", "geometry_msgs/Twist", topic_qos -) -```""" + ```python + turtle_twist_topic = ros2_node.create_topic( + "/turtle1/cmd_vel", "geometry_msgs/Twist", topic_qos + ) + ``` + """ @typing.final class Ros2NodeOptions: @@ -258,28 +272,31 @@ class Ros2NodeOptions: class Ros2Publisher: """ROS2 Publisher -warnings: -- dora Ros2 bridge functionality is considered **unstable**. It may be changed -at any point without it being considered a breaking change.""" + Warnings: + - dora Ros2 bridge functionality is considered **unstable**. It may be changed + at any point without it being considered a breaking change. + + """ def publish(self, data: pyarrow.Array) -> None: """Publish a message into ROS2 topic. -Remember that the data format should respect the structure of the ROS2 message using an arrow Structure. 
- -ex: -```python -gripper_command.publish( -pa.array( -[ -{ -"name": "gripper", -"cmd": np.float32(5), -} -] -), -) -```""" + Remember that the data format should respect the structure of the ROS2 message using an arrow Structure. + + ex: + ```python + gripper_command.publish( + pa.array( + [ + { + "name": "gripper", + "cmd": np.float32(5), + } + ] + ), + ) + ``` + """ @typing.final class Ros2QosPolicies: @@ -292,10 +309,11 @@ class Ros2QosPolicies: class Ros2Subscription: """ROS2 Subscription + Warnings: + - dora Ros2 bridge functionality is considered **unstable**. It may be changed + at any point without it being considered a breaking change. -warnings: -- dora Ros2 bridge functionality is considered **unstable**. It may be changed -at any point without it being considered a breaking change.""" + """ def next(self):... @@ -303,9 +321,11 @@ at any point without it being considered a breaking change.""" class Ros2Topic: """ROS2 Topic -warnings: -- dora Ros2 bridge functionality is considered **unstable**. It may be changed -at any point without it being considered a breaking change.""" + Warnings: + - dora Ros2 bridge functionality is considered **unstable**. It may be changed + at any point without it being considered a breaking change. + + """ def start_runtime() -> None: - """Start a runtime for Operators""" \ No newline at end of file + """Start a runtime for Operators""" diff --git a/apis/python/node/generate_stubs.py b/apis/python/node/generate_stubs.py index db9e3f83..39aae8e1 100644 --- a/apis/python/node/generate_stubs.py +++ b/apis/python/node/generate_stubs.py @@ -5,8 +5,9 @@ import inspect import logging import re import subprocess +from collections.abc import Mapping from functools import reduce -from typing import Any, Dict, List, Mapping, Optional, Set, Tuple, Union +from typing import Any, Dict, List, Optional, Set, Tuple, Union def path_to_type(*elements: str) -> ast.AST: @@ -70,13 +71,11 @@ def module_stubs(module: Any) -> ast.Module: functions = [] for member_name, member_value in inspect.getmembers(module): element_path = [module.__name__, member_name] - if member_name.startswith("__"): - pass - elif member_name.startswith("DoraStatus"): + if member_name.startswith("__") or member_name.startswith("DoraStatus"): pass elif inspect.isclass(member_value): classes.append( - class_stubs(member_name, member_value, element_path, types_to_import) + class_stubs(member_name, member_value, element_path, types_to_import), ) elif inspect.isbuiltin(member_value): functions.append( @@ -86,7 +85,7 @@ def module_stubs(module: Any) -> ast.Module: element_path, types_to_import, in_class=False, - ) + ), ) else: logging.warning(f"Unsupported root construction {member_name}") @@ -99,7 +98,7 @@ def module_stubs(module: Any) -> ast.Module: def class_stubs( - cls_name: str, cls_def: Any, element_path: List[str], types_to_import: Set[str] + cls_name: str, cls_def: Any, element_path: List[str], types_to_import: Set[str], ) -> ast.ClassDef: attributes: List[ast.AST] = [] methods: List[ast.AST] = [] @@ -123,7 +122,7 @@ def class_stubs( except ValueError as e: if "no signature found" not in str(e): raise ValueError( - f"Error while parsing signature of {cls_name}.__init_" + f"Error while parsing signature of {cls_name}.__init_", ) from e elif ( member_value == OBJECT_MEMBERS.get(member_name) @@ -133,8 +132,8 @@ def class_stubs( elif inspect.isdatadescriptor(member_value): attributes.extend( data_descriptor_stub( - member_name, member_value, current_element_path, types_to_import - ) + member_name, 
member_value, current_element_path, types_to_import, + ), ) elif inspect.isroutine(member_value): (magic_methods if member_name.startswith("__") else methods).append( @@ -144,7 +143,7 @@ def class_stubs( current_element_path, types_to_import, in_class=True, - ) + ), ) elif member_name == "__match_args__": constants.append( @@ -153,28 +152,28 @@ def class_stubs( annotation=ast.Subscript( value=path_to_type("tuple"), slice=ast.Tuple( - elts=[path_to_type("str"), ast.Ellipsis()], ctx=ast.Load() + elts=[path_to_type("str"), ast.Ellipsis()], ctx=ast.Load(), ), ctx=ast.Load(), ), value=ast.Constant(member_value), simple=1, - ) + ), ) elif member_value is not None: constants.append( ast.AnnAssign( target=ast.Name(id=member_name, ctx=ast.Store()), annotation=concatenated_path_to_type( - member_value.__class__.__name__, element_path, types_to_import + member_value.__class__.__name__, element_path, types_to_import, ), value=ast.Ellipsis(), simple=1, - ) + ), ) else: logging.warning( - f"Unsupported member {member_name} of class {'.'.join(element_path)}" + f"Unsupported member {member_name} of class {'.'.join(element_path)}", ) doc = inspect.getdoc(cls_def) @@ -212,7 +211,7 @@ def data_descriptor_stub( doc_comment = m[0] elif len(m) > 1: raise ValueError( - f"Multiple return annotations found with :return: in {'.'.join(element_path)} documentation" + f"Multiple return annotations found with :return: in {'.'.join(element_path)} documentation", ) assign = ast.AnnAssign( @@ -263,7 +262,7 @@ def arguments_stub( types_to_import: Set[str], ) -> ast.arguments: real_parameters: Mapping[str, inspect.Parameter] = inspect.signature( - callable_def + callable_def, ).parameters if callable_name == "__init__": real_parameters = { @@ -285,19 +284,19 @@ def arguments_stub( # Types from comment for match in re.findall( - r"^ *:type *([a-zA-Z0-9_]+): ([^\n]*) *$", doc, re.MULTILINE + r"^ *:type *([a-zA-Z0-9_]+): ([^\n]*) *$", doc, re.MULTILINE, ): if match[0] not in real_parameters: raise ValueError( f"The parameter {match[0]} of {'.'.join(element_path)} " - "is defined in the documentation but not in the function signature" + "is defined in the documentation but not in the function signature", ) type = match[1] if type.endswith(", optional"): optional_params.add(match[0]) type = type[:-10] parsed_param_types[match[0]] = convert_type_from_doc( - type, element_path, types_to_import + type, element_path, types_to_import, ) # we parse the parameters @@ -312,10 +311,10 @@ def arguments_stub( if param.name != "self" and param.name not in parsed_param_types: raise ValueError( f"The parameter {param.name} of {'.'.join(element_path)} " - "has no type definition in the function documentation" + "has no type definition in the function documentation", ) param_ast = ast.arg( - arg=param.name, annotation=parsed_param_types.get(param.name) + arg=param.name, annotation=parsed_param_types.get(param.name), ) default_ast = None @@ -324,12 +323,12 @@ def arguments_stub( if param.name not in optional_params: raise ValueError( f"Parameter {param.name} of {'.'.join(element_path)} " - "is optional according to the type but not flagged as such in the doc" + "is optional according to the type but not flagged as such in the doc", ) elif param.name in optional_params: raise ValueError( f"Parameter {param.name} of {'.'.join(element_path)} " - "is optional according to the documentation but has no default value" + "is optional according to the documentation but has no default value", ) if param.kind == param.POSITIONAL_ONLY: @@ -359,7 +358,7 @@ 
def arguments_stub( def returns_stub( - callable_name: str, doc: str, element_path: List[str], types_to_import: Set[str] + callable_name: str, doc: str, element_path: List[str], types_to_import: Set[str], ) -> Optional[ast.AST]: m = re.findall(r"^ *:rtype: *([^\n]*) *$", doc, re.MULTILINE) if len(m) == 0: @@ -368,24 +367,24 @@ def returns_stub( return builtin[1] raise ValueError( f"The return type of {'.'.join(element_path)} " - "has no type definition using :rtype: in the function documentation" + "has no type definition using :rtype: in the function documentation", ) if len(m) > 1: raise ValueError( - f"Multiple return type annotations found with :rtype: for {'.'.join(element_path)}" + f"Multiple return type annotations found with :rtype: for {'.'.join(element_path)}", ) return convert_type_from_doc(m[0], element_path, types_to_import) def convert_type_from_doc( - type_str: str, element_path: List[str], types_to_import: Set[str] + type_str: str, element_path: List[str], types_to_import: Set[str], ) -> ast.AST: type_str = type_str.strip() return parse_type_to_ast(type_str, element_path, types_to_import) def parse_type_to_ast( - type_str: str, element_path: List[str], types_to_import: Set[str] + type_str: str, element_path: List[str], types_to_import: Set[str], ) -> ast.AST: # let's tokenize tokens = [] @@ -420,9 +419,7 @@ def parse_type_to_ast( or_groups: List[List[str]] = [[]] print(sequence) # TODO: Fix sequence - if "Ros" in sequence and "2" in sequence: - sequence = ["".join(sequence)] - elif "dora.Ros" in sequence and "2" in sequence: + if ("Ros" in sequence and "2" in sequence) or ("dora.Ros" in sequence and "2" in sequence): sequence = ["".join(sequence)] for e in sequence: @@ -432,14 +429,14 @@ def parse_type_to_ast( or_groups[-1].append(e) if any(not g for g in or_groups): raise ValueError( - f"Not able to parse type '{type_str}' used by {'.'.join(element_path)}" + f"Not able to parse type '{type_str}' used by {'.'.join(element_path)}", ) new_elements: List[ast.AST] = [] for group in or_groups: if len(group) == 1 and isinstance(group[0], str): new_elements.append( - concatenated_path_to_type(group[0], element_path, types_to_import) + concatenated_path_to_type(group[0], element_path, types_to_import), ) elif ( len(group) == 2 @@ -449,15 +446,15 @@ def parse_type_to_ast( new_elements.append( ast.Subscript( value=concatenated_path_to_type( - group[0], element_path, types_to_import + group[0], element_path, types_to_import, ), slice=parse_sequence(group[1]), ctx=ast.Load(), - ) + ), ) else: raise ValueError( - f"Not able to parse type '{type_str}' used by {'.'.join(element_path)}" + f"Not able to parse type '{type_str}' used by {'.'.join(element_path)}", ) return reduce( lambda left, right: ast.BinOp(left=left, op=ast.BitOr(), right=right), @@ -468,12 +465,12 @@ def parse_type_to_ast( def concatenated_path_to_type( - path: str, element_path: List[str], types_to_import: Set[str] + path: str, element_path: List[str], types_to_import: Set[str], ) -> ast.AST: parts = path.split(".") if any(not p for p in parts): raise ValueError( - f"Not able to parse type '{path}' used by {'.'.join(element_path)}" + f"Not able to parse type '{path}' used by {'.'.join(element_path)}", ) if len(parts) > 1: types_to_import.add(".".join(parts[:-1])) @@ -497,10 +494,10 @@ def format_with_ruff(file: str) -> None: if __name__ == "__main__": parser = argparse.ArgumentParser( - description="Extract Python type stub from a python module." 
+ description="Extract Python type stub from a python module.", ) parser.add_argument( - "module_name", help="Name of the Python module for which generate stubs" + "module_name", help="Name of the Python module for which generate stubs", ) parser.add_argument( "out", @@ -508,7 +505,7 @@ if __name__ == "__main__": type=argparse.FileType("wt"), ) parser.add_argument( - "--ruff", help="Formats the generated stubs using Ruff", action="store_true" + "--ruff", help="Formats the generated stubs using Ruff", action="store_true", ) args = parser.parse_args() stub_content = ast.unparse(module_stubs(importlib.import_module(args.module_name))) diff --git a/binaries/cli/src/template/python/__node-name__/__node_name__/__init__.py b/binaries/cli/src/template/python/__node-name__/__node_name__/__init__.py index ac3cbef9..cde7a377 100644 --- a/binaries/cli/src/template/python/__node-name__/__node_name__/__init__.py +++ b/binaries/cli/src/template/python/__node-name__/__node_name__/__init__.py @@ -5,7 +5,7 @@ readme_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "README.m # Read the content of the README file try: - with open(readme_path, "r", encoding="utf-8") as f: + with open(readme_path, encoding="utf-8") as f: __doc__ = f.read() except FileNotFoundError: __doc__ = "README file not found." diff --git a/binaries/cli/src/template/python/__node-name__/__node_name__/__main__.py b/binaries/cli/src/template/python/__node-name__/__node_name__/__main__.py index bcbfde6d..40e2b013 100644 --- a/binaries/cli/src/template/python/__node-name__/__node_name__/__main__.py +++ b/binaries/cli/src/template/python/__node-name__/__node_name__/__main__.py @@ -1,5 +1,4 @@ from .main import main - if __name__ == "__main__": main() diff --git a/binaries/cli/src/template/python/__node-name__/__node_name__/main.py b/binaries/cli/src/template/python/__node-name__/__node_name__/main.py index 823e2e09..51ecda33 100644 --- a/binaries/cli/src/template/python/__node-name__/__node_name__/main.py +++ b/binaries/cli/src/template/python/__node-name__/__node_name__/main.py @@ -1,5 +1,5 @@ -from dora import Node import pyarrow as pa +from dora import Node def main(): @@ -12,13 +12,13 @@ def main(): f"""Node received: id: {event["id"]}, value: {event["value"]}, - metadata: {event["metadata"]}""" + metadata: {event["metadata"]}""", ) elif event["id"] == "my_input_id": # Warning: Make sure to add my_output_id and my_input_id within the dataflow. node.send_output( - output_id="my_output_id", data=pa.array([1, 2, 3]), metadata={} + output_id="my_output_id", data=pa.array([1, 2, 3]), metadata={}, ) diff --git a/binaries/cli/src/template/python/operator/operator-template.py b/binaries/cli/src/template/python/operator/operator-template.py index a6713444..c67e46c6 100644 --- a/binaries/cli/src/template/python/operator/operator-template.py +++ b/binaries/cli/src/template/python/operator/operator-template.py @@ -2,22 +2,18 @@ from dora import DoraStatus class Operator: - """ - Template docstring + """Template docstring """ def __init__(self): """Called on initialisation""" - pass def on_event( self, dora_event, send_output, ) -> DoraStatus: - """ - - Args: + """Args: dora_event: Event containing an `id`, `data` and `metadata`. 
send_output Callable[[str, bytes | pa.Array, Optional[dict]], None]: Function for sending output to the dataflow: @@ -35,11 +31,10 @@ class Operator: """ if dora_event["type"] == "INPUT": print( - f"Received input {dora_event['id']}, with data: {dora_event['value']}" + f"Received input {dora_event['id']}, with data: {dora_event['value']}", ) return DoraStatus.CONTINUE def __del__(self): """Called before being deleted""" - pass diff --git a/binaries/cli/src/template/python/talker/talker-template.py b/binaries/cli/src/template/python/talker/talker-template.py index 7cc5456a..f99b2792 100644 --- a/binaries/cli/src/template/python/talker/talker-template.py +++ b/binaries/cli/src/template/python/talker/talker-template.py @@ -1,5 +1,5 @@ -from dora import Node import pyarrow as pa +from dora import Node def main(): @@ -11,7 +11,7 @@ def main(): f"""Node received: id: {event["id"]}, value: {event["value"]}, - metadata: {event["metadata"]}""" + metadata: {event["metadata"]}""", ) node.send_output("speech", pa.array(["Hello World"])) diff --git a/examples/camera/notebook.ipynb b/examples/camera/notebook.ipynb index d8ecd73c..4875dfcc 100644 --- a/examples/camera/notebook.ipynb +++ b/examples/camera/notebook.ipynb @@ -6,8 +6,8 @@ "metadata": {}, "outputs": [], "source": [ - "from dora import Node\n", "import IPython.display\n", + "from dora import Node\n", "from IPython.display import clear_output" ] }, diff --git a/examples/cuda-benchmark/demo_receiver.py b/examples/cuda-benchmark/demo_receiver.py index 1e207c2b..461df20a 100644 --- a/examples/cuda-benchmark/demo_receiver.py +++ b/examples/cuda-benchmark/demo_receiver.py @@ -1,17 +1,15 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- import os import time - +import numpy as np import pyarrow as pa -from tqdm import tqdm +import torch from dora import Node -from dora.cuda import ipc_buffer_to_ipc_handle, cudabuffer_to_torch +from dora.cuda import cudabuffer_to_torch, ipc_buffer_to_ipc_handle from helper import record_results -import torch -import numpy as np +from tqdm import tqdm torch.tensor([], device="cuda") @@ -31,7 +29,7 @@ NAME = f"dora torch {DEVICE}" ctx = pa.cuda.Context() -print("") +print() print("Receiving 40MB packets using default dora-rs") while True: @@ -79,7 +77,7 @@ pbar.close() time.sleep(2) -print("") +print() print("----") print(f"Node communication duration with default dora-rs: {mean_cpu/1000:.1f}ms") print(f"Node communication duration with dora CUDA->CUDA: {mean_cuda/1000:.1f}ms") diff --git a/examples/cuda-benchmark/demo_sender.py b/examples/cuda-benchmark/demo_sender.py index 11868462..0c13c6d3 100644 --- a/examples/cuda-benchmark/demo_sender.py +++ b/examples/cuda-benchmark/demo_sender.py @@ -1,13 +1,13 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- -import time import os +import time + import numpy as np import pyarrow as pa +import torch from dora import Node from dora.cuda import torch_to_ipc_buffer -import torch torch.tensor([], device="cuda") @@ -24,7 +24,7 @@ time.sleep(1) # test latency first for size in SIZES: - for _ in range(0, 100): + for _ in range(100): now = time.time() random_data = np.random.randint(1000, size=size, dtype=np.int64) torch_tensor = torch.tensor(random_data, dtype=torch.int64, device="cuda") @@ -51,7 +51,7 @@ DEVICE = "cuda" time.sleep(1) for size in SIZES: - for _ in range(0, 100): + for _ in range(100): now = time.time() random_data = np.random.randint(1000, size=size, dtype=np.int64) torch_tensor = torch.tensor(random_data, dtype=torch.int64, device="cuda") diff --git 
a/examples/cuda-benchmark/receiver.py b/examples/cuda-benchmark/receiver.py index 7ca2aae7..7b700a9c 100644 --- a/examples/cuda-benchmark/receiver.py +++ b/examples/cuda-benchmark/receiver.py @@ -1,16 +1,14 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- import os import time - import pyarrow as pa -from tqdm import tqdm +import torch from dora import Node -from dora.cuda import ipc_buffer_to_ipc_handle, cudabuffer_to_torch +from dora.cuda import cudabuffer_to_torch, ipc_buffer_to_ipc_handle from helper import record_results -import torch +from tqdm import tqdm torch.tensor([], device="cuda") diff --git a/examples/cuda-benchmark/sender.py b/examples/cuda-benchmark/sender.py index be3a64b1..ec7fcb94 100644 --- a/examples/cuda-benchmark/sender.py +++ b/examples/cuda-benchmark/sender.py @@ -1,13 +1,13 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- -import time import os +import time + import numpy as np import pyarrow as pa +import torch from dora import Node from dora.cuda import torch_to_ipc_buffer -import torch torch.tensor([], device="cuda") @@ -22,7 +22,7 @@ node = Node() time.sleep(4) # test latency first for size in SIZES: - for _ in range(0, 100): + for _ in range(100): now = time.time() random_data = np.random.randint(1000, size=size, dtype=np.int64) torch_tensor = torch.tensor(random_data, dtype=torch.int64, device="cuda") diff --git a/examples/piper/convert.py b/examples/piper/convert.py index d2715dcc..5f4be030 100644 --- a/examples/piper/convert.py +++ b/examples/piper/convert.py @@ -3,8 +3,7 @@ from scipy.spatial.transform import Rotation as R def convert_quaternion_to_euler(quat): - """ - Convert Quaternion (xyzw) to Euler angles (rpy) + """Convert Quaternion (xyzw) to Euler angles (rpy) """ # Normalize quat = quat / np.linalg.norm(quat) @@ -14,8 +13,7 @@ def convert_quaternion_to_euler(quat): def convert_euler_to_quaternion(euler): - """ - Convert Euler angles (rpy) to Quaternion (xyzw) + """Convert Euler angles (rpy) to Quaternion (xyzw) """ quat = R.from_euler("xyz", euler).as_quat() @@ -23,8 +21,7 @@ def convert_euler_to_quaternion(euler): def convert_euler_to_rotation_matrix(euler): - """ - Convert Euler angles (rpy) to rotation matrix (3x3). + """Convert Euler angles (rpy) to rotation matrix (3x3). """ quat = R.from_euler("xyz", euler).as_matrix() @@ -32,8 +29,7 @@ def convert_euler_to_rotation_matrix(euler): def convert_rotation_matrix_to_euler(rotmat): - """ - Convert rotation matrix (3x3) to Euler angles (rpy). + """Convert rotation matrix (3x3) to Euler angles (rpy). 
""" r = R.from_matrix(rotmat) euler = r.as_euler("xyz", degrees=False) diff --git a/examples/piper/dummy_inference.py b/examples/piper/dummy_inference.py index 0a970c97..08732eed 100644 --- a/examples/piper/dummy_inference.py +++ b/examples/piper/dummy_inference.py @@ -215,11 +215,12 @@ pred = np.array( 0.0913, 0.1221, ], - ] - ] + ], + ], ) import time + import pyarrow as pa data = pred[0] diff --git a/examples/piper/dummy_inference_2.py b/examples/piper/dummy_inference_2.py index 0d764b32..946a2a8c 100644 --- a/examples/piper/dummy_inference_2.py +++ b/examples/piper/dummy_inference_2.py @@ -1,7 +1,5 @@ -from dora import Node - - import h5py +from dora import Node f = h5py.File("data/episode_0.hdf5", "r") @@ -10,19 +8,19 @@ data = f["action"][:] STATE_VEC_IDX_MAPPING = { # [0, 10): right arm joint positions - **{"arm_joint_{}_pos".format(i): i for i in range(10)}, - **{"right_arm_joint_{}_pos".format(i): i for i in range(10)}, + **{f"arm_joint_{i}_pos": i for i in range(10)}, + **{f"right_arm_joint_{i}_pos": i for i in range(10)}, # [10, 15): right gripper joint positions - **{"gripper_joint_{}_pos".format(i): i + 10 for i in range(5)}, - **{"right_gripper_joint_{}_pos".format(i): i + 10 for i in range(5)}, + **{f"gripper_joint_{i}_pos": i + 10 for i in range(5)}, + **{f"right_gripper_joint_{i}_pos": i + 10 for i in range(5)}, "gripper_open": 10, # alias of right_gripper_joint_0_pos "right_gripper_open": 10, # [15, 25): right arm joint velocities - **{"arm_joint_{}_vel".format(i): i + 15 for i in range(10)}, - **{"right_arm_joint_{}_vel".format(i): i + 15 for i in range(10)}, + **{f"arm_joint_{i}_vel": i + 15 for i in range(10)}, + **{f"right_arm_joint_{i}_vel": i + 15 for i in range(10)}, # [25, 30): right gripper joint velocities - **{"gripper_joint_{}_vel".format(i): i + 25 for i in range(5)}, - **{"right_gripper_joint_{}_vel".format(i): i + 25 for i in range(5)}, + **{f"gripper_joint_{i}_vel": i + 25 for i in range(5)}, + **{f"right_gripper_joint_{i}_vel": i + 25 for i in range(5)}, "gripper_open_vel": 25, # alias of right_gripper_joint_0_vel "right_gripper_open_vel": 25, # [30, 33): right end effector positions @@ -61,14 +59,14 @@ STATE_VEC_IDX_MAPPING = { "right_eef_angular_vel_yaw": 44, # [45, 50): reserved # [50, 60): left arm joint positions - **{"left_arm_joint_{}_pos".format(i): i + 50 for i in range(10)}, + **{f"left_arm_joint_{i}_pos": i + 50 for i in range(10)}, # [60, 65): left gripper joint positions - **{"left_gripper_joint_{}_pos".format(i): i + 60 for i in range(5)}, + **{f"left_gripper_joint_{i}_pos": i + 60 for i in range(5)}, "left_gripper_open": 60, # alias of left_gripper_joint_0_pos # [65, 75): left arm joint velocities - **{"left_arm_joint_{}_vel".format(i): i + 65 for i in range(10)}, + **{f"left_arm_joint_{i}_vel": i + 65 for i in range(10)}, # [75, 80): left gripper joint velocities - **{"left_gripper_joint_{}_vel".format(i): i + 75 for i in range(5)}, + **{f"left_gripper_joint_{i}_vel": i + 75 for i in range(5)}, "left_gripper_open_vel": 75, # alias of left_gripper_joint_0_vel # [80, 83): left end effector positions "left_eef_pos_x": 80, @@ -99,6 +97,7 @@ STATE_VEC_IDX_MAPPING = { } import time + import pyarrow as pa node = Node() @@ -109,17 +108,17 @@ RIGHT_UNI_STATE_INDICES = [ STATE_VEC_IDX_MAPPING[f"right_arm_joint_{i}_pos"] for i in range(6) ] + [STATE_VEC_IDX_MAPPING["right_gripper_open"]] MOBILE_BASE_UNI_STATE_INDICES = [STATE_VEC_IDX_MAPPING["base_vel_x"]] + [ - STATE_VEC_IDX_MAPPING["base_angular_vel"] + STATE_VEC_IDX_MAPPING["base_angular_vel"], ] 
for joint in data: node.send_output( - "jointstate_left", pa.array(joint[LEFT_UNI_STATE_INDICES], type=pa.float32()) + "jointstate_left", pa.array(joint[LEFT_UNI_STATE_INDICES], type=pa.float32()), ) node.send_output( - "jointstate_right", pa.array(joint[RIGHT_UNI_STATE_INDICES], type=pa.float32()) + "jointstate_right", pa.array(joint[RIGHT_UNI_STATE_INDICES], type=pa.float32()), ) node.send_output( - "mobile_base", pa.array(joint[MOBILE_BASE_UNI_STATE_INDICES], type=pa.float32()) + "mobile_base", pa.array(joint[MOBILE_BASE_UNI_STATE_INDICES], type=pa.float32()), ) time.sleep(0.05) diff --git a/examples/piper/post_process_action.py b/examples/piper/post_process_action.py index 96f27b39..1da02a60 100644 --- a/examples/piper/post_process_action.py +++ b/examples/piper/post_process_action.py @@ -4,6 +4,7 @@ node = Node() import time + import pyarrow as pa for event in node: @@ -25,7 +26,7 @@ for event in node: node.send_output("jointstate_left", pa.array(action[:7], type=pa.float32())) node.send_output( - "jointstate_right", pa.array(action[7:], type=pa.float32()) + "jointstate_right", pa.array(action[7:], type=pa.float32()), ) time.sleep(0.02) print(actions) diff --git a/examples/piper/record.py b/examples/piper/record.py index c12d0b5e..ec00de04 100644 --- a/examples/piper/record.py +++ b/examples/piper/record.py @@ -1,30 +1,29 @@ -import h5py - -import os import datetime +import os -from dora import Node +import h5py import numpy as np from convert import ( - convert_euler_to_rotation_matrix, compute_ortho6d_from_rotation_matrix, + convert_euler_to_rotation_matrix, ) +from dora import Node STATE_VEC_IDX_MAPPING = { # [0, 10): right arm joint positions - **{"arm_joint_{}_pos".format(i): i for i in range(10)}, - **{"right_arm_joint_{}_pos".format(i): i for i in range(10)}, + **{f"arm_joint_{i}_pos": i for i in range(10)}, + **{f"right_arm_joint_{i}_pos": i for i in range(10)}, # [10, 15): right gripper joint positions - **{"gripper_joint_{}_pos".format(i): i + 10 for i in range(5)}, - **{"right_gripper_joint_{}_pos".format(i): i + 10 for i in range(5)}, + **{f"gripper_joint_{i}_pos": i + 10 for i in range(5)}, + **{f"right_gripper_joint_{i}_pos": i + 10 for i in range(5)}, "gripper_open": 10, # alias of right_gripper_joint_0_pos "right_gripper_open": 10, # [15, 25): right arm joint velocities - **{"arm_joint_{}_vel".format(i): i + 15 for i in range(10)}, - **{"right_arm_joint_{}_vel".format(i): i + 15 for i in range(10)}, + **{f"arm_joint_{i}_vel": i + 15 for i in range(10)}, + **{f"right_arm_joint_{i}_vel": i + 15 for i in range(10)}, # [25, 30): right gripper joint velocities - **{"gripper_joint_{}_vel".format(i): i + 25 for i in range(5)}, - **{"right_gripper_joint_{}_vel".format(i): i + 25 for i in range(5)}, + **{f"gripper_joint_{i}_vel": i + 25 for i in range(5)}, + **{f"right_gripper_joint_{i}_vel": i + 25 for i in range(5)}, "gripper_open_vel": 25, # alias of right_gripper_joint_0_vel "right_gripper_open_vel": 25, # [30, 33): right end effector positions @@ -63,14 +62,14 @@ STATE_VEC_IDX_MAPPING = { "right_eef_angular_vel_yaw": 44, # [45, 50): reserved # [50, 60): left arm joint positions - **{"left_arm_joint_{}_pos".format(i): i + 50 for i in range(10)}, + **{f"left_arm_joint_{i}_pos": i + 50 for i in range(10)}, # [60, 65): left gripper joint positions - **{"left_gripper_joint_{}_pos".format(i): i + 60 for i in range(5)}, + **{f"left_gripper_joint_{i}_pos": i + 60 for i in range(5)}, "left_gripper_open": 60, # alias of left_gripper_joint_0_pos # [65, 75): left arm joint velocities 
- **{"left_arm_joint_{}_vel".format(i): i + 65 for i in range(10)}, + **{f"left_arm_joint_{i}_vel": i + 65 for i in range(10)}, # [75, 80): left gripper joint velocities - **{"left_gripper_joint_{}_vel".format(i): i + 75 for i in range(5)}, + **{f"left_gripper_joint_{i}_vel": i + 75 for i in range(5)}, "left_gripper_open_vel": 75, # alias of left_gripper_joint_0_vel # [80, 83): left end effector positions "left_eef_pos_x": 80, @@ -193,9 +192,7 @@ for event in node: elif char == "s": start = True - elif "image" in event["id"]: - tmp_dict[event["id"]] = event["value"].to_numpy() - elif "qpos" in event["id"]: + elif "image" in event["id"] or "qpos" in event["id"]: tmp_dict[event["id"]] = event["value"].to_numpy() elif "pose" in event["id"]: value = event["value"].to_numpy() @@ -213,7 +210,7 @@ for event in node: ortho6d[3], ortho6d[4], ortho6d[5], - ] + ], ) tmp_dict[event["id"]] = values elif "base_vel" in event["id"]: @@ -230,7 +227,7 @@ for event in node: tmp_dict["/observations/pose_left"], tmp_dict["/observations/pose_right"], # tmp_dict["/observations/base_vel"], - ] + ], ) UNI_STATE_INDICES = ( [STATE_VEC_IDX_MAPPING[f"left_arm_joint_{i}_pos"] for i in range(6)] @@ -264,11 +261,11 @@ for event in node: # We reproduce obs and action data_dict["/action"].append(universal_vec) data_dict["/observations/images/cam_high"].append( - tmp_dict["/observations/images/cam_high"] + tmp_dict["/observations/images/cam_high"], ) data_dict["/observations/images/cam_left_wrist"].append( - tmp_dict["/observations/images/cam_left_wrist"] + tmp_dict["/observations/images/cam_left_wrist"], ) data_dict["/observations/images/cam_right_wrist"].append( - tmp_dict["/observations/images/cam_right_wrist"] + tmp_dict["/observations/images/cam_right_wrist"], ) diff --git a/examples/piper/replay.py b/examples/piper/replay.py index 55fe3af4..31c0ac7c 100644 --- a/examples/piper/replay.py +++ b/examples/piper/replay.py @@ -1,8 +1,7 @@ -from dora import Node - +import os import h5py -import os +from dora import Node EPISODE_PATH = os.getenv("EPISODE_PATH", "data/episode_0.hdf5") @@ -13,19 +12,19 @@ data = f["action"][:] STATE_VEC_IDX_MAPPING = { # [0, 10): right arm joint positions - **{"arm_joint_{}_pos".format(i): i for i in range(10)}, - **{"right_arm_joint_{}_pos".format(i): i for i in range(10)}, + **{f"arm_joint_{i}_pos": i for i in range(10)}, + **{f"right_arm_joint_{i}_pos": i for i in range(10)}, # [10, 15): right gripper joint positions - **{"gripper_joint_{}_pos".format(i): i + 10 for i in range(5)}, - **{"right_gripper_joint_{}_pos".format(i): i + 10 for i in range(5)}, + **{f"gripper_joint_{i}_pos": i + 10 for i in range(5)}, + **{f"right_gripper_joint_{i}_pos": i + 10 for i in range(5)}, "gripper_open": 10, # alias of right_gripper_joint_0_pos "right_gripper_open": 10, # [15, 25): right arm joint velocities - **{"arm_joint_{}_vel".format(i): i + 15 for i in range(10)}, - **{"right_arm_joint_{}_vel".format(i): i + 15 for i in range(10)}, + **{f"arm_joint_{i}_vel": i + 15 for i in range(10)}, + **{f"right_arm_joint_{i}_vel": i + 15 for i in range(10)}, # [25, 30): right gripper joint velocities - **{"gripper_joint_{}_vel".format(i): i + 25 for i in range(5)}, - **{"right_gripper_joint_{}_vel".format(i): i + 25 for i in range(5)}, + **{f"gripper_joint_{i}_vel": i + 25 for i in range(5)}, + **{f"right_gripper_joint_{i}_vel": i + 25 for i in range(5)}, "gripper_open_vel": 25, # alias of right_gripper_joint_0_vel "right_gripper_open_vel": 25, # [30, 33): right end effector positions @@ -64,14 +63,14 @@ 
STATE_VEC_IDX_MAPPING = { "right_eef_angular_vel_yaw": 44, # [45, 50): reserved # [50, 60): left arm joint positions - **{"left_arm_joint_{}_pos".format(i): i + 50 for i in range(10)}, + **{f"left_arm_joint_{i}_pos": i + 50 for i in range(10)}, # [60, 65): left gripper joint positions - **{"left_gripper_joint_{}_pos".format(i): i + 60 for i in range(5)}, + **{f"left_gripper_joint_{i}_pos": i + 60 for i in range(5)}, "left_gripper_open": 60, # alias of left_gripper_joint_0_pos # [65, 75): left arm joint velocities - **{"left_arm_joint_{}_vel".format(i): i + 65 for i in range(10)}, + **{f"left_arm_joint_{i}_vel": i + 65 for i in range(10)}, # [75, 80): left gripper joint velocities - **{"left_gripper_joint_{}_vel".format(i): i + 75 for i in range(5)}, + **{f"left_gripper_joint_{i}_vel": i + 75 for i in range(5)}, "left_gripper_open_vel": 75, # alias of left_gripper_joint_0_vel # [80, 83): left end effector positions "left_eef_pos_x": 80, @@ -102,6 +101,7 @@ STATE_VEC_IDX_MAPPING = { } import time + import pyarrow as pa node = Node() @@ -112,15 +112,15 @@ RIGHT_UNI_STATE_INDICES = [ STATE_VEC_IDX_MAPPING[f"right_arm_joint_{i}_pos"] for i in range(6) ] + [STATE_VEC_IDX_MAPPING["right_gripper_open"]] MOBILE_BASE_UNI_STATE_INDICES = [STATE_VEC_IDX_MAPPING["base_vel_x"]] + [ - STATE_VEC_IDX_MAPPING["base_angular_vel"] + STATE_VEC_IDX_MAPPING["base_angular_vel"], ] for joint in data: node.send_output( - "jointstate_left", pa.array(joint[LEFT_UNI_STATE_INDICES], type=pa.float32()) + "jointstate_left", pa.array(joint[LEFT_UNI_STATE_INDICES], type=pa.float32()), ) node.send_output( - "jointstate_right", pa.array(joint[RIGHT_UNI_STATE_INDICES], type=pa.float32()) + "jointstate_right", pa.array(joint[RIGHT_UNI_STATE_INDICES], type=pa.float32()), ) # node.send_output( # "mobile_base", pa.array(joint[MOBILE_BASE_UNI_STATE_INDICES], type=pa.float32()) diff --git a/examples/python-operator-dataflow/file_saver_op.py b/examples/python-operator-dataflow/file_saver_op.py index 592e10f5..e12028ab 100644 --- a/examples/python-operator-dataflow/file_saver_op.py +++ b/examples/python-operator-dataflow/file_saver_op.py @@ -1,11 +1,9 @@ import pyarrow as pa - from dora import DoraStatus class Operator: - """ - Inferring object from images + """Inferring object from images """ def __init__(self): @@ -21,7 +19,7 @@ class Operator: if dora_event["type"] == "INPUT" and dora_event["id"] == "file": input = dora_event["value"][0].as_py() - with open(input["path"], "r") as file: + with open(input["path"]) as file: self.last_file = file.read() self.last_path = input["path"] self.last_metadata = dora_event["metadata"] @@ -36,8 +34,8 @@ class Operator: "raw": input["raw"], "path": input["path"], "origin": dora_event["id"], - } - ] + }, + ], ), dora_event["metadata"], ) diff --git a/examples/python-operator-dataflow/keyboard_op.py b/examples/python-operator-dataflow/keyboard_op.py index 2d179ac6..70d9e2bc 100644 --- a/examples/python-operator-dataflow/keyboard_op.py +++ b/examples/python-operator-dataflow/keyboard_op.py @@ -1,8 +1,7 @@ -from pynput import keyboard -from pynput.keyboard import Key, Events import pyarrow as pa from dora import Node - +from pynput import keyboard +from pynput.keyboard import Events, Key node = Node() buffer_text = "" @@ -30,36 +29,35 @@ with keyboard.Events() as events: cursor = 0 buffer_text += event.key.char node.send_output("buffer", pa.array([buffer_text])) - else: - if event.key == Key.backspace: - buffer_text = buffer_text[:-1] - node.send_output("buffer", pa.array([buffer_text])) - elif 
event.key == Key.esc: - buffer_text = "" - node.send_output("buffer", pa.array([buffer_text])) - elif event.key == Key.enter: - node.send_output("submitted", pa.array([buffer_text])) - first_word = buffer_text.split(" ")[0] - if first_word in NODE_TOPIC: - node.send_output(first_word, pa.array([buffer_text])) - submitted_text.append(buffer_text) - buffer_text = "" + elif event.key == Key.backspace: + buffer_text = buffer_text[:-1] + node.send_output("buffer", pa.array([buffer_text])) + elif event.key == Key.esc: + buffer_text = "" + node.send_output("buffer", pa.array([buffer_text])) + elif event.key == Key.enter: + node.send_output("submitted", pa.array([buffer_text])) + first_word = buffer_text.split(" ")[0] + if first_word in NODE_TOPIC: + node.send_output(first_word, pa.array([buffer_text])) + submitted_text.append(buffer_text) + buffer_text = "" + node.send_output("buffer", pa.array([buffer_text])) + elif event.key == Key.ctrl: + ctrl = True + elif event.key == Key.space: + buffer_text += " " + node.send_output("buffer", pa.array([buffer_text])) + elif event.key == Key.up: + if len(submitted_text) > 0: + cursor = max(cursor - 1, -len(submitted_text)) + buffer_text = submitted_text[cursor] node.send_output("buffer", pa.array([buffer_text])) - elif event.key == Key.ctrl: - ctrl = True - elif event.key == Key.space: - buffer_text += " " + elif event.key == Key.down: + if len(submitted_text) > 0: + cursor = min(cursor + 1, 0) + buffer_text = submitted_text[cursor] node.send_output("buffer", pa.array([buffer_text])) - elif event.key == Key.up: - if len(submitted_text) > 0: - cursor = max(cursor - 1, -len(submitted_text)) - buffer_text = submitted_text[cursor] - node.send_output("buffer", pa.array([buffer_text])) - elif event.key == Key.down: - if len(submitted_text) > 0: - cursor = min(cursor + 1, 0) - buffer_text = submitted_text[cursor] - node.send_output("buffer", pa.array([buffer_text])) elif event is not None and isinstance(event, Events.Release): if event.key == Key.ctrl: ctrl = False diff --git a/examples/python-operator-dataflow/llm_op.py b/examples/python-operator-dataflow/llm_op.py index 0e8d484a..01d256f6 100644 --- a/examples/python-operator-dataflow/llm_op.py +++ b/examples/python-operator-dataflow/llm_op.py @@ -1,13 +1,13 @@ -from dora import DoraStatus -import pylcs -import os -import pyarrow as pa -from transformers import AutoModelForCausalLM, AutoTokenizer import json - +import os import re import time +import pyarrow as pa +import pylcs +from dora import DoraStatus +from transformers import AutoModelForCausalLM, AutoTokenizer + MODEL_NAME_OR_PATH = "TheBloke/deepseek-coder-6.7B-instruct-GPTQ" # MODEL_NAME_OR_PATH = "hanspeterlyngsoeraaschoujensen/deepseek-math-7b-instruct-GPTQ" @@ -64,14 +64,16 @@ tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME_OR_PATH, use_fast=True) def extract_python_code_blocks(text): - """ - Extracts Python code blocks from the given text that are enclosed in triple backticks with a python language identifier. + """Extracts Python code blocks from the given text that are enclosed in triple backticks with a python language identifier. - Parameters: + Parameters + ---------- - text: A string that may contain one or more Python code blocks. - Returns: + Returns + ------- - A list of strings, where each string is a block of Python code extracted from the text. 
+ """ pattern = r"```python\n(.*?)\n```" matches = re.findall(pattern, text, re.DOTALL) @@ -80,21 +82,22 @@ def extract_python_code_blocks(text): matches = re.findall(pattern, text, re.DOTALL) if len(matches) == 0: return [text] - else: - matches = [remove_last_line(matches[0])] + matches = [remove_last_line(matches[0])] return matches def extract_json_code_blocks(text): - """ - Extracts json code blocks from the given text that are enclosed in triple backticks with a json language identifier. + """Extracts json code blocks from the given text that are enclosed in triple backticks with a json language identifier. - Parameters: + Parameters + ---------- - text: A string that may contain one or more json code blocks. - Returns: + Returns + ------- - A list of strings, where each string is a block of json code extracted from the text. + """ pattern = r"```json\n(.*?)\n```" matches = re.findall(pattern, text, re.DOTALL) @@ -108,14 +111,16 @@ def extract_json_code_blocks(text): def remove_last_line(python_code): - """ - Removes the last line from a given string of Python code. + """Removes the last line from a given string of Python code. - Parameters: + Parameters + ---------- - python_code: A string representing Python source code. - Returns: + Returns + ------- - A string with the last line removed. + """ lines = python_code.split("\n") # Split the string into lines if lines: # Check if there are any lines to remove @@ -124,8 +129,7 @@ def remove_last_line(python_code): def calculate_similarity(source, target): - """ - Calculate a similarity score between the source and target strings. + """Calculate a similarity score between the source and target strings. This uses the edit distance relative to the length of the strings. """ edit_distance = pylcs.edit_distance(source, target) @@ -136,8 +140,7 @@ def calculate_similarity(source, target): def find_best_match_location(source_code, target_block): - """ - Find the best match for the target_block within the source_code by searching line by line, + """Find the best match for the target_block within the source_code by searching line by line, considering blocks of varying lengths. """ source_lines = source_code.split("\n") @@ -167,8 +170,7 @@ def find_best_match_location(source_code, target_block): def replace_code_in_source(source_code, replacement_block: str): - """ - Replace the best matching block in the source_code with the replacement_block, considering variable block lengths. + """Replace the best matching block in the source_code with the replacement_block, considering variable block lengths. 
""" replacement_block = extract_python_code_blocks(replacement_block)[0] start_index, end_index = find_best_match_location(source_code, replacement_block) @@ -178,8 +180,7 @@ def replace_code_in_source(source_code, replacement_block: str): source_code[:start_index] + replacement_block + source_code[end_index:] ) return new_source - else: - return source_code + return source_code class Operator: @@ -192,13 +193,13 @@ class Operator: if dora_event["type"] == "INPUT" and dora_event["id"] == "code_modifier": input = dora_event["value"][0].as_py() - with open(input["path"], "r", encoding="utf8") as f: + with open(input["path"], encoding="utf8") as f: code = f.read() user_message = input["user_message"] start_llm = time.time() output = self.ask_llm( - CODE_MODIFIER_TEMPLATE.format(code=code, user_message=user_message) + CODE_MODIFIER_TEMPLATE.format(code=code, user_message=user_message), ) source_code = replace_code_in_source(code, output) @@ -212,8 +213,8 @@ class Operator: "path": input["path"], "response": output, "prompt": input["user_message"], - } - ] + }, + ], ), dora_event["metadata"], ) @@ -226,7 +227,7 @@ class Operator: elif dora_event["type"] == "INPUT" and dora_event["id"] == "message_sender": user_message = dora_event["value"][0].as_py() output = self.ask_llm( - MESSAGE_SENDER_TEMPLATE.format(user_message=user_message) + MESSAGE_SENDER_TEMPLATE.format(user_message=user_message), ) outputs = extract_json_code_blocks(output)[0] try: @@ -293,7 +294,7 @@ if __name__ == "__main__": current_directory = os.path.dirname(current_file_path) path = current_directory + "object_detection.py" - with open(path, "r", encoding="utf8") as f: + with open(path, encoding="utf8") as f: raw = f.read() op.on_event( @@ -306,7 +307,7 @@ if __name__ == "__main__": "path": path, "user_message": "send a star ", }, - ] + ], ), "metadata": [], }, diff --git a/examples/python-operator-dataflow/microphone_op.py b/examples/python-operator-dataflow/microphone_op.py index b6fb6e63..ae573be2 100644 --- a/examples/python-operator-dataflow/microphone_op.py +++ b/examples/python-operator-dataflow/microphone_op.py @@ -1,7 +1,6 @@ import numpy as np import pyarrow as pa import sounddevice as sd - from dora import DoraStatus # Set the parameters for recording @@ -10,8 +9,7 @@ MAX_DURATION = 5 class Operator: - """ - Microphone operator that records the audio + """Microphone operator that records the audio """ def on_event( diff --git a/examples/python-operator-dataflow/object_detection.py b/examples/python-operator-dataflow/object_detection.py index c542e2c3..2ce4af57 100755 --- a/examples/python-operator-dataflow/object_detection.py +++ b/examples/python-operator-dataflow/object_detection.py @@ -1,10 +1,8 @@ import numpy as np import pyarrow as pa - from dora import DoraStatus from ultralytics import YOLO - CAMERA_WIDTH = 640 CAMERA_HEIGHT = 480 @@ -13,8 +11,7 @@ model = YOLO("yolov8n.pt") class Operator: - """ - Inferring object from images + """Inferring object from images """ def on_event( diff --git a/examples/python-operator-dataflow/plot.py b/examples/python-operator-dataflow/plot.py index 5effc9d8..093c9e0f 100755 --- a/examples/python-operator-dataflow/plot.py +++ b/examples/python-operator-dataflow/plot.py @@ -1,10 +1,9 @@ import os -import cv2 +import cv2 from dora import DoraStatus from utils import LABELS - CI = os.environ.get("CI") CAMERA_WIDTH = 640 @@ -14,8 +13,7 @@ FONT = cv2.FONT_HERSHEY_SIMPLEX class Operator: - """ - Plot image and bounding box + """Plot image and bounding box """ def __init__(self): @@ 
-63,7 +61,7 @@ class Operator: ) cv2.putText( - image, self.buffer, (20, 14 + 21 * 14), FONT, 0.5, (190, 250, 0), 1 + image, self.buffer, (20, 14 + 21 * 14), FONT, 0.5, (190, 250, 0), 1, ) i = 0 @@ -111,7 +109,7 @@ class Operator: { "role": id, "content": value[0].as_py(), - } + }, ] return DoraStatus.CONTINUE diff --git a/examples/python-operator-dataflow/sentence_transformers_op.py b/examples/python-operator-dataflow/sentence_transformers_op.py index c4619cf7..11952014 100644 --- a/examples/python-operator-dataflow/sentence_transformers_op.py +++ b/examples/python-operator-dataflow/sentence_transformers_op.py @@ -1,11 +1,10 @@ -from sentence_transformers import SentenceTransformer -from sentence_transformers import util - -from dora import DoraStatus import os import sys -import torch + import pyarrow as pa +import torch +from dora import DoraStatus +from sentence_transformers import SentenceTransformer, util SHOULD_BE_INCLUDED = [ "webcam.py", @@ -24,7 +23,7 @@ def get_all_functions(path): if file not in SHOULD_BE_INCLUDED: continue path = os.path.join(root, file) - with open(path, "r", encoding="utf8") as f: + with open(path, encoding="utf8") as f: ## add file folder to system path sys.path.append(root) ## import module from path diff --git a/examples/python-operator-dataflow/webcam.py b/examples/python-operator-dataflow/webcam.py index 43ce7e20..fc58db68 100755 --- a/examples/python-operator-dataflow/webcam.py +++ b/examples/python-operator-dataflow/webcam.py @@ -4,7 +4,6 @@ import time import cv2 import numpy as np import pyarrow as pa - from dora import DoraStatus CAMERA_WIDTH = 640 @@ -16,8 +15,7 @@ font = cv2.FONT_HERSHEY_SIMPLEX class Operator: - """ - Sending image from webcam to the dataflow + """Sending image from webcam to the dataflow """ def __init__(self): @@ -39,22 +37,21 @@ class Operator: frame = cv2.resize(frame, (CAMERA_WIDTH, CAMERA_HEIGHT)) self.failure_count = 0 ## Push an error image in case the camera is not available. 
+ elif self.failure_count > 10: + frame = np.zeros((CAMERA_HEIGHT, CAMERA_WIDTH, 3), dtype=np.uint8) + cv2.putText( + frame, + "No Webcam was found at index %d" % (CAMERA_INDEX), + (30, 30), + font, + 0.75, + (255, 255, 255), + 2, + 1, + ) else: - if self.failure_count > 10: - frame = np.zeros((CAMERA_HEIGHT, CAMERA_WIDTH, 3), dtype=np.uint8) - cv2.putText( - frame, - "No Webcam was found at index %d" % (CAMERA_INDEX), - (int(30), int(30)), - font, - 0.75, - (255, 255, 255), - 2, - 1, - ) - else: - self.failure_count += 1 - return DoraStatus.CONTINUE + self.failure_count += 1 + return DoraStatus.CONTINUE send_output( "image", @@ -68,8 +65,7 @@ class Operator: if time.time() - self.start_time < 20 or CI != "true": return DoraStatus.CONTINUE - else: - return DoraStatus.STOP + return DoraStatus.STOP def __del__(self): self.video_capture.release() diff --git a/examples/python-operator-dataflow/whisper_op.py b/examples/python-operator-dataflow/whisper_op.py index feab8b92..c5915e00 100644 --- a/examples/python-operator-dataflow/whisper_op.py +++ b/examples/python-operator-dataflow/whisper_op.py @@ -1,15 +1,12 @@ import pyarrow as pa import whisper - from dora import DoraStatus - model = whisper.load_model("base") class Operator: - """ - Transforming Speech to Text using OpenAI Whisper model + """Transforming Speech to Text using OpenAI Whisper model """ def on_event( diff --git a/examples/python-ros2-dataflow/control_node.py b/examples/python-ros2-dataflow/control_node.py index 7540c092..53a9503f 100755 --- a/examples/python-ros2-dataflow/control_node.py +++ b/examples/python-ros2-dataflow/control_node.py @@ -1,9 +1,9 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- import random -from dora import Node + import pyarrow as pa +from dora import Node node = Node() @@ -16,8 +16,8 @@ for i in range(500): if event_id == "turtle_pose": print( f"""Pose: {event["value"].tolist()}""".replace("\r", "").replace( - "\n", " " - ) + "\n", " ", + ), ) elif event_id == "tick": direction = { diff --git a/examples/python-ros2-dataflow/random_turtle.py b/examples/python-ros2-dataflow/random_turtle.py index 3b25ac21..64632902 100755 --- a/examples/python-ros2-dataflow/random_turtle.py +++ b/examples/python-ros2-dataflow/random_turtle.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- from dora import Node, Ros2Context, Ros2NodeOptions, Ros2QosPolicies @@ -18,7 +17,7 @@ topic_qos = Ros2QosPolicies(reliable=True, max_blocking_time=0.1) # Create a publisher to cmd_vel topic turtle_twist_topic = ros2_node.create_topic( - "/turtle1/cmd_vel", "geometry_msgs/Twist", topic_qos + "/turtle1/cmd_vel", "geometry_msgs/Twist", topic_qos, ) twist_writer = ros2_node.create_publisher(turtle_twist_topic) diff --git a/examples/reachy2/parse_bbox.py b/examples/reachy2/parse_bbox.py index fcc25b05..40f19ec0 100644 --- a/examples/reachy2/parse_bbox.py +++ b/examples/reachy2/parse_bbox.py @@ -11,14 +11,16 @@ IMAGE_RESIZE_RATIO = float(os.getenv("IMAGE_RESIZE_RATIO", "1.0")) def extract_bboxes(json_text): - """ - Extracts bounding boxes from a JSON string with markdown markers and returns them as a NumPy array. + """Extracts bounding boxes from a JSON string with markdown markers and returns them as a NumPy array. - Parameters: + Parameters + ---------- json_text (str): JSON string containing bounding box data, including ```json markers. - Returns: + Returns + ------- np.ndarray: NumPy array of bounding boxes. 
+
     """
     # Ensure all lines are stripped of whitespace and markers
     lines = json_text.strip().splitlines()
diff --git a/examples/reachy2/parse_bbox_minimal.py b/examples/reachy2/parse_bbox_minimal.py
index 93daa507..3df24e8c 100644
--- a/examples/reachy2/parse_bbox_minimal.py
+++ b/examples/reachy2/parse_bbox_minimal.py
@@ -11,14 +11,16 @@ IMAGE_RESIZE_RATIO = float(os.getenv("IMAGE_RESIZE_RATIO", "1.0"))
 
 
 def extract_bboxes(json_text) -> (np.ndarray, np.ndarray):
-    """
-    Extracts bounding boxes from a JSON string with markdown markers and returns them as a NumPy array.
+    """Extracts bounding boxes from a JSON string with markdown markers and returns them as a NumPy array.
 
-    Parameters:
+    Parameters
+    ----------
     json_text (str): JSON string containing bounding box data, including ```json markers.
 
-    Returns:
+    Returns
+    -------
     np.ndarray: NumPy array of bounding boxes.
+
     """
     # Ensure all lines are stripped of whitespace and markers
     lines = json_text.strip().splitlines()
diff --git a/examples/reachy2/pick_place.py b/examples/reachy2/pick_place.py
index 8d3b94b9..4bfd2b94 100644
--- a/examples/reachy2/pick_place.py
+++ b/examples/reachy2/pick_place.py
@@ -80,14 +80,16 @@ stop = True
 
 
 def extract_bboxes(json_text) -> (np.ndarray, np.ndarray):
-    """
-    Extracts bounding boxes from a JSON string with markdown markers and returns them as a NumPy array.
+    """Extracts bounding boxes from a JSON string with markdown markers and returns them as a NumPy array.
 
-    Parameters:
+    Parameters
+    ----------
    json_text (str): JSON string containing bounding box data, including ```json markers.
 
-    Returns:
+    Returns
+    -------
     np.ndarray: NumPy array of bounding boxes.
+
     """
     # Ensure all lines are stripped of whitespace and markers
     lines = json_text.strip().splitlines()
@@ -120,8 +122,8 @@ def handle_speech(last_text):
             "text_vlm",
             pa.array(
                 [
-                    f"Given the prompt: {cache['text']}. Output the two bounding boxes for the two objects"
-                ]
+                    f"Given the prompt: {cache['text']}. Output the two bounding boxes for the two objects",
+                ],
             ),
             metadata={"image_id": "image_depth"},
         )
@@ -176,16 +178,15 @@ def wait_for_events(ids: list[str], timeout=None, cache={}):
 def get_prompt():
     text = wait_for_event(id="text", timeout=0.3)
     if text is None:
-        return
+        return None
     text = text[0].as_py()
 
     words = text.lower().split()
 
     if len(ACTIVATION_WORDS) > 0 and all(
         word not in ACTIVATION_WORDS for word in words
     ):
-        return
-    else:
-        return text
+        return None
+    return text
 
 last_text = ""
@@ -205,7 +206,7 @@ while True:
                 metadata={"encoding": "jointstate", "duration": 1},
             )
             _, cache = wait_for_events(
-                ids=["response_r_arm", "response_l_arm"], timeout=2, cache=cache
+                ids=["response_r_arm", "response_l_arm"], timeout=2, cache=cache,
             )
 
             # handle_speech(cache["text"])
@@ -270,7 +271,7 @@ while True:
                     [x, y, -0.16, 0, 0, 0, 100],
                     [x, y, z, 0, 0, 0, 0],
                     [x, y, -0.16, 0, 0, 0, 0],
-                ]
+                ],
             ).ravel()
 
             if y < 0:
@@ -351,7 +352,7 @@ while True:
                         0,
                         0,
                         100,
-                    ]
+                    ],
                 ),
                 metadata={"encoding": "xyzrpy", "duration": "0.75"},
             )
@@ -388,7 +389,7 @@ while True:
                        0,
                        0,
                        100,
-                    ]
+                    ],
                ),
                metadata={"encoding": "xyzrpy", "duration": "0.75"},
            )
diff --git a/examples/reachy2/state_machine.py b/examples/reachy2/state_machine.py
index 13f6360c..9e129815 100644
--- a/examples/reachy2/state_machine.py
+++ b/examples/reachy2/state_machine.py
@@ -187,7 +187,7 @@ while True:
                     [x, y, -0.16, 0, 0, 0, 100],
                     [x, y, z, 0, 0, 0, 0],
                     [x, y, -0.16, 0, 0, 0, 0],
-                ]
+                ],
             ).ravel()
 
             if y < 0:
diff --git a/examples/translation/pretty_print.py b/examples/translation/pretty_print.py
index 77a9a743..167c3f6c 100644
--- a/examples/translation/pretty_print.py
+++ b/examples/translation/pretty_print.py
@@ -33,7 +33,7 @@ for event in node:
     if event["type"] == "INPUT":
         # The sentence to be printed
         sentence = event["value"][0].as_py()
-        if event["id"] not in previous_texts.keys():
+        if event["id"] not in previous_texts:
             previous_texts[event["id"]] = ["", "", sentence]
 
         else:
diff --git a/libraries/extensions/ros2-bridge/python/test_utils.py b/libraries/extensions/ros2-bridge/python/test_utils.py
index a0dd5e6f..1229926f 100644
--- a/libraries/extensions/ros2-bridge/python/test_utils.py
+++ b/libraries/extensions/ros2-bridge/python/test_utils.py
@@ -1,7 +1,6 @@
 import numpy as np
 import pyarrow as pa
-
 
 # Marker Message Example
 TEST_ARRAYS = [
     ("std_msgs", "UInt8", pa.array([{"data": np.uint8(2)}])),
@@ -23,12 +22,12 @@ TEST_ARRAYS = [
                         "label": "a",
                         "size": np.uint32(10),
                         "stride": np.uint32(20),
-                    }
+                    },
                 ],
                 "data_offset": np.uint32(30),
             },
-        }
-    ]
+        },
+    ],
         ),
     ),
     (
@@ -44,12 +43,12 @@ TEST_ARRAYS = [
                         "label": "a",
                         "size": np.uint32(10),
                         "stride": np.uint32(20),
-                    }
+                    },
                 ],
                 "data_offset": np.uint32(30),
             },
-        }
-    ]
+        },
+    ],
         ),
     ),
     (
@@ -99,7 +98,7 @@ TEST_ARRAYS = [
                     "x": np.float64(1.0),  # Numpy type
                     "y": np.float64(1.0),  # Numpy type
                     "z": np.float64(1.0),  # Numpy type
-                }
+                },
             ],
             "colors": [
                 {
@@ -107,15 +106,15 @@ TEST_ARRAYS = [
                     "r": np.float32(1.0),  # Numpy type
                     "g": np.float32(1.0),  # Numpy type
                    "b": np.float32(1.0),  # Numpy type
                    "a": np.float32(1.0),  # Numpy type (alpha)
-                }  # Numpy array for colors
+                },  # Numpy array for colors
            ],
            "texture_resource": "",
            "uv_coordinates": [{}],
            "text": "",
            "mesh_resource": "",
            "mesh_use_embedded_materials": False,  # Boolean type, no numpy equivalent
-        }
-    ]
+        },
+    ],
        ),
    ),
    (
@@ -167,7 +166,7 @@ TEST_ARRAYS = [
                     "x": np.float64(1.0),  # Numpy type
                     "y": np.float64(1.0),  # Numpy type
                     "z": np.float64(1.0),  # Numpy type
-                }
+                },
             ],
             "colors": [
                 {
@@ -175,17 +174,17 @@ TEST_ARRAYS = [
                     "r": np.float32(1.0),  # Numpy type
                     "g": np.float32(1.0),  # Numpy type
                     "b": np.float32(1.0),  # Numpy type
                     "a": np.float32(1.0),  # Numpy type (alpha)
-                }  # Numpy array for colors
+                },  # Numpy array for colors
             ],
             "texture_resource": "",
             "uv_coordinates": [{}],
             "text": "",
             "mesh_resource": "",
             "mesh_use_embedded_materials": False,  # Boolean type, no numpy equivalent
-            }
-        ]
-    }
-]
+            },
+        ],
+    },
+],
         ),
     ),
     (
@@ -254,16 +253,15 @@ TEST_ARRAYS = [
                         "a": np.float32(1.0),  # 32-bit float
                     },
                 ],
-            }
-        ]
+            },
+        ],
         ),
     ),
 ]


def is_subset(subset, superset):
-    """
-    Check if subset is a subset of superset, to avoid false negatives linked to default values.
+    """Check if subset is a subset of superset, to avoid false negatives linked to default values.
     """
     if isinstance(subset, pa.Array):
         return is_subset(subset.to_pylist(), superset.to_pylist())
diff --git a/node-hub/dora-ios-lidar/dora_ios_lidar/__init__.py b/node-hub/dora-ios-lidar/dora_ios_lidar/__init__.py
index ac3cbef9..cde7a377 100644
--- a/node-hub/dora-ios-lidar/dora_ios_lidar/__init__.py
+++ b/node-hub/dora-ios-lidar/dora_ios_lidar/__init__.py
@@ -5,7 +5,7 @@ readme_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "README.m
 
 # Read the content of the README file
 try:
-    with open(readme_path, "r", encoding="utf-8") as f:
+    with open(readme_path, encoding="utf-8") as f:
         __doc__ = f.read()
 except FileNotFoundError:
     __doc__ = "README file not found."
diff --git a/node-hub/dora-ios-lidar/dora_ios_lidar/__main__.py b/node-hub/dora-ios-lidar/dora_ios_lidar/__main__.py
index bcbfde6d..40e2b013 100644
--- a/node-hub/dora-ios-lidar/dora_ios_lidar/__main__.py
+++ b/node-hub/dora-ios-lidar/dora_ios_lidar/__main__.py
@@ -1,5 +1,4 @@
 from .main import main
-
 if __name__ == "__main__":
     main()
diff --git a/node-hub/dora-ios-lidar/dora_ios_lidar/main.py b/node-hub/dora-ios-lidar/dora_ios_lidar/main.py
index c37ae4ef..29a40f60 100644
--- a/node-hub/dora-ios-lidar/dora_ios_lidar/main.py
+++ b/node-hub/dora-ios-lidar/dora_ios_lidar/main.py
@@ -16,8 +16,7 @@ class DemoApp:
         self.stop = False
 
     def on_new_frame(self):
-        """
-        This method is called from non-main thread, therefore cannot be used for presenting UI.
+        """This method is called from non-main thread, therefore cannot be used for presenting UI.
         """
         self.event.set()  # Notify the main thread to stop waiting and process new frame.
@@ -28,13 +27,13 @@ class DemoApp:
     def connect_to_device(self, dev_idx):
         print("Searching for devices")
         devs = Record3DStream.get_connected_devices()
-        print("{} device(s) found".format(len(devs)))
+        print(f"{len(devs)} device(s) found")
         for dev in devs:
-            print("\tID: {}\n".format(dev.product_id))
+            print(f"\tID: {dev.product_id}\n")
 
         if len(devs) <= dev_idx:
             raise RuntimeError(
-                "Cannot connect to device #{}, try different index.".format(dev_idx)
+                f"Cannot connect to device #{dev_idx}, try different index.",
             )
 
         dev = devs[dev_idx]
@@ -45,7 +44,7 @@ class DemoApp:
 
     def get_intrinsic_mat_from_coeffs(self, coeffs):
         return np.array(
-            [[coeffs.fx, 0, coeffs.tx], [0, coeffs.fy, coeffs.ty], [0, 0, 1]]
+            [[coeffs.fx, 0, coeffs.tx], [0, coeffs.fy, coeffs.ty], [0, 0, 1]],
         )
 
     def start_processing_stream(self):
@@ -62,7 +61,7 @@ class DemoApp:
             depth = self.session.get_depth_frame()
             rgb = self.session.get_rgb_frame()
             intrinsic_mat = self.get_intrinsic_mat_from_coeffs(
-                self.session.get_intrinsic_mat()
+                self.session.get_intrinsic_mat(),
             )
 
             if depth.shape != rgb.shape:
diff --git a/node-hub/dora-kokoro-tts/dora_kokoro_tts/__init__.py b/node-hub/dora-kokoro-tts/dora_kokoro_tts/__init__.py
index ac3cbef9..cde7a377 100644
--- a/node-hub/dora-kokoro-tts/dora_kokoro_tts/__init__.py
+++ b/node-hub/dora-kokoro-tts/dora_kokoro_tts/__init__.py
@@ -5,7 +5,7 @@ readme_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "README.m
 
 # Read the content of the README file
 try:
-    with open(readme_path, "r", encoding="utf-8") as f:
+    with open(readme_path, encoding="utf-8") as f:
         __doc__ = f.read()
 except FileNotFoundError:
     __doc__ = "README file not found."
diff --git a/node-hub/dora-kokoro-tts/dora_kokoro_tts/__main__.py b/node-hub/dora-kokoro-tts/dora_kokoro_tts/__main__.py
index bcbfde6d..40e2b013 100644
--- a/node-hub/dora-kokoro-tts/dora_kokoro_tts/__main__.py
+++ b/node-hub/dora-kokoro-tts/dora_kokoro_tts/__main__.py
@@ -1,5 +1,4 @@
 from .main import main
-
 if __name__ == "__main__":
     main()
diff --git a/node-hub/dora-pyaudio/dora_pyaudio/main.py b/node-hub/dora-pyaudio/dora_pyaudio/main.py
index c3341b18..82cc4fb6 100644
--- a/node-hub/dora-pyaudio/dora_pyaudio/main.py
+++ b/node-hub/dora-pyaudio/dora_pyaudio/main.py
@@ -42,7 +42,7 @@ def main():
         event = node.next(timeout=0.01)
         if event is None:
             break
-        elif event["type"] == "INPUT":
+        if event["type"] == "INPUT":
            if event["id"] == "audio":
                audio = event["value"].to_numpy()
                sr = event["metadata"].get("sample_rate", SAMPLE_RATE)
diff --git a/node-hub/dora-qwen/dora_qwen/__init__.py b/node-hub/dora-qwen/dora_qwen/__init__.py
index ac3cbef9..cde7a377 100644
--- a/node-hub/dora-qwen/dora_qwen/__init__.py
+++ b/node-hub/dora-qwen/dora_qwen/__init__.py
@@ -5,7 +5,7 @@ readme_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "README.m
 
 # Read the content of the README file
 try:
-    with open(readme_path, "r", encoding="utf-8") as f:
+    with open(readme_path, encoding="utf-8") as f:
         __doc__ = f.read()
 except FileNotFoundError:
     __doc__ = "README file not found."
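Aside on the recurring `open(path, "r", encoding=...)` to `open(path, encoding=...)` hunks in these `__init__.py` files: the change is behavior-preserving, because `"r"` is already the default mode of `open()`. A minimal sketch of the equivalence, using a hypothetical local `README.md`:

```python
# "r" is the default file mode, so both calls read the same text;
# the patch only drops the redundant argument.
with open("README.md", "r", encoding="utf-8") as f:  # before the patch
    before = f.read()
with open("README.md", encoding="utf-8") as f:  # after the patch
    after = f.read()
assert before == after
```
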
diff --git a/node-hub/dora-qwen/dora_qwen/__main__.py b/node-hub/dora-qwen/dora_qwen/__main__.py
index bcbfde6d..40e2b013 100644
--- a/node-hub/dora-qwen/dora_qwen/__main__.py
+++ b/node-hub/dora-qwen/dora_qwen/__main__.py
@@ -1,5 +1,4 @@
 from .main import main
-
 if __name__ == "__main__":
     main()
diff --git a/node-hub/dora-qwen/dora_qwen/main.py b/node-hub/dora-qwen/dora_qwen/main.py
index 8d202456..812462ef 100644
--- a/node-hub/dora-qwen/dora_qwen/main.py
+++ b/node-hub/dora-qwen/dora_qwen/main.py
@@ -15,13 +15,13 @@ def get_model_gguf():
     from llama_cpp import Llama
 
     llm = Llama.from_pretrained(
-        repo_id="Qwen/Qwen2.5-0.5B-Instruct-GGUF", filename="*fp16.gguf", verbose=False
+        repo_id="Qwen/Qwen2.5-0.5B-Instruct-GGUF", filename="*fp16.gguf", verbose=False,
     )
     return llm
 
 
 def get_model_darwin():
-    from mlx_lm import load  # noqa
+    from mlx_lm import load
 
     model, tokenizer = load("mlx-community/Qwen2.5-0.5B-Instruct-8bit")
     return model, tokenizer
@@ -31,7 +31,7 @@ def get_model_huggingface():
     model_name = "Qwen/Qwen2.5-0.5B-Instruct"
 
     model = AutoModelForCausalLM.from_pretrained(
-        model_name, torch_dtype="auto", device_map="auto"
+        model_name, torch_dtype="auto", device_map="auto",
     )
     tokenizer = AutoTokenizer.from_pretrained(model_name)
     return model, tokenizer
@@ -43,7 +43,7 @@ ACTIVATION_WORDS = os.getenv("ACTIVATION_WORDS", "what how who where you").split
 def generate_hf(model, tokenizer, prompt: str, history) -> str:
     history += [{"role": "user", "content": prompt}]
     text = tokenizer.apply_chat_template(
-        history, tokenize=False, add_generation_prompt=True
+        history, tokenize=False, add_generation_prompt=True,
     )
     model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
     generated_ids = model.generate(**model_inputs, max_new_tokens=512)
@@ -91,11 +91,11 @@ def main():
                     from mlx_lm import generate
 
                     response = generate(
-                        model, tokenizer, prompt=text, verbose=False, max_tokens=50
+                        model, tokenizer, prompt=text, verbose=False, max_tokens=50,
                     )
 
                     node.send_output(
-                        output_id="text", data=pa.array([response]), metadata={}
+                        output_id="text", data=pa.array([response]), metadata={},
                     )
 
diff --git a/node-hub/dora-qwen2-5-vl/dora_qwen2_5_vl/main.py b/node-hub/dora-qwen2-5-vl/dora_qwen2_5_vl/main.py
index d3334b02..499e434f 100644
--- a/node-hub/dora-qwen2-5-vl/dora_qwen2_5_vl/main.py
+++ b/node-hub/dora-qwen2-5-vl/dora_qwen2_5_vl/main.py
@@ -46,7 +46,7 @@ try:
     )
 except (ImportError, ModuleNotFoundError):
     model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
-        MODEL_NAME_OR_PATH, torch_dtype="auto", device_map="auto"
+        MODEL_NAME_OR_PATH, torch_dtype="auto", device_map="auto",
     )
 
 
@@ -60,7 +60,6 @@ processor = AutoProcessor.from_pretrained(MODEL_NAME_OR_PATH)
 
 def generate(frames: dict, question, history, past_key_values=None, image_id=None):
     """Generate the response to the question given the image using Qwen2 model."""
-
     if image_id is not None:
         images = [frames[image_id]]
     else:
@@ -219,7 +218,7 @@ def main():
                 continue
             # set the max number of tiles in `max_num`
             response, history, past_key_values = generate(
-                frames, text, history, past_key_values, image_id
+                frames, text, history, past_key_values, image_id,
             )
             node.send_output(
                 "text",
diff --git a/node-hub/dora-rdt-1b/tests/test_dora_rdt_1b.py b/node-hub/dora-rdt-1b/tests/test_dora_rdt_1b.py
index e91142ea..f10bbc35 100644
--- a/node-hub/dora-rdt-1b/tests/test_dora_rdt_1b.py
+++ b/node-hub/dora-rdt-1b/tests/test_dora_rdt_1b.py
@@ -38,7 +38,6 @@ def test_download_policy():
 def test_download_vision_model():
     # Skip vision test as it is currently failing on macOS
     # See: https://github.com/dora-rs/dora/actions/runs/13484462433/job/37673857429
-    pass
     from dora_rdt_1b.main import get_vision_model
 
     (vision_encoder, image_processor) = get_vision_model()
diff --git a/node-hub/dora-sam2/dora_sam2/main.py b/node-hub/dora-sam2/dora_sam2/main.py
index 33750c69..f3130ddf 100644
--- a/node-hub/dora-sam2/dora_sam2/main.py
+++ b/node-hub/dora-sam2/dora_sam2/main.py
@@ -119,8 +119,8 @@ def main():
                             {
                                 "masks": masks.ravel(),
                                 "labels": event["value"]["labels"],
-                            }
-                        ]
+                            },
+                        ],
                     ),
                     metadata={
                         "image_id": image_id,
@@ -129,7 +129,7 @@ def main():
                     },
                 )
 
-            elif "boxes2d" in event_id:
+            if "boxes2d" in event_id:
                 if isinstance(event["value"], pa.StructArray):
                     boxes2d = event["value"][0].get("bbox").values.to_numpy()
 
@@ -159,7 +159,7 @@ def main():
                 ):
                     predictor.set_image(frames[image_id])
                     masks, _scores, last_pred = predictor.predict(
-                        box=boxes2d, point_labels=labels, multimask_output=False
+                        box=boxes2d, point_labels=labels, multimask_output=False,
                     )
 
                     if len(masks.shape) == 4:
@@ -190,8 +190,8 @@ def main():
                             {
                                 "masks": masks.ravel(),
                                 "labels": event["value"]["labels"],
-                            }
-                        ]
+                            },
+                        ],
                     ),
                     metadata={
                         "image_id": image_id,
diff --git a/tests/queue_size_and_timeout_python/receive_data.py b/tests/queue_size_and_timeout_python/receive_data.py
index b01463ba..f5cdd271 100644
--- a/tests/queue_size_and_timeout_python/receive_data.py
+++ b/tests/queue_size_and_timeout_python/receive_data.py
@@ -22,7 +22,7 @@ def main() -> None:
             i += 1
 
             print(
-                f"[{i}, {j}] Sent: {sent_in_s}, Received: {received_in_s}, Difference: {received_in_s - sent_in_s}"
+                f"[{i}, {j}] Sent: {sent_in_s}, Received: {received_in_s}, Difference: {received_in_s - sent_in_s}",
             )
             assert received_in_s - sent_in_s < 1.0
             time.sleep(0.1)
diff --git a/tests/queue_size_latest_data_python/receive_data.py b/tests/queue_size_latest_data_python/receive_data.py
index 89483a4e..ef46580f 100644
--- a/tests/queue_size_latest_data_python/receive_data.py
+++ b/tests/queue_size_latest_data_python/receive_data.py
@@ -1,6 +1,6 @@
-from dora import Node
 import time
 
+from dora import Node
 
 node = Node()
 
diff --git a/tests/queue_size_latest_data_python/send_data.py b/tests/queue_size_latest_data_python/send_data.py
index 4901f0e1..140e3184 100644
--- a/tests/queue_size_latest_data_python/send_data.py
+++ b/tests/queue_size_latest_data_python/send_data.py
@@ -1,7 +1,8 @@
-from dora import Node
 import time
-import pyarrow as pa
+
 import numpy as np
+import pyarrow as pa
+from dora import Node
 
 node = Node()
 
diff --git a/yolo.yml b/yolo.yml
new file mode 100755
index 00000000..41bf906a
--- /dev/null
+++ b/yolo.yml
@@ -0,0 +1,27 @@
+nodes:
+  - id: camera
+    build: pip install opencv-video-capture
+    path: opencv-video-capture
+    inputs:
+      tick: dora/timer/millis/20
+    outputs:
+      - image
+    env:
+      CAPTURE_PATH: 0
+      IMAGE_WIDTH: 640
+      IMAGE_HEIGHT: 480
+
+  - id: object-detection
+    build: pip install dora-yolo
+    path: dora-yolo
+    inputs:
+      image: camera/image
+    outputs:
+      - bbox
+
+  - id: plot
+    build: pip install dora-rerun
+    path: dora-rerun
+    inputs:
+      image: camera/image
+      boxes2d: object-detection/bbox
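
The new `yolo.yml` wires the camera output into `dora-yolo` and plots both the image and the detections with `dora-rerun`. As a minimal sketch (not part of the patch), a custom Python node could consume the same detections through the `dora` Node API, assuming it were added to the dataflow with a hypothetical input named `bbox` mapped to `object-detection/bbox`:

```python
from dora import Node

node = Node()

# Iterate over incoming events, as in the other examples in this patch.
for event in node:
    if event["type"] == "INPUT" and event["id"] == "bbox":
        # `event["value"]` is the Arrow array sent by dora-yolo;
        # `event["metadata"]` carries any metadata attached by the sender.
        print(f"received {len(event['value'])} bbox values", event["metadata"])
```
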