| @@ -0,0 +1,130 @@ | |||
| name: "Dora Bot" | |||
| on: | |||
| issue_comment: | |||
| types: [created] | |||
| schedule: | |||
| - cron: "0 0 * * *" # Midnight(UTC) | |||
| jobs: | |||
| assign-unassign: | |||
| runs-on: ubuntu-latest | |||
| permissions: | |||
| issues: write | |||
| if: github.event_name == 'issue_comment' | |||
| steps: | |||
| - name: Checkout repository | |||
| uses: actions/checkout@v3 | |||
| - name: Parse comment and assign or unassign user | |||
| env: | |||
| GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} | |||
| COMMENT_BODY: "${{ github.event.comment.body }}" | |||
| ISSUE_NUMBER: "${{ github.event.issue.number }}" | |||
| COMMENT_AUTHOR: "${{ github.event.comment.user.login }}" | |||
| AUTHOR_ASSOCIATION: "${{ github.event.comment.author_association }}" | |||
| run: | | |||
| # For assigning | |||
| if [[ "$COMMENT_BODY" == "@dora-bot assign me" ]]; then | |||
| echo "Assigning $COMMENT_AUTHOR to issue #$ISSUE_NUMBER" | |||
| curl -X POST \ | |||
| -H "Authorization: Bearer $GITHUB_TOKEN" \ | |||
| -H "Accept: application/vnd.github+json" \ | |||
| https://api.github.com/repos/${{ github.repository }}/issues/$ISSUE_NUMBER/assignees \ | |||
| -d "{\"assignees\":[\"$COMMENT_AUTHOR\"]}" | |||
| # Post a confirmation comment | |||
| curl -X POST \ | |||
| -H "Authorization: Bearer $GITHUB_TOKEN" \ | |||
| -H "Accept: application/vnd.github+json" \ | |||
| https://api.github.com/repos/${{ github.repository }}/issues/$ISSUE_NUMBER/comments \ | |||
| -d "{\"body\":\"Hello @$COMMENT_AUTHOR, this issue is now assigned to you!\"}" | |||
| # For self-unassignment | |||
| elif [[ "$COMMENT_BODY" == "@dora-bot unassign me" ]]; then | |||
| echo "Unassigning $COMMENT_AUTHOR from issue #$ISSUE_NUMBER" | |||
| curl -X DELETE \ | |||
| -H "Authorization: Bearer $GITHUB_TOKEN" \ | |||
| -H "Accept: application/vnd.github+json" \ | |||
| https://api.github.com/repos/${{ github.repository }}/issues/$ISSUE_NUMBER/assignees \ | |||
| -d "{\"assignees\":[\"$COMMENT_AUTHOR\"]}" | |||
| # Post a confirmation comment | |||
| curl -X POST \ | |||
| -H "Authorization: Bearer $GITHUB_TOKEN" \ | |||
| -H "Accept: application/vnd.github+json" \ | |||
| https://api.github.com/repos/${{ github.repository }}/issues/$ISSUE_NUMBER/comments \ | |||
| -d "{\"body\":\"Hello @$COMMENT_AUTHOR, you have been unassigned from this issue.\"}" | |||
| # Command that lets maintainers unassign another user | |||
| elif [[ "$COMMENT_BODY" =~ @dora-bot\ unassign\ [@]?([a-zA-Z0-9_-]+) ]]; then | |||
| TARGET_USER="${BASH_REMATCH[1]}" | |||
| # Check that the comment author has sufficient permissions | |||
| if [[ "$AUTHOR_ASSOCIATION" == "NONE" || "$AUTHOR_ASSOCIATION" == "CONTRIBUTOR" ]]; then | |||
| echo "Unauthorized unassign command by $COMMENT_AUTHOR. Only maintainers or collaborators may unassign others." | |||
| exit 1 | |||
| fi | |||
| echo "Maintainer $COMMENT_AUTHOR is unassigning $TARGET_USER from issue #$ISSUE_NUMBER" | |||
| curl -X DELETE \ | |||
| -H "Authorization: Bearer $GITHUB_TOKEN" \ | |||
| -H "Accept: application/vnd.github+json" \ | |||
| https://api.github.com/repos/${{ github.repository }}/issues/$ISSUE_NUMBER/assignees \ | |||
| -d "{\"assignees\":[\"$TARGET_USER\"]}" | |||
| curl -X POST \ | |||
| -H "Authorization: Bearer $GITHUB_TOKEN" \ | |||
| -H "Accept: application/vnd.github+json" \ | |||
| https://api.github.com/repos/${{ github.repository }}/issues/$ISSUE_NUMBER/comments \ | |||
| -d "{\"body\":\"Hello @$TARGET_USER, you have been unassigned from this issue by @$COMMENT_AUTHOR.\"}" | |||
| else | |||
| echo "No matching command found in comment: $COMMENT_BODY" | |||
| fi | |||
| stale-unassign: | |||
| runs-on: ubuntu-latest | |||
| permissions: | |||
| issues: write | |||
| if: github.event_name == 'schedule' | |||
| steps: | |||
| - name: Unassign stale issues | |||
| env: | |||
| GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} | |||
| run: | | |||
| # Calculate the timestamp for 14 days ago | |||
| TWO_WEEKS_AGO=$(date -d "14 days ago" +%s) | |||
| repo="${{ github.repository }}" | |||
| echo "Fetching open issues for $repo" | |||
| issues=$(curl -s -H "Authorization: Bearer $GITHUB_TOKEN" \ | |||
| -H "Accept: application/vnd.github+json" \ | |||
| "https://api.github.com/repos/${repo}/issues?state=open&per_page=100") | |||
| issue_count=$(echo "$issues" | jq '. | length') | |||
| echo "Found $issue_count open issues" | |||
| for (( i=0; i<$issue_count; i++ )); do | |||
| issue_number=$(echo "$issues" | jq -r ".[$i].number") | |||
| updated_at=$(echo "$issues" | jq -r ".[$i].updated_at") | |||
| updated_ts=$(date -d "$updated_at" +%s) | |||
| # If the issue hasn't been updated within 2 weeks, consider it stale. | |||
| if [[ $updated_ts -lt $TWO_WEEKS_AGO ]]; then | |||
| assignees=$(echo "$issues" | jq -r ".[$i].assignees | .[].login") | |||
| if [[ -n "$assignees" ]]; then | |||
| echo "Issue #$issue_number is stale. Unassigning users: $assignees" | |||
| for user in $assignees; do | |||
| curl -X DELETE \ | |||
| -H "Authorization: Bearer $GITHUB_TOKEN" \ | |||
| -H "Accept: application/vnd.github+json" \ | |||
| https://api.github.com/repos/${repo}/issues/$issue_number/assignees \ | |||
| -d "{\"assignees\":[\"$user\"]}" | |||
| curl -X POST \ | |||
| -H "Authorization: Bearer $GITHUB_TOKEN" \ | |||
| -H "Accept: application/vnd.github+json" \ | |||
| https://api.github.com/repos/${repo}/issues/$issue_number/comments \ | |||
| -d "{\"body\":\"@${user} has been automatically unassigned from this stale issue after 2 weeks of inactivity.\"}" | |||
| done | |||
| fi | |||
| fi | |||
| done | |||
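For anyone who wants to prototype or debug the stale sweep outside of Actions, here is a minimal Python sketch of what the `stale-unassign` job does, assuming the `requests` package is available; `TOKEN` and `REPO` are placeholders, not real values. Note that GitHub's `/issues` list endpoint also returns pull requests, which the sketch skips:

```python
from datetime import datetime, timedelta, timezone

import requests

TOKEN, REPO = "ghp_placeholder", "owner/repo"  # placeholders
HEADERS = {
    "Authorization": f"Bearer {TOKEN}",
    "Accept": "application/vnd.github+json",
}

cutoff = datetime.now(timezone.utc) - timedelta(days=14)
issues = requests.get(
    f"https://api.github.com/repos/{REPO}/issues",
    headers=HEADERS,
    params={"state": "open", "per_page": 100},
).json()

for issue in issues:
    if "pull_request" in issue:  # the /issues endpoint also lists PRs
        continue
    updated = datetime.fromisoformat(issue["updated_at"].replace("Z", "+00:00"))
    logins = [a["login"] for a in issue["assignees"]]
    if logins and updated < cutoff:
        # Remove all assignees from the stale issue.
        requests.delete(
            f"https://api.github.com/repos/{REPO}/issues/{issue['number']}/assignees",
            headers=HEADERS,
            json={"assignees": logins},
        )
```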
| @@ -176,4 +176,7 @@ out/ | |||
| # MacOS DS_Store | |||
| .DS_Store | |||
| ~* | |||
| # Miscellaneous | |||
| yolo.yml | |||
| @@ -33,6 +33,19 @@ We're currently running the following kind of checks: | |||
| - **CI / Formatting:** Ensures that the code is formatted using `rustfmt` (see [below](#style)) | |||
| - **CI / License Checks:** Scans the dependency tree and tries to detect possible license incompatibilities. | |||
| ## Issue Management | |||
| ### Dora Bot | |||
| We use a custom GitHub Action to help manage issue assignments. You can interact with it using the following commands (a sketch of the underlying API calls follows this list): | |||
| - `@dora-bot assign me` - Assigns the current issue to you. | |||
| - `@dora-bot unassign me` - Removes you from the issue's assignees. | |||
| For maintainers only: | |||
| - `@dora-bot unassign @username` - Allows maintainers to unassign other contributors. | |||
| Note: All issue assignments will be removed automatically after 2 weeks of inactivity. | |||
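Each command maps to one or two GitHub REST calls made by the workflow. As a rough Python illustration (placeholder values; the actual implementation is the shell workflow, not this snippet), `@dora-bot assign me` amounts to:

```python
import requests

# Placeholders — the workflow fills these from the triggering event.
TOKEN, REPO, ISSUE, USER = "ghp_placeholder", "dora-rs/dora", 123, "octocat"
HEADERS = {
    "Authorization": f"Bearer {TOKEN}",
    "Accept": "application/vnd.github+json",
}

# Assign the commenter to the issue...
requests.post(
    f"https://api.github.com/repos/{REPO}/issues/{ISSUE}/assignees",
    headers=HEADERS,
    json={"assignees": [USER]},
)
# ...then post a confirmation comment.
requests.post(
    f"https://api.github.com/repos/{REPO}/issues/{ISSUE}/comments",
    headers=HEADERS,
    json={"body": f"Hello @{USER}, this issue is now assigned to you!"},
)
```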
| ## Style | |||
| We use [`rustfmt`](https://github.com/rust-lang/rustfmt) with its default settings to format our code. | |||
| @@ -1,10 +1,11 @@ | |||
| """ | |||
| # dora-rs | |||
| # dora-rs. | |||
| This is the dora python client for interacting with dora dataflow. | |||
| You can install it via: | |||
| ```bash | |||
| pip install dora-rs | |||
| ``` | |||
| ``` | |||
| """ | |||
| from enum import Enum | |||
| @@ -28,8 +29,7 @@ from .dora import ( | |||
| class DoraStatus(Enum): | |||
| """Dora status to indicate if operator `on_input` loop | |||
| should be stopped. | |||
| """Dora status to indicate if operator `on_input` loop should be stopped. | |||
| Args: | |||
| Enum (u8): Status signaling to dora operator to | |||
| @@ -1,86 +1,100 @@ | |||
| import dora | |||
| import pyarrow | |||
| import typing | |||
| import pyarrow | |||
| import dora | |||
| @typing.final | |||
| class Enum: | |||
| """Generic enumeration. | |||
| Derive from this class to define new enumerations.""" | |||
| Derive from this class to define new enumerations. | |||
| """ | |||
| __members__: mappingproxy = ... | |||
| @typing.final | |||
| class Node: | |||
| """The custom node API lets you integrate `dora` into your application. | |||
| It allows you to retrieve input and send output in any fashion you want. | |||
| It allows you to retrieve input and send output in any fashion you want. | |||
| Use with: | |||
| Use with: | |||
| ```python | |||
| from dora import Node | |||
| ```python | |||
| from dora import Node | |||
| node = Node() | |||
| ```""" | |||
| node = Node() | |||
| ``` | |||
| """ | |||
| def __init__(self, node_id: str=None) -> None: | |||
| """The custom node API lets you integrate `dora` into your application. | |||
| It allows you to retrieve input and send output in any fashion you want. | |||
| """Use the custom node API to embed `dora` into your application. | |||
| Use with: | |||
| It allows you to retrieve input and send output in any fashion you want. | |||
| ```python | |||
| from dora import Node | |||
| Use with: | |||
| node = Node() | |||
| ```""" | |||
| ```python | |||
| from dora import Node | |||
| node = Node() | |||
| ``` | |||
| """ | |||
| def dataflow_descriptor(self) -> dict: | |||
| """Returns the full dataflow descriptor that this node is part of. | |||
| """Return the full dataflow descriptor that this node is part of. | |||
| This method returns the parsed dataflow YAML file.""" | |||
| This method returns the parsed dataflow YAML file. | |||
| """ | |||
| def dataflow_id(self) -> str: | |||
| """Returns the dataflow id.""" | |||
| """Return the dataflow id.""" | |||
| def merge_external_events(self, subscription: dora.Ros2Subscription) -> None: | |||
| """Merge an external event stream with dora main loop. | |||
| This currently only work with ROS2.""" | |||
| This currently only works with ROS2. | |||
| """ | |||
| def next(self, timeout: float=None) -> dict: | |||
| """`.next()` gives you the next input that the node has received. | |||
| It blocks until the next event becomes available. | |||
| You can use timeout in seconds to return if no input is available. | |||
| It will return `None` when all senders has been dropped. | |||
| ```python | |||
| event = node.next() | |||
| ``` | |||
| It blocks until the next event becomes available. | |||
| You can pass a timeout in seconds to make it return if no input is available. | |||
| It will return `None` when all senders have been dropped. | |||
| ```python | |||
| event = node.next() | |||
| ``` | |||
| You can also iterate over the event stream with a loop | |||
| You can also iterate over the event stream with a loop: | |||
| ```python | |||
| for event in node: | |||
| match event["type"]: | |||
| case "INPUT": | |||
| match event["id"]: | |||
| case "image": | |||
| ```""" | |||
| ```python | |||
| for event in node: | |||
| match event["type"]: | |||
| case "INPUT": | |||
| match event["id"]: | |||
| case "image": | |||
| ``` | |||
| """ | |||
| def send_output(self, output_id: str, data: pyarrow.Array, metadata: dict=None) -> None: | |||
| """`send_output` send data from the node. | |||
| ```python | |||
| Args: | |||
| output_id: str, | |||
| data: pyarrow.Array, | |||
| metadata: Option[Dict], | |||
| ``` | |||
| ```python | |||
| Args: | |||
| output_id: str, | |||
| data: pyarrow.Array, | |||
| metadata: Option[Dict], | |||
| ``` | |||
| ex: | |||
| ex: | |||
| ```python | |||
| node.send_output("string", b"string", {"open_telemetry_context": "7632e76"}) | |||
| ```""" | |||
| ```python | |||
| node.send_output("string", b"string", {"open_telemetry_context": "7632e76"}) | |||
| ``` | |||
| """ | |||
| def __iter__(self) -> typing.Any: | |||
| """Implement iter(self).""" | |||
| @@ -92,63 +106,66 @@ node.send_output("string", b"string", {"open_telemetry_context": "7632e76"}) | |||
| class Ros2Context: | |||
| """ROS2 Context holding all messages definition for receiving and sending messages to ROS2. | |||
| By default, Ros2Context will use env `AMENT_PREFIX_PATH` to search for message definition. | |||
| By default, Ros2Context will use the `AMENT_PREFIX_PATH` env variable to search for message definitions. | |||
| AMENT_PREFIX_PATH folder structure should be the following: | |||
| AMENT_PREFIX_PATH folder structure should be the following: | |||
| - For messages: <namespace>/msg/<name>.msg | |||
| - For services: <namespace>/srv/<name>.srv | |||
| - For messages: <namespace>/msg/<name>.msg | |||
| - For services: <namespace>/srv/<name>.srv | |||
| You can also use `ros_paths` if you don't want to use env variable. | |||
| You can also use `ros_paths` if you don't want to use the env variable. | |||
| warning:: | |||
| dora Ros2 bridge functionality is considered **unstable**. It may be changed | |||
| at any point without it being considered a breaking change. | |||
| warning:: | |||
| dora Ros2 bridge functionality is considered **unstable**. It may be changed | |||
| at any point without it being considered a breaking change. | |||
| ```python | |||
| context = Ros2Context() | |||
| ```""" | |||
| ```python | |||
| context = Ros2Context() | |||
| ``` | |||
| """ | |||
| def __init__(self, ros_paths: typing.List[str]=None) -> None: | |||
| """ROS2 Context holding all messages definition for receiving and sending messages to ROS2. | |||
| By default, Ros2Context will use env `AMENT_PREFIX_PATH` to search for message definition. | |||
| By default, Ros2Context will use the `AMENT_PREFIX_PATH` env variable to search for message definitions. | |||
| AMENT_PREFIX_PATH folder structure should be the following: | |||
| AMENT_PREFIX_PATH folder structure should be the following: | |||
| - For messages: <namespace>/msg/<name>.msg | |||
| - For services: <namespace>/srv/<name>.srv | |||
| - For messages: <namespace>/msg/<name>.msg | |||
| - For services: <namespace>/srv/<name>.srv | |||
| You can also use `ros_paths` if you don't want to use env variable. | |||
| You can also use `ros_paths` if you don't want to use the env variable. | |||
| warning:: | |||
| dora Ros2 bridge functionality is considered **unstable**. It may be changed | |||
| at any point without it being considered a breaking change. | |||
| warning:: | |||
| dora Ros2 bridge functionality is considered **unstable**. It may be changed | |||
| at any point without it being considered a breaking change. | |||
| ```python | |||
| context = Ros2Context() | |||
| ```""" | |||
| ```python | |||
| context = Ros2Context() | |||
| ``` | |||
| """ | |||
| def new_node(self, name: str, namespace: str, options: dora.Ros2NodeOptions) -> dora.Ros2Node: | |||
| """Create a new ROS2 node | |||
| """Create a new ROS2 node. | |||
| ```python | |||
| ros2_node = ros2_context.new_node( | |||
| "turtle_teleop", | |||
| "/ros2_demo", | |||
| Ros2NodeOptions(rosout=True), | |||
| ) | |||
| ``` | |||
| ```python | |||
| ros2_node = ros2_context.new_node( | |||
| "turtle_teleop", | |||
| "/ros2_demo", | |||
| Ros2NodeOptions(rosout=True), | |||
| ) | |||
| ``` | |||
| warning:: | |||
| dora Ros2 bridge functionality is considered **unstable**. It may be changed | |||
| at any point without it being considered a breaking change.""" | |||
| warning:: | |||
| dora Ros2 bridge functionality is considered **unstable**. It may be changed | |||
| at any point without it being considered a breaking change. | |||
| """ | |||
| @typing.final | |||
| class Ros2Durability: | |||
| """DDS 2.2.3.4 DURABILITY""" | |||
| """DDS 2.2.3.4 DURABILITY.""" | |||
| def __eq__(self, value: typing.Any) -> bool: | |||
| def __eq__(self, value: object) -> bool: | |||
| """Return self==value.""" | |||
| def __ge__(self, value: typing.Any) -> bool: | |||
| @@ -158,7 +175,7 @@ class Ros2Durability: | |||
| """Return self>value.""" | |||
| def __int__(self) -> None: | |||
| """int(self)""" | |||
| """int(self).""" | |||
| def __le__(self, value: typing.Any) -> bool: | |||
| """Return self<=value.""" | |||
| @@ -166,11 +183,9 @@ class Ros2Durability: | |||
| def __lt__(self, value: typing.Any) -> bool: | |||
| """Return self<value.""" | |||
| def __ne__(self, value: typing.Any) -> bool: | |||
| def __ne__(self, value: object) -> bool: | |||
| """Return self!=value.""" | |||
| def __repr__(self) -> str: | |||
| """Return repr(self).""" | |||
| Persistent: Ros2Durability = ... | |||
| Transient: Ros2Durability = ... | |||
| TransientLocal: Ros2Durability = ... | |||
| @@ -178,9 +193,9 @@ class Ros2Durability: | |||
| @typing.final | |||
| class Ros2Liveliness: | |||
| """DDS 2.2.3.11 LIVELINESS""" | |||
| """DDS 2.2.3.11 LIVELINESS.""" | |||
| def __eq__(self, value: typing.Any) -> bool: | |||
| def __eq__(self, value: object) -> bool: | |||
| """Return self==value.""" | |||
| def __ge__(self, value: typing.Any) -> bool: | |||
| @@ -190,7 +205,7 @@ class Ros2Liveliness: | |||
| """Return self>value.""" | |||
| def __int__(self) -> None: | |||
| """int(self)""" | |||
| """int(self).""" | |||
| def __le__(self, value: typing.Any) -> bool: | |||
| """Return self<=value.""" | |||
| @@ -198,114 +213,123 @@ class Ros2Liveliness: | |||
| def __lt__(self, value: typing.Any) -> bool: | |||
| """Return self<value.""" | |||
| def __ne__(self, value: typing.Any) -> bool: | |||
| def __ne__(self, value: object) -> bool: | |||
| """Return self!=value.""" | |||
| def __repr__(self) -> str: | |||
| """Return repr(self).""" | |||
| Automatic: Ros2Liveliness = ... | |||
| ManualByParticipant: Ros2Liveliness = ... | |||
| ManualByTopic: Ros2Liveliness = ... | |||
| @typing.final | |||
| class Ros2Node: | |||
| """ROS2 Node | |||
| """ROS2 Node. | |||
| warnings:: | |||
| - dora Ros2 bridge functionality is considered **unstable**. It may be changed | |||
| at any point without it being considered a breaking change. | |||
| - There's a known issue about ROS2 nodes not being discoverable by ROS2 | |||
| See: https://github.com/jhelovuo/ros2-client/issues/4""" | |||
| warnings:: | |||
| - dora Ros2 bridge functionality is considered **unstable**. It may be changed | |||
| at any point without it being considered a breaking change. | |||
| - There's a known issue about ROS2 nodes not being discoverable by ROS2 | |||
| See: https://github.com/jhelovuo/ros2-client/issues/4 | |||
| """ | |||
| def create_publisher(self, topic: dora.Ros2Topic, qos: dora.Ros2QosPolicies=None) -> dora.Ros2Publisher: | |||
| """Create a ROS2 publisher | |||
| """Create a ROS2 publisher. | |||
| ```python | |||
| pose_publisher = ros2_node.create_publisher(turtle_pose_topic) | |||
| ``` | |||
| warnings: | |||
| - dora Ros2 bridge functionality is considered **unstable**. It may be changed | |||
| at any point without it being considered a breaking change.""" | |||
| ```python | |||
| pose_publisher = ros2_node.create_publisher(turtle_pose_topic) | |||
| ``` | |||
| Warnings: | |||
| - dora Ros2 bridge functionality is considered **unstable**. It may be changed | |||
| at any point without it being considered a breaking change. | |||
| """ | |||
| def create_subscription(self, topic: dora.Ros2Topic, qos: dora.Ros2QosPolicies=None) -> dora.Ros2Subscription: | |||
| """Create a ROS2 subscription | |||
| """Create a ROS2 subscription. | |||
| ```python | |||
| pose_reader = ros2_node.create_subscription(turtle_pose_topic) | |||
| ``` | |||
| ```python | |||
| pose_reader = ros2_node.create_subscription(turtle_pose_topic) | |||
| ``` | |||
| warnings: | |||
| - dora Ros2 bridge functionality is considered **unstable**. It may be changed | |||
| at any point without it being considered a breaking change.""" | |||
| Warnings: | |||
| - dora Ros2 bridge functionality is considered **unstable**. It may be changed | |||
| at any point without it being considered a breaking change. | |||
| """ | |||
| def create_topic(self, name: str, message_type: str, qos: dora.Ros2QosPolicies) -> dora.Ros2Topic: | |||
| """Create a ROS2 topic to connect to. | |||
| ```python | |||
| turtle_twist_topic = ros2_node.create_topic( | |||
| "/turtle1/cmd_vel", "geometry_msgs/Twist", topic_qos | |||
| ) | |||
| ```""" | |||
| ```python | |||
| turtle_twist_topic = ros2_node.create_topic( | |||
| "/turtle1/cmd_vel", "geometry_msgs/Twist", topic_qos | |||
| ) | |||
| ``` | |||
| """ | |||
| @typing.final | |||
| class Ros2NodeOptions: | |||
| """ROS2 Node Options""" | |||
| """ROS2 Node Options.""" | |||
| def __init__(self, rosout: bool=None) -> None: | |||
| """ROS2 Node Options""" | |||
| """ROS2 Node Options.""" | |||
| @typing.final | |||
| class Ros2Publisher: | |||
| """ROS2 Publisher | |||
| """ROS2 Publisher. | |||
| Warnings: | |||
| - dora Ros2 bridge functionality is considered **unstable**. It may be changed | |||
| at any point without it being considered a breaking change. | |||
| warnings: | |||
| - dora Ros2 bridge functionality is considered **unstable**. It may be changed | |||
| at any point without it being considered a breaking change.""" | |||
| """ | |||
| def publish(self, data: pyarrow.Array) -> None: | |||
| """Publish a message into ROS2 topic. | |||
| Remember that the data format should respect the structure of the ROS2 message using an arrow Structure. | |||
| ex: | |||
| ```python | |||
| gripper_command.publish( | |||
| pa.array( | |||
| [ | |||
| { | |||
| "name": "gripper", | |||
| "cmd": np.float32(5), | |||
| } | |||
| ] | |||
| ), | |||
| ) | |||
| ```""" | |||
| Remember that the data format should respect the structure of the ROS2 message, using an Arrow struct. | |||
| ex: | |||
| ```python | |||
| gripper_command.publish( | |||
| pa.array( | |||
| [ | |||
| { | |||
| "name": "gripper", | |||
| "cmd": np.float32(5), | |||
| } | |||
| ] | |||
| ), | |||
| ) | |||
| ``` | |||
| """ | |||
| @typing.final | |||
| class Ros2QosPolicies: | |||
| """ROS2 QoS Policy""" | |||
| """ROS2 QoS Policy.""" | |||
| def __init__(self, durability: dora.Ros2Durability=None, liveliness: dora.Ros2Liveliness=None, reliable: bool=None, keep_all: bool=None, lease_duration: float=None, max_blocking_time: float=None, keep_last: int=None) -> dora.Ros2QosPolicies: | |||
| """ROS2 QoS Policy""" | |||
| """ROS2 QoS Policy.""" | |||
| @typing.final | |||
| class Ros2Subscription: | |||
| """ROS2 Subscription | |||
| """ROS2 Subscription. | |||
| Warnings: | |||
| - dora Ros2 bridge functionality is considered **unstable**. It may be changed | |||
| at any point without it being considered a breaking change. | |||
| warnings: | |||
| - dora Ros2 bridge functionality is considered **unstable**. It may be changed | |||
| at any point without it being considered a breaking change.""" | |||
| """ | |||
| def next(self):... | |||
| @typing.final | |||
| class Ros2Topic: | |||
| """ROS2 Topic | |||
| """ROS2 Topic. | |||
| Warnings: | |||
| - dora Ros2 bridge functionality is considered **unstable**. It may be changed | |||
| at any point without it being considered a breaking change. | |||
| warnings: | |||
| - dora Ros2 bridge functionality is considered **unstable**. It may be changed | |||
| at any point without it being considered a breaking change.""" | |||
| """ | |||
| def start_runtime() -> None: | |||
| """Start a runtime for Operators""" | |||
| """Start a runtime for Operators.""" | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| import pyarrow as pa | |||
| # Make sure to install torch with cuda | |||
| @@ -12,7 +14,7 @@ from pyarrow import cuda | |||
| def torch_to_ipc_buffer(tensor: torch.TensorType) -> tuple[pa.array, dict]: | |||
| """Converts a Pytorch tensor into a pyarrow buffer containing the IPC handle and its metadata. | |||
| """Convert a Pytorch tensor into a pyarrow buffer containing the IPC handle and its metadata. | |||
| Example Use: | |||
| ```python | |||
| @@ -33,7 +35,7 @@ def torch_to_ipc_buffer(tensor: torch.TensorType) -> tuple[pa.array, dict]: | |||
| def ipc_buffer_to_ipc_handle(handle_buffer: pa.array) -> cuda.IpcMemHandle: | |||
| """Converts a buffer containing a serialized handler into cuda IPC MemHandle. | |||
| """Convert a buffer containing a serialized handler into cuda IPC MemHandle. | |||
| example use: | |||
| ```python | |||
| @@ -55,7 +57,7 @@ def ipc_buffer_to_ipc_handle(handle_buffer: pa.array) -> cuda.IpcMemHandle: | |||
| def cudabuffer_to_numba(buffer: cuda.CudaBuffer, metadata: dict) -> DeviceNDArray: | |||
| """Converts a pyarrow CUDA buffer to numba. | |||
| """Convert a pyarrow CUDA buffer to numba. | |||
| example use: | |||
| ```python | |||
| @@ -79,7 +81,7 @@ def cudabuffer_to_numba(buffer: cuda.CudaBuffer, metadata: dict) -> DeviceNDArra | |||
| def cudabuffer_to_torch(buffer: cuda.CudaBuffer, metadata: dict) -> torch.Tensor: | |||
| """Converts a pyarrow CUDA buffer to a torch tensor. | |||
| """Convert a pyarrow CUDA buffer to a torch tensor. | |||
| example use: | |||
| ```python | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| import argparse | |||
| import ast | |||
| import importlib | |||
| @@ -5,11 +7,13 @@ import inspect | |||
| import logging | |||
| import re | |||
| import subprocess | |||
| from collections.abc import Mapping | |||
| from functools import reduce | |||
| from typing import Any, Dict, List, Mapping, Optional, Set, Tuple, Union | |||
| from typing import Any, Dict, List, Optional, Set, Tuple, Union | |||
| def path_to_type(*elements: str) -> ast.AST: | |||
| """TODO: Add docstring.""" | |||
| base: ast.AST = ast.Name(id=elements[0], ctx=ast.Load()) | |||
| for e in elements[1:]: | |||
| base = ast.Attribute(value=base, attr=e, ctx=ast.Load()) | |||
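As a quick illustration of the helper above (our example, not part of the diff): assuming `path_to_type` from this file is in scope, it chains `ast.Attribute` nodes onto an `ast.Name` to build a dotted name.

```python
import ast

# path_to_type("typing", "List") wraps Name("typing") in Attribute(attr="List"):
node = path_to_type("typing", "List")
print(ast.unparse(node))  # prints: typing.List
```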
| @@ -65,18 +69,17 @@ BUILTINS: Dict[str, Union[None, Tuple[List[ast.AST], ast.AST]]] = { | |||
| def module_stubs(module: Any) -> ast.Module: | |||
| """TODO: Add docstring.""" | |||
| types_to_import = {"typing"} | |||
| classes = [] | |||
| functions = [] | |||
| for member_name, member_value in inspect.getmembers(module): | |||
| element_path = [module.__name__, member_name] | |||
| if member_name.startswith("__"): | |||
| pass | |||
| elif member_name.startswith("DoraStatus"): | |||
| if member_name.startswith("__") or member_name.startswith("DoraStatus"): | |||
| pass | |||
| elif inspect.isclass(member_value): | |||
| classes.append( | |||
| class_stubs(member_name, member_value, element_path, types_to_import) | |||
| class_stubs(member_name, member_value, element_path, types_to_import), | |||
| ) | |||
| elif inspect.isbuiltin(member_value): | |||
| functions.append( | |||
| @@ -86,7 +89,7 @@ def module_stubs(module: Any) -> ast.Module: | |||
| element_path, | |||
| types_to_import, | |||
| in_class=False, | |||
| ) | |||
| ), | |||
| ) | |||
| else: | |||
| logging.warning(f"Unsupported root construction {member_name}") | |||
| @@ -99,8 +102,9 @@ def module_stubs(module: Any) -> ast.Module: | |||
| def class_stubs( | |||
| cls_name: str, cls_def: Any, element_path: List[str], types_to_import: Set[str] | |||
| cls_name: str, cls_def: Any, element_path: List[str], types_to_import: Set[str], | |||
| ) -> ast.ClassDef: | |||
| """TODO: Add docstring.""" | |||
| attributes: List[ast.AST] = [] | |||
| methods: List[ast.AST] = [] | |||
| magic_methods: List[ast.AST] = [] | |||
| @@ -123,7 +127,7 @@ def class_stubs( | |||
| except ValueError as e: | |||
| if "no signature found" not in str(e): | |||
| raise ValueError( | |||
| f"Error while parsing signature of {cls_name}.__init_" | |||
| f"Error while parsing signature of {cls_name}.__init_", | |||
| ) from e | |||
| elif ( | |||
| member_value == OBJECT_MEMBERS.get(member_name) | |||
| @@ -133,8 +137,8 @@ def class_stubs( | |||
| elif inspect.isdatadescriptor(member_value): | |||
| attributes.extend( | |||
| data_descriptor_stub( | |||
| member_name, member_value, current_element_path, types_to_import | |||
| ) | |||
| member_name, member_value, current_element_path, types_to_import, | |||
| ), | |||
| ) | |||
| elif inspect.isroutine(member_value): | |||
| (magic_methods if member_name.startswith("__") else methods).append( | |||
| @@ -144,7 +148,7 @@ def class_stubs( | |||
| current_element_path, | |||
| types_to_import, | |||
| in_class=True, | |||
| ) | |||
| ), | |||
| ) | |||
| elif member_name == "__match_args__": | |||
| constants.append( | |||
| @@ -153,28 +157,28 @@ def class_stubs( | |||
| annotation=ast.Subscript( | |||
| value=path_to_type("tuple"), | |||
| slice=ast.Tuple( | |||
| elts=[path_to_type("str"), ast.Ellipsis()], ctx=ast.Load() | |||
| elts=[path_to_type("str"), ast.Ellipsis()], ctx=ast.Load(), | |||
| ), | |||
| ctx=ast.Load(), | |||
| ), | |||
| value=ast.Constant(member_value), | |||
| simple=1, | |||
| ) | |||
| ), | |||
| ) | |||
| elif member_value is not None: | |||
| constants.append( | |||
| ast.AnnAssign( | |||
| target=ast.Name(id=member_name, ctx=ast.Store()), | |||
| annotation=concatenated_path_to_type( | |||
| member_value.__class__.__name__, element_path, types_to_import | |||
| member_value.__class__.__name__, element_path, types_to_import, | |||
| ), | |||
| value=ast.Ellipsis(), | |||
| simple=1, | |||
| ) | |||
| ), | |||
| ) | |||
| else: | |||
| logging.warning( | |||
| f"Unsupported member {member_name} of class {'.'.join(element_path)}" | |||
| f"Unsupported member {member_name} of class {'.'.join(element_path)}", | |||
| ) | |||
| doc = inspect.getdoc(cls_def) | |||
| @@ -201,6 +205,7 @@ def data_descriptor_stub( | |||
| element_path: List[str], | |||
| types_to_import: Set[str], | |||
| ) -> Union[Tuple[ast.AnnAssign, ast.Expr], Tuple[ast.AnnAssign]]: | |||
| """TODO: Add docstring.""" | |||
| annotation = None | |||
| doc_comment = None | |||
| @@ -212,7 +217,7 @@ def data_descriptor_stub( | |||
| doc_comment = m[0] | |||
| elif len(m) > 1: | |||
| raise ValueError( | |||
| f"Multiple return annotations found with :return: in {'.'.join(element_path)} documentation" | |||
| f"Multiple return annotations found with :return: in {'.'.join(element_path)} documentation", | |||
| ) | |||
| assign = ast.AnnAssign( | |||
| @@ -232,6 +237,7 @@ def function_stub( | |||
| *, | |||
| in_class: bool, | |||
| ) -> ast.FunctionDef: | |||
| """TODO: Add docstring.""" | |||
| body: List[ast.AST] = [] | |||
| doc = inspect.getdoc(fn_def) | |||
| if doc is not None: | |||
| @@ -262,8 +268,9 @@ def arguments_stub( | |||
| element_path: List[str], | |||
| types_to_import: Set[str], | |||
| ) -> ast.arguments: | |||
| """TODO: Add docstring.""" | |||
| real_parameters: Mapping[str, inspect.Parameter] = inspect.signature( | |||
| callable_def | |||
| callable_def, | |||
| ).parameters | |||
| if callable_name == "__init__": | |||
| real_parameters = { | |||
| @@ -285,19 +292,19 @@ def arguments_stub( | |||
| # Types from comment | |||
| for match in re.findall( | |||
| r"^ *:type *([a-zA-Z0-9_]+): ([^\n]*) *$", doc, re.MULTILINE | |||
| r"^ *:type *([a-zA-Z0-9_]+): ([^\n]*) *$", doc, re.MULTILINE, | |||
| ): | |||
| if match[0] not in real_parameters: | |||
| raise ValueError( | |||
| f"The parameter {match[0]} of {'.'.join(element_path)} " | |||
| "is defined in the documentation but not in the function signature" | |||
| "is defined in the documentation but not in the function signature", | |||
| ) | |||
| type = match[1] | |||
| if type.endswith(", optional"): | |||
| optional_params.add(match[0]) | |||
| type = type[:-10] | |||
| parsed_param_types[match[0]] = convert_type_from_doc( | |||
| type, element_path, types_to_import | |||
| type, element_path, types_to_import, | |||
| ) | |||
| # we parse the parameters | |||
| @@ -312,10 +319,10 @@ def arguments_stub( | |||
| if param.name != "self" and param.name not in parsed_param_types: | |||
| raise ValueError( | |||
| f"The parameter {param.name} of {'.'.join(element_path)} " | |||
| "has no type definition in the function documentation" | |||
| "has no type definition in the function documentation", | |||
| ) | |||
| param_ast = ast.arg( | |||
| arg=param.name, annotation=parsed_param_types.get(param.name) | |||
| arg=param.name, annotation=parsed_param_types.get(param.name), | |||
| ) | |||
| default_ast = None | |||
| @@ -324,12 +331,12 @@ def arguments_stub( | |||
| if param.name not in optional_params: | |||
| raise ValueError( | |||
| f"Parameter {param.name} of {'.'.join(element_path)} " | |||
| "is optional according to the type but not flagged as such in the doc" | |||
| "is optional according to the type but not flagged as such in the doc", | |||
| ) | |||
| elif param.name in optional_params: | |||
| raise ValueError( | |||
| f"Parameter {param.name} of {'.'.join(element_path)} " | |||
| "is optional according to the documentation but has no default value" | |||
| "is optional according to the documentation but has no default value", | |||
| ) | |||
| if param.kind == param.POSITIONAL_ONLY: | |||
| @@ -359,8 +366,9 @@ def arguments_stub( | |||
| def returns_stub( | |||
| callable_name: str, doc: str, element_path: List[str], types_to_import: Set[str] | |||
| callable_name: str, doc: str, element_path: List[str], types_to_import: Set[str], | |||
| ) -> Optional[ast.AST]: | |||
| """TODO: Add docstring.""" | |||
| m = re.findall(r"^ *:rtype: *([^\n]*) *$", doc, re.MULTILINE) | |||
| if len(m) == 0: | |||
| builtin = BUILTINS.get(callable_name) | |||
| @@ -368,26 +376,28 @@ def returns_stub( | |||
| return builtin[1] | |||
| raise ValueError( | |||
| f"The return type of {'.'.join(element_path)} " | |||
| "has no type definition using :rtype: in the function documentation" | |||
| "has no type definition using :rtype: in the function documentation", | |||
| ) | |||
| if len(m) > 1: | |||
| raise ValueError( | |||
| f"Multiple return type annotations found with :rtype: for {'.'.join(element_path)}" | |||
| f"Multiple return type annotations found with :rtype: for {'.'.join(element_path)}", | |||
| ) | |||
| return convert_type_from_doc(m[0], element_path, types_to_import) | |||
| def convert_type_from_doc( | |||
| type_str: str, element_path: List[str], types_to_import: Set[str] | |||
| type_str: str, element_path: List[str], types_to_import: Set[str], | |||
| ) -> ast.AST: | |||
| """TODO: Add docstring.""" | |||
| type_str = type_str.strip() | |||
| return parse_type_to_ast(type_str, element_path, types_to_import) | |||
| def parse_type_to_ast( | |||
| type_str: str, element_path: List[str], types_to_import: Set[str] | |||
| type_str: str, element_path: List[str], types_to_import: Set[str], | |||
| ) -> ast.AST: | |||
| # let's tokenize | |||
| """TODO: Add docstring.""" | |||
| tokens = [] | |||
| current_token = "" | |||
| for c in type_str: | |||
| @@ -417,12 +427,11 @@ def parse_type_to_ast( | |||
| # then it's easy | |||
| def parse_sequence(sequence: List[Any]) -> ast.AST: | |||
| # we split based on "or" | |||
| """TODO: Add docstring.""" | |||
| or_groups: List[List[str]] = [[]] | |||
| print(sequence) | |||
| # TODO: Fix sequence | |||
| if "Ros" in sequence and "2" in sequence: | |||
| sequence = ["".join(sequence)] | |||
| elif "dora.Ros" in sequence and "2" in sequence: | |||
| if ("Ros" in sequence and "2" in sequence) or ("dora.Ros" in sequence and "2" in sequence): | |||
| sequence = ["".join(sequence)] | |||
| for e in sequence: | |||
| @@ -432,14 +441,14 @@ def parse_type_to_ast( | |||
| or_groups[-1].append(e) | |||
| if any(not g for g in or_groups): | |||
| raise ValueError( | |||
| f"Not able to parse type '{type_str}' used by {'.'.join(element_path)}" | |||
| f"Not able to parse type '{type_str}' used by {'.'.join(element_path)}", | |||
| ) | |||
| new_elements: List[ast.AST] = [] | |||
| for group in or_groups: | |||
| if len(group) == 1 and isinstance(group[0], str): | |||
| new_elements.append( | |||
| concatenated_path_to_type(group[0], element_path, types_to_import) | |||
| concatenated_path_to_type(group[0], element_path, types_to_import), | |||
| ) | |||
| elif ( | |||
| len(group) == 2 | |||
| @@ -449,15 +458,15 @@ def parse_type_to_ast( | |||
| new_elements.append( | |||
| ast.Subscript( | |||
| value=concatenated_path_to_type( | |||
| group[0], element_path, types_to_import | |||
| group[0], element_path, types_to_import, | |||
| ), | |||
| slice=parse_sequence(group[1]), | |||
| ctx=ast.Load(), | |||
| ) | |||
| ), | |||
| ) | |||
| else: | |||
| raise ValueError( | |||
| f"Not able to parse type '{type_str}' used by {'.'.join(element_path)}" | |||
| f"Not able to parse type '{type_str}' used by {'.'.join(element_path)}", | |||
| ) | |||
| return reduce( | |||
| lambda left, right: ast.BinOp(left=left, op=ast.BitOr(), right=right), | |||
| @@ -468,12 +477,13 @@ def parse_type_to_ast( | |||
| def concatenated_path_to_type( | |||
| path: str, element_path: List[str], types_to_import: Set[str] | |||
| path: str, element_path: List[str], types_to_import: Set[str], | |||
| ) -> ast.AST: | |||
| """TODO: Add docstring.""" | |||
| parts = path.split(".") | |||
| if any(not p for p in parts): | |||
| raise ValueError( | |||
| f"Not able to parse type '{path}' used by {'.'.join(element_path)}" | |||
| f"Not able to parse type '{path}' used by {'.'.join(element_path)}", | |||
| ) | |||
| if len(parts) > 1: | |||
| types_to_import.add(".".join(parts[:-1])) | |||
| @@ -481,6 +491,7 @@ def concatenated_path_to_type( | |||
| def build_doc_comment(doc: str) -> Optional[ast.Expr]: | |||
| """TODO: Add docstring.""" | |||
| lines = [line.strip() for line in doc.split("\n")] | |||
| clean_lines = [] | |||
| for line in lines: | |||
| @@ -492,15 +503,16 @@ def build_doc_comment(doc: str) -> Optional[ast.Expr]: | |||
| def format_with_ruff(file: str) -> None: | |||
| """TODO: Add docstring.""" | |||
| subprocess.check_call(["python", "-m", "ruff", "format", file]) | |||
| if __name__ == "__main__": | |||
| parser = argparse.ArgumentParser( | |||
| description="Extract Python type stub from a python module." | |||
| description="Extract Python type stub from a python module.", | |||
| ) | |||
| parser.add_argument( | |||
| "module_name", help="Name of the Python module for which generate stubs" | |||
| "module_name", help="Name of the Python module for which generate stubs", | |||
| ) | |||
| parser.add_argument( | |||
| "out", | |||
| @@ -508,7 +520,7 @@ if __name__ == "__main__": | |||
| type=argparse.FileType("wt"), | |||
| ) | |||
| parser.add_argument( | |||
| "--ruff", help="Formats the generated stubs using Ruff", action="store_true" | |||
| "--ruff", help="Formats the generated stubs using Ruff", action="store_true", | |||
| ) | |||
| args = parser.parse_args() | |||
| stub_content = ast.unparse(module_stubs(importlib.import_module(args.module_name))) | |||
| @@ -16,3 +16,8 @@ dev = ["pytest >=8.1.1", "ruff >=0.9.1"] | |||
| [tool.maturin] | |||
| features = ["pyo3/extension-module"] | |||
| [tool.ruff.lint] | |||
| extend-select = [ | |||
| "D", # pydocstyle | |||
| ] | |||
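Enabling the pydocstyle (`D`) rules here is what drives the docstring churn throughout this PR: Ruff now requires every public module, class, and function to carry a docstring whose first line is a single sentence in the imperative mood ending with a period. A minimal compliant sketch (illustrative only, not code from the PR):

```python
"""Example module docstring (satisfies D100)."""


def dataflow_id() -> str:
    """Return the dataflow id."""  # D400/D401: imperative one-line summary, ends with a period
    return "dataflow-0"
```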
| @@ -12,3 +12,8 @@ dependencies = ['dora-rs >= 0.3.9', 'uv'] | |||
| [tool.maturin] | |||
| features = ["python", "pyo3/extension-module"] | |||
| [tool.ruff.lint] | |||
| extend-select = [ | |||
| "D", # pydocstyle | |||
| ] | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| import os | |||
| # Define the path to the README file relative to the package directory | |||
| @@ -5,7 +7,7 @@ readme_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "README.m | |||
| # Read the content of the README file | |||
| try: | |||
| with open(readme_path, "r", encoding="utf-8") as f: | |||
| with open(readme_path, encoding="utf-8") as f: | |||
| __doc__ = f.read() | |||
| except FileNotFoundError: | |||
| __doc__ = "README file not found." | |||
| @@ -1,5 +1,6 @@ | |||
| from .main import main | |||
| """TODO: Add docstring.""" | |||
| from .main import main | |||
| if __name__ == "__main__": | |||
| main() | |||
| @@ -1,8 +1,11 @@ | |||
| from dora import Node | |||
| """TODO: Add docstring.""" | |||
| import pyarrow as pa | |||
| from dora import Node | |||
| def main(): | |||
| """TODO: Add docstring.""" | |||
| node = Node() | |||
| for event in node: | |||
| @@ -12,13 +15,13 @@ def main(): | |||
| f"""Node received: | |||
| id: {event["id"]}, | |||
| value: {event["value"]}, | |||
| metadata: {event["metadata"]}""" | |||
| metadata: {event["metadata"]}""", | |||
| ) | |||
| elif event["id"] == "my_input_id": | |||
| # Warning: Make sure to add my_output_id and my_input_id within the dataflow. | |||
| node.send_output( | |||
| output_id="my_output_id", data=pa.array([1, 2, 3]), metadata={} | |||
| output_id="my_output_id", data=pa.array([1, 2, 3]), metadata={}, | |||
| ) | |||
| @@ -14,3 +14,8 @@ dev = ["pytest >=8.1.1", "ruff >=0.9.1"] | |||
| [project.scripts] | |||
| __node-name__ = "__node_name__.main:main" | |||
| [tool.ruff.lint] | |||
| extend-select = [ | |||
| "D", | |||
| ] | |||
| @@ -1,9 +1,13 @@ | |||
| """Test module for __node_name__ package.""" | |||
| import pytest | |||
| def test_import_main(): | |||
| """Test importing and running the main function.""" | |||
| from __node_name__.main import main | |||
| # Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow. | |||
| # Check that everything is working, and catch Dora RuntimeError | |||
| # as we're not running in a Dora dataflow. | |||
| with pytest.raises(RuntimeError): | |||
| main() | |||
| @@ -1,13 +1,14 @@ | |||
| from dora import Node | |||
| """Module to process node input events and print received messages.""" | |||
| from dora import Node | |||
| def main(): | |||
| """Listen for input events and print received messages.""" | |||
| node = Node() | |||
| for event in node: | |||
| if event["type"] == "INPUT": | |||
| message = event["value"][0].as_py() | |||
| print(f"""I heard {message} from {event["id"]}""") | |||
| if __name__ == "__main__": | |||
| main() | |||
| @@ -1,32 +1,33 @@ | |||
| """TODO: Add docstring.""" | |||
| from dora import DoraStatus | |||
| class Operator: | |||
| """ | |||
| Template docstring | |||
| """ | |||
| """Template docstring.""" | |||
| def __init__(self): | |||
| """Called on initialisation""" | |||
| pass | |||
| """Perform initialization tasks.""" | |||
| def on_event( | |||
| self, | |||
| dora_event, | |||
| send_output, | |||
| ) -> DoraStatus: | |||
| """ | |||
| Args: | |||
| dora_event: Event containing an `id`, `data` and `metadata`. | |||
| send_output Callable[[str, bytes | pa.Array, Optional[dict]], None]: | |||
| Function for sending output to the dataflow: | |||
| - First argument is the `output_id` | |||
| - Second argument is the data as either bytes or `pa.Array` | |||
| - Third argument is dora metadata dict | |||
| e.g.: `send_output("bbox", pa.array([100], type=pa.uint8()), dora_event["metadata"])` | |||
| Returns: | |||
| """TODO :Description. | |||
| Parameters | |||
| ---------- | |||
| dora_event : dict | |||
| Event containing an `id`, `data`, and `metadata`. | |||
| send_output : Callable[[str, bytes | pa.Array, Optional[dict]], None] | |||
| Function for sending output to the dataflow. The first argument is the `output_id`, the second | |||
| argument is the data (either as bytes or a pa.Array), and the third argument is the dora metadata | |||
| dictionary. For example: | |||
| send_output("bbox", pa.array([100], type=pa.uint8()), dora_event["metadata"]). | |||
| Returns | |||
| ------- | |||
| DoraStatus: | |||
| CONTINUE means that the operator will | |||
| keep listening for further inputs. | |||
| @@ -35,11 +36,10 @@ class Operator: | |||
| """ | |||
| if dora_event["type"] == "INPUT": | |||
| print( | |||
| f"Received input {dora_event['id']}, with data: {dora_event['value']}" | |||
| f"Received input {dora_event['id']}, with data: {dora_event['value']}", | |||
| ) | |||
| return DoraStatus.CONTINUE | |||
| def __del__(self): | |||
| """Called before being deleted""" | |||
| pass | |||
| """Perform actions before being deleted.""" | |||
| @@ -1,8 +1,10 @@ | |||
| from dora import Node | |||
| import pyarrow as pa | |||
| """Module to handle node input events and send speech output.""" | |||
| import pyarrow as pa | |||
| from dora import Node | |||
| def main(): | |||
| """Process node input events and send speech output.""" | |||
| node = Node() | |||
| for event in node: | |||
| @@ -11,10 +13,9 @@ def main(): | |||
| f"""Node received: | |||
| id: {event["id"]}, | |||
| value: {event["value"]}, | |||
| metadata: {event["metadata"]}""" | |||
| metadata: {event["metadata"]}""", | |||
| ) | |||
| node.send_output("speech", pa.array(["Hello World"])) | |||
| if __name__ == "__main__": | |||
| main() | |||
| @@ -6,8 +6,8 @@ | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "from dora import Node\n", | |||
| "import IPython.display\n", | |||
| "from dora import Node\n", | |||
| "from IPython.display import clear_output" | |||
| ] | |||
| }, | |||
| @@ -1,17 +1,17 @@ | |||
| #!/usr/bin/env python | |||
| # -*- coding: utf-8 -*- | |||
| """TODO: Add docstring.""" | |||
| import os | |||
| import time | |||
| import numpy as np | |||
| import pyarrow as pa | |||
| from tqdm import tqdm | |||
| import torch | |||
| from dora import Node | |||
| from dora.cuda import ipc_buffer_to_ipc_handle, cudabuffer_to_torch | |||
| from dora.cuda import cudabuffer_to_torch, ipc_buffer_to_ipc_handle | |||
| from helper import record_results | |||
| import torch | |||
| import numpy as np | |||
| from tqdm import tqdm | |||
| torch.tensor([], device="cuda") | |||
| @@ -31,7 +31,7 @@ NAME = f"dora torch {DEVICE}" | |||
| ctx = pa.cuda.Context() | |||
| print("") | |||
| print() | |||
| print("Receiving 40MB packets using default dora-rs") | |||
| while True: | |||
| @@ -79,7 +79,7 @@ pbar.close() | |||
| time.sleep(2) | |||
| print("") | |||
| print() | |||
| print("----") | |||
| print(f"Node communication duration with default dora-rs: {mean_cpu/1000:.1f}ms") | |||
| print(f"Node communication duration with dora CUDA->CUDA: {mean_cuda/1000:.1f}ms") | |||
| @@ -1,13 +1,15 @@ | |||
| #!/usr/bin/env python | |||
| # -*- coding: utf-8 -*- | |||
| """TODO: Add docstring.""" | |||
| import time | |||
| import os | |||
| import time | |||
| import numpy as np | |||
| import pyarrow as pa | |||
| import torch | |||
| from dora import Node | |||
| from dora.cuda import torch_to_ipc_buffer | |||
| import torch | |||
| torch.tensor([], device="cuda") | |||
| @@ -24,7 +26,7 @@ time.sleep(1) | |||
| # test latency first | |||
| for size in SIZES: | |||
| for _ in range(0, 100): | |||
| for _ in range(100): | |||
| now = time.time() | |||
| random_data = np.random.randint(1000, size=size, dtype=np.int64) | |||
| torch_tensor = torch.tensor(random_data, dtype=torch.int64, device="cuda") | |||
| @@ -51,7 +53,7 @@ DEVICE = "cuda" | |||
| time.sleep(1) | |||
| for size in SIZES: | |||
| for _ in range(0, 100): | |||
| for _ in range(100): | |||
| now = time.time() | |||
| random_data = np.random.randint(1000, size=size, dtype=np.int64) | |||
| torch_tensor = torch.tensor(random_data, dtype=torch.int64, device="cuda") | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| import csv | |||
| import os | |||
| import platform | |||
| @@ -27,7 +29,7 @@ LOG_HEADER = [ | |||
| def record_results(name, current_size, latencies): | |||
| """TODO: Add docstring.""" | |||
| avg_latency = np.array(latencies).mean() | |||
| # Calculate Q1 (25th percentile), median (50th percentile), and Q3 (75th percentile) | |||
| @@ -1,16 +1,16 @@ | |||
| #!/usr/bin/env python | |||
| # -*- coding: utf-8 -*- | |||
| """TODO: Add docstring.""" | |||
| import os | |||
| import time | |||
| import pyarrow as pa | |||
| from tqdm import tqdm | |||
| import torch | |||
| from dora import Node | |||
| from dora.cuda import ipc_buffer_to_ipc_handle, cudabuffer_to_torch | |||
| from dora.cuda import cudabuffer_to_torch, ipc_buffer_to_ipc_handle | |||
| from helper import record_results | |||
| import torch | |||
| from tqdm import tqdm | |||
| torch.tensor([], device="cuda") | |||
| @@ -1,13 +1,15 @@ | |||
| #!/usr/bin/env python | |||
| # -*- coding: utf-8 -*- | |||
| """TODO: Add docstring.""" | |||
| import time | |||
| import os | |||
| import time | |||
| import numpy as np | |||
| import pyarrow as pa | |||
| import torch | |||
| from dora import Node | |||
| from dora.cuda import torch_to_ipc_buffer | |||
| import torch | |||
| torch.tensor([], device="cuda") | |||
| @@ -22,7 +24,7 @@ node = Node() | |||
| time.sleep(4) | |||
| # test latency first | |||
| for size in SIZES: | |||
| for _ in range(0, 100): | |||
| for _ in range(100): | |||
| now = time.time() | |||
| random_data = np.random.randint(1000, size=size, dtype=np.int64) | |||
| torch_tensor = torch.tensor(random_data, dtype=torch.int64, device="cuda") | |||
| @@ -1,9 +1,12 @@ | |||
| """TODO: Add docstring.""" | |||
| from openai import OpenAI | |||
| client = OpenAI(base_url="http://localhost:8000/v1", api_key="dummy_api_key") | |||
| def test_list_models(): | |||
| """TODO: Add docstring.""" | |||
| try: | |||
| models = client.models.list() | |||
| print("Available models:") | |||
| @@ -14,6 +17,7 @@ def test_list_models(): | |||
| def test_chat_completion(user_input): | |||
| """TODO: Add docstring.""" | |||
| try: | |||
| response = client.chat.completions.create( | |||
| model="gpt-3.5-turbo", | |||
| @@ -1,11 +1,11 @@ | |||
| """TODO: Add docstring.""" | |||
| import numpy as np | |||
| from scipy.spatial.transform import Rotation as R | |||
| def convert_quaternion_to_euler(quat): | |||
| """ | |||
| Convert Quaternion (xyzw) to Euler angles (rpy) | |||
| """ | |||
| """Convert Quaternion (xyzw) to Euler angles (rpy).""" | |||
| # Normalize | |||
| quat = quat / np.linalg.norm(quat) | |||
| euler = R.from_quat(quat).as_euler("xyz") | |||
| @@ -14,27 +14,21 @@ def convert_quaternion_to_euler(quat): | |||
| def convert_euler_to_quaternion(euler): | |||
| """ | |||
| Convert Euler angles (rpy) to Quaternion (xyzw) | |||
| """ | |||
| """Convert Euler angles (rpy) to Quaternion (xyzw).""" | |||
| quat = R.from_euler("xyz", euler).as_quat() | |||
| return quat | |||
| def convert_euler_to_rotation_matrix(euler): | |||
| """ | |||
| Convert Euler angles (rpy) to rotation matrix (3x3). | |||
| """ | |||
| """Convert Euler angles (rpy) to rotation matrix (3x3).""" | |||
| quat = R.from_euler("xyz", euler).as_matrix() | |||
| return quat | |||
| def convert_rotation_matrix_to_euler(rotmat): | |||
| """ | |||
| Convert rotation matrix (3x3) to Euler angles (rpy). | |||
| """ | |||
| """Convert rotation matrix (3x3) to Euler angles (rpy).""" | |||
| r = R.from_matrix(rotmat) | |||
| euler = r.as_euler("xyz", degrees=False) | |||
| @@ -42,12 +36,14 @@ def convert_rotation_matrix_to_euler(rotmat): | |||
| def normalize_vector(v): | |||
| """TODO: Add docstring.""" | |||
| v_mag = np.linalg.norm(v, axis=-1, keepdims=True) | |||
| v_mag = np.maximum(v_mag, 1e-8) | |||
| return v / v_mag | |||
| def cross_product(u, v): | |||
| """TODO: Add docstring.""" | |||
| i = u[:, 1] * v[:, 2] - u[:, 2] * v[:, 1] | |||
| j = u[:, 2] * v[:, 0] - u[:, 0] * v[:, 2] | |||
| k = u[:, 0] * v[:, 1] - u[:, 1] * v[:, 0] | |||
| @@ -57,6 +53,7 @@ def cross_product(u, v): | |||
| def compute_rotation_matrix_from_ortho6d(ortho6d): | |||
| """TODO: Add docstring.""" | |||
| x_raw = ortho6d[:, 0:3] | |||
| y_raw = ortho6d[:, 3:6] | |||
| @@ -77,5 +74,6 @@ def compute_ortho6d_from_rotation_matrix(matrix): | |||
| # rotation matrix: [ | , |, | ] | |||
| # [ a1, a2, a3] | |||
| # [ | , |, | ] | |||
| """TODO: Add docstring.""" | |||
| ortho6d = matrix[:, :, :2].transpose(0, 2, 1).reshape(matrix.shape[0], -1) | |||
| return ortho6d | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| from dora import Node | |||
| node = Node() | |||
| @@ -215,11 +217,12 @@ pred = np.array( | |||
| 0.0913, | |||
| 0.1221, | |||
| ], | |||
| ] | |||
| ] | |||
| ], | |||
| ], | |||
| ) | |||
| import time | |||
| import pyarrow as pa | |||
| data = pred[0] | |||
| @@ -1,7 +1,7 @@ | |||
| from dora import Node | |||
| """TODO: Add docstring.""" | |||
| import h5py | |||
| from dora import Node | |||
| f = h5py.File("data/episode_0.hdf5", "r") | |||
| @@ -10,19 +10,19 @@ data = f["action"][:] | |||
| STATE_VEC_IDX_MAPPING = { | |||
| # [0, 10): right arm joint positions | |||
| **{"arm_joint_{}_pos".format(i): i for i in range(10)}, | |||
| **{"right_arm_joint_{}_pos".format(i): i for i in range(10)}, | |||
| **{f"arm_joint_{i}_pos": i for i in range(10)}, | |||
| **{f"right_arm_joint_{i}_pos": i for i in range(10)}, | |||
| # [10, 15): right gripper joint positions | |||
| **{"gripper_joint_{}_pos".format(i): i + 10 for i in range(5)}, | |||
| **{"right_gripper_joint_{}_pos".format(i): i + 10 for i in range(5)}, | |||
| **{f"gripper_joint_{i}_pos": i + 10 for i in range(5)}, | |||
| **{f"right_gripper_joint_{i}_pos": i + 10 for i in range(5)}, | |||
| "gripper_open": 10, # alias of right_gripper_joint_0_pos | |||
| "right_gripper_open": 10, | |||
| # [15, 25): right arm joint velocities | |||
| **{"arm_joint_{}_vel".format(i): i + 15 for i in range(10)}, | |||
| **{"right_arm_joint_{}_vel".format(i): i + 15 for i in range(10)}, | |||
| **{f"arm_joint_{i}_vel": i + 15 for i in range(10)}, | |||
| **{f"right_arm_joint_{i}_vel": i + 15 for i in range(10)}, | |||
| # [25, 30): right gripper joint velocities | |||
| **{"gripper_joint_{}_vel".format(i): i + 25 for i in range(5)}, | |||
| **{"right_gripper_joint_{}_vel".format(i): i + 25 for i in range(5)}, | |||
| **{f"gripper_joint_{i}_vel": i + 25 for i in range(5)}, | |||
| **{f"right_gripper_joint_{i}_vel": i + 25 for i in range(5)}, | |||
| "gripper_open_vel": 25, # alias of right_gripper_joint_0_vel | |||
| "right_gripper_open_vel": 25, | |||
| # [30, 33): right end effector positions | |||
| @@ -61,14 +61,14 @@ STATE_VEC_IDX_MAPPING = { | |||
| "right_eef_angular_vel_yaw": 44, | |||
| # [45, 50): reserved | |||
| # [50, 60): left arm joint positions | |||
| **{"left_arm_joint_{}_pos".format(i): i + 50 for i in range(10)}, | |||
| **{f"left_arm_joint_{i}_pos": i + 50 for i in range(10)}, | |||
| # [60, 65): left gripper joint positions | |||
| **{"left_gripper_joint_{}_pos".format(i): i + 60 for i in range(5)}, | |||
| **{f"left_gripper_joint_{i}_pos": i + 60 for i in range(5)}, | |||
| "left_gripper_open": 60, # alias of left_gripper_joint_0_pos | |||
| # [65, 75): left arm joint velocities | |||
| **{"left_arm_joint_{}_vel".format(i): i + 65 for i in range(10)}, | |||
| **{f"left_arm_joint_{i}_vel": i + 65 for i in range(10)}, | |||
| # [75, 80): left gripper joint velocities | |||
| **{"left_gripper_joint_{}_vel".format(i): i + 75 for i in range(5)}, | |||
| **{f"left_gripper_joint_{i}_vel": i + 75 for i in range(5)}, | |||
| "left_gripper_open_vel": 75, # alias of left_gripper_joint_0_vel | |||
| # [80, 83): left end effector positions | |||
| "left_eef_pos_x": 80, | |||
| @@ -99,6 +99,7 @@ STATE_VEC_IDX_MAPPING = { | |||
| } | |||
| import time | |||
| import pyarrow as pa | |||
| node = Node() | |||
| @@ -109,17 +110,17 @@ RIGHT_UNI_STATE_INDICES = [ | |||
| STATE_VEC_IDX_MAPPING[f"right_arm_joint_{i}_pos"] for i in range(6) | |||
| ] + [STATE_VEC_IDX_MAPPING["right_gripper_open"]] | |||
| MOBILE_BASE_UNI_STATE_INDICES = [STATE_VEC_IDX_MAPPING["base_vel_x"]] + [ | |||
| STATE_VEC_IDX_MAPPING["base_angular_vel"] | |||
| STATE_VEC_IDX_MAPPING["base_angular_vel"], | |||
| ] | |||
| for joint in data: | |||
| node.send_output( | |||
| "jointstate_left", pa.array(joint[LEFT_UNI_STATE_INDICES], type=pa.float32()) | |||
| "jointstate_left", pa.array(joint[LEFT_UNI_STATE_INDICES], type=pa.float32()), | |||
| ) | |||
| node.send_output( | |||
| "jointstate_right", pa.array(joint[RIGHT_UNI_STATE_INDICES], type=pa.float32()) | |||
| "jointstate_right", pa.array(joint[RIGHT_UNI_STATE_INDICES], type=pa.float32()), | |||
| ) | |||
| node.send_output( | |||
| "mobile_base", pa.array(joint[MOBILE_BASE_UNI_STATE_INDICES], type=pa.float32()) | |||
| "mobile_base", pa.array(joint[MOBILE_BASE_UNI_STATE_INDICES], type=pa.float32()), | |||
| ) | |||
| time.sleep(0.05) | |||
| @@ -1,9 +1,12 @@ | |||
| """TODO: Add docstring.""" | |||
| from dora import Node | |||
| node = Node() | |||
| import time | |||
| import pyarrow as pa | |||
| for event in node: | |||
| @@ -25,7 +28,7 @@ for event in node: | |||
| node.send_output("jointstate_left", pa.array(action[:7], type=pa.float32())) | |||
| node.send_output( | |||
| "jointstate_right", pa.array(action[7:], type=pa.float32()) | |||
| "jointstate_right", pa.array(action[7:], type=pa.float32()), | |||
| ) | |||
| time.sleep(0.02) | |||
| print(actions) | |||
| @@ -1,30 +1,31 @@ | |||
| import h5py | |||
| """TODO: Add docstring.""" | |||
| import os | |||
| import datetime | |||
| import os | |||
| from dora import Node | |||
| import h5py | |||
| import numpy as np | |||
| from convert import ( | |||
| convert_euler_to_rotation_matrix, | |||
| compute_ortho6d_from_rotation_matrix, | |||
| convert_euler_to_rotation_matrix, | |||
| ) | |||
| from dora import Node | |||
| STATE_VEC_IDX_MAPPING = { | |||
| # [0, 10): right arm joint positions | |||
| **{"arm_joint_{}_pos".format(i): i for i in range(10)}, | |||
| **{"right_arm_joint_{}_pos".format(i): i for i in range(10)}, | |||
| **{f"arm_joint_{i}_pos": i for i in range(10)}, | |||
| **{f"right_arm_joint_{i}_pos": i for i in range(10)}, | |||
| # [10, 15): right gripper joint positions | |||
| **{"gripper_joint_{}_pos".format(i): i + 10 for i in range(5)}, | |||
| **{"right_gripper_joint_{}_pos".format(i): i + 10 for i in range(5)}, | |||
| **{f"gripper_joint_{i}_pos": i + 10 for i in range(5)}, | |||
| **{f"right_gripper_joint_{i}_pos": i + 10 for i in range(5)}, | |||
| "gripper_open": 10, # alias of right_gripper_joint_0_pos | |||
| "right_gripper_open": 10, | |||
| # [15, 25): right arm joint velocities | |||
| **{"arm_joint_{}_vel".format(i): i + 15 for i in range(10)}, | |||
| **{"right_arm_joint_{}_vel".format(i): i + 15 for i in range(10)}, | |||
| **{f"arm_joint_{i}_vel": i + 15 for i in range(10)}, | |||
| **{f"right_arm_joint_{i}_vel": i + 15 for i in range(10)}, | |||
| # [25, 30): right gripper joint velocities | |||
| **{"gripper_joint_{}_vel".format(i): i + 25 for i in range(5)}, | |||
| **{"right_gripper_joint_{}_vel".format(i): i + 25 for i in range(5)}, | |||
| **{f"gripper_joint_{i}_vel": i + 25 for i in range(5)}, | |||
| **{f"right_gripper_joint_{i}_vel": i + 25 for i in range(5)}, | |||
| "gripper_open_vel": 25, # alias of right_gripper_joint_0_vel | |||
| "right_gripper_open_vel": 25, | |||
| # [30, 33): right end effector positions | |||
| @@ -63,14 +64,14 @@ STATE_VEC_IDX_MAPPING = { | |||
| "right_eef_angular_vel_yaw": 44, | |||
| # [45, 50): reserved | |||
| # [50, 60): left arm joint positions | |||
| **{"left_arm_joint_{}_pos".format(i): i + 50 for i in range(10)}, | |||
| **{f"left_arm_joint_{i}_pos": i + 50 for i in range(10)}, | |||
| # [60, 65): left gripper joint positions | |||
| **{"left_gripper_joint_{}_pos".format(i): i + 60 for i in range(5)}, | |||
| **{f"left_gripper_joint_{i}_pos": i + 60 for i in range(5)}, | |||
| "left_gripper_open": 60, # alias of left_gripper_joint_0_pos | |||
| # [65, 75): left arm joint velocities | |||
| **{"left_arm_joint_{}_vel".format(i): i + 65 for i in range(10)}, | |||
| **{f"left_arm_joint_{i}_vel": i + 65 for i in range(10)}, | |||
| # [75, 80): left gripper joint velocities | |||
| **{"left_gripper_joint_{}_vel".format(i): i + 75 for i in range(5)}, | |||
| **{f"left_gripper_joint_{i}_vel": i + 75 for i in range(5)}, | |||
| "left_gripper_open_vel": 75, # alias of left_gripper_joint_0_vel | |||
| # [80, 83): left end effector positions | |||
| "left_eef_pos_x": 80, | |||
| @@ -113,6 +114,7 @@ if not os.path.exists(DATA_DIR): | |||
| def save_data(data_dict, dataset_path, data_size): | |||
| """TODO: Add docstring.""" | |||
| with h5py.File(dataset_path + ".hdf5", "w", rdcc_nbytes=1024**2 * 2) as root: | |||
| root.attrs["sim"] = False | |||
| root.attrs["compress"] = False | |||
| @@ -193,9 +195,7 @@ for event in node: | |||
| elif char == "s": | |||
| start = True | |||
| elif "image" in event["id"]: | |||
| tmp_dict[event["id"]] = event["value"].to_numpy() | |||
| elif "qpos" in event["id"]: | |||
| elif "image" in event["id"] or "qpos" in event["id"]: | |||
| tmp_dict[event["id"]] = event["value"].to_numpy() | |||
| elif "pose" in event["id"]: | |||
| value = event["value"].to_numpy() | |||
| @@ -213,7 +213,7 @@ for event in node: | |||
| ortho6d[3], | |||
| ortho6d[4], | |||
| ortho6d[5], | |||
| ] | |||
| ], | |||
| ) | |||
| tmp_dict[event["id"]] = values | |||
| elif "base_vel" in event["id"]: | |||
| @@ -230,7 +230,7 @@ for event in node: | |||
| tmp_dict["/observations/pose_left"], | |||
| tmp_dict["/observations/pose_right"], | |||
| # tmp_dict["/observations/base_vel"], | |||
| ] | |||
| ], | |||
| ) | |||
| UNI_STATE_INDICES = ( | |||
| [STATE_VEC_IDX_MAPPING[f"left_arm_joint_{i}_pos"] for i in range(6)] | |||
| @@ -264,11 +264,11 @@ for event in node: | |||
| # We reproduce obs and action | |||
| data_dict["/action"].append(universal_vec) | |||
| data_dict["/observations/images/cam_high"].append( | |||
| tmp_dict["/observations/images/cam_high"] | |||
| tmp_dict["/observations/images/cam_high"], | |||
| ) | |||
| data_dict["/observations/images/cam_left_wrist"].append( | |||
| tmp_dict["/observations/images/cam_left_wrist"] | |||
| tmp_dict["/observations/images/cam_left_wrist"], | |||
| ) | |||
| data_dict["/observations/images/cam_right_wrist"].append( | |||
| tmp_dict["/observations/images/cam_right_wrist"] | |||
| tmp_dict["/observations/images/cam_right_wrist"], | |||
| ) | |||
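| The recorder above accumulates per-event arrays in data_dict and flushes them through save_data. A minimal h5py sketch of the same file layout, with illustrative shapes: | |||
| import h5py | |||
| import numpy as np | |||
| with h5py.File("episode_demo.hdf5", "w") as root: | |||
|     root.attrs["sim"] = False | |||
|     root.attrs["compress"] = False | |||
|     obs = root.create_group("observations") | |||
|     obs.create_dataset("qpos", data=np.zeros((10, 14), dtype=np.float32)) | |||
|     root.create_dataset("action", data=np.zeros((10, 128), dtype=np.float32)) | |||
| with h5py.File("episode_demo.hdf5", "r") as root: | |||
|     print(root["action"].shape)  # (10, 128) | |||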
| @@ -1,8 +1,9 @@ | |||
| from dora import Node | |||
| """TODO: Add docstring.""" | |||
| import os | |||
| import h5py | |||
| import os | |||
| from dora import Node | |||
| EPISODE_PATH = os.getenv("EPISODE_PATH", "data/episode_0.hdf5") | |||
| @@ -13,19 +14,19 @@ data = f["action"][:] | |||
| STATE_VEC_IDX_MAPPING = { | |||
| # [0, 10): right arm joint positions | |||
| **{"arm_joint_{}_pos".format(i): i for i in range(10)}, | |||
| **{"right_arm_joint_{}_pos".format(i): i for i in range(10)}, | |||
| **{f"arm_joint_{i}_pos": i for i in range(10)}, | |||
| **{f"right_arm_joint_{i}_pos": i for i in range(10)}, | |||
| # [10, 15): right gripper joint positions | |||
| **{"gripper_joint_{}_pos".format(i): i + 10 for i in range(5)}, | |||
| **{"right_gripper_joint_{}_pos".format(i): i + 10 for i in range(5)}, | |||
| **{f"gripper_joint_{i}_pos": i + 10 for i in range(5)}, | |||
| **{f"right_gripper_joint_{i}_pos": i + 10 for i in range(5)}, | |||
| "gripper_open": 10, # alias of right_gripper_joint_0_pos | |||
| "right_gripper_open": 10, | |||
| # [15, 25): right arm joint velocities | |||
| **{"arm_joint_{}_vel".format(i): i + 15 for i in range(10)}, | |||
| **{"right_arm_joint_{}_vel".format(i): i + 15 for i in range(10)}, | |||
| **{f"arm_joint_{i}_vel": i + 15 for i in range(10)}, | |||
| **{f"right_arm_joint_{i}_vel": i + 15 for i in range(10)}, | |||
| # [25, 30): right gripper joint velocities | |||
| **{"gripper_joint_{}_vel".format(i): i + 25 for i in range(5)}, | |||
| **{"right_gripper_joint_{}_vel".format(i): i + 25 for i in range(5)}, | |||
| **{f"gripper_joint_{i}_vel": i + 25 for i in range(5)}, | |||
| **{f"right_gripper_joint_{i}_vel": i + 25 for i in range(5)}, | |||
| "gripper_open_vel": 25, # alias of right_gripper_joint_0_vel | |||
| "right_gripper_open_vel": 25, | |||
| # [30, 33): right end effector positions | |||
| @@ -64,14 +65,14 @@ STATE_VEC_IDX_MAPPING = { | |||
| "right_eef_angular_vel_yaw": 44, | |||
| # [45, 50): reserved | |||
| # [50, 60): left arm joint positions | |||
| **{"left_arm_joint_{}_pos".format(i): i + 50 for i in range(10)}, | |||
| **{f"left_arm_joint_{i}_pos": i + 50 for i in range(10)}, | |||
| # [60, 65): left gripper joint positions | |||
| **{"left_gripper_joint_{}_pos".format(i): i + 60 for i in range(5)}, | |||
| **{f"left_gripper_joint_{i}_pos": i + 60 for i in range(5)}, | |||
| "left_gripper_open": 60, # alias of left_gripper_joint_0_pos | |||
| # [65, 75): left arm joint velocities | |||
| **{"left_arm_joint_{}_vel".format(i): i + 65 for i in range(10)}, | |||
| **{f"left_arm_joint_{i}_vel": i + 65 for i in range(10)}, | |||
| # [75, 80): left gripper joint velocities | |||
| **{"left_gripper_joint_{}_vel".format(i): i + 75 for i in range(5)}, | |||
| **{f"left_gripper_joint_{i}_vel": i + 75 for i in range(5)}, | |||
| "left_gripper_open_vel": 75, # alias of left_gripper_joint_0_vel | |||
| # [80, 83): left end effector positions | |||
| "left_eef_pos_x": 80, | |||
| @@ -102,6 +103,7 @@ STATE_VEC_IDX_MAPPING = { | |||
| } | |||
| import time | |||
| import pyarrow as pa | |||
| node = Node() | |||
| @@ -112,15 +114,15 @@ RIGHT_UNI_STATE_INDICES = [ | |||
| STATE_VEC_IDX_MAPPING[f"right_arm_joint_{i}_pos"] for i in range(6) | |||
| ] + [STATE_VEC_IDX_MAPPING["right_gripper_open"]] | |||
| MOBILE_BASE_UNI_STATE_INDICES = [STATE_VEC_IDX_MAPPING["base_vel_x"]] + [ | |||
| STATE_VEC_IDX_MAPPING["base_angular_vel"] | |||
| STATE_VEC_IDX_MAPPING["base_angular_vel"], | |||
| ] | |||
| for joint in data: | |||
| node.send_output( | |||
| "jointstate_left", pa.array(joint[LEFT_UNI_STATE_INDICES], type=pa.float32()) | |||
| "jointstate_left", pa.array(joint[LEFT_UNI_STATE_INDICES], type=pa.float32()), | |||
| ) | |||
| node.send_output( | |||
| "jointstate_right", pa.array(joint[RIGHT_UNI_STATE_INDICES], type=pa.float32()) | |||
| "jointstate_right", pa.array(joint[RIGHT_UNI_STATE_INDICES], type=pa.float32()), | |||
| ) | |||
| # node.send_output( | |||
| # "mobile_base", pa.array(joint[MOBILE_BASE_UNI_STATE_INDICES], type=pa.float32()) | |||
| @@ -1,14 +1,14 @@ | |||
| import pyarrow as pa | |||
| """TODO: Add docstring.""" | |||
| import pyarrow as pa | |||
| from dora import DoraStatus | |||
| class Operator: | |||
| """ | |||
| Inferring object from images | |||
| """ | |||
| """Inferring object from images.""" | |||
| def __init__(self): | |||
| """TODO: Add docstring.""" | |||
| self.last_file = "" | |||
| self.last_path = "" | |||
| self.last_metadata = None | |||
| @@ -18,10 +18,11 @@ class Operator: | |||
| dora_event, | |||
| send_output, | |||
| ) -> DoraStatus: | |||
| """TODO: Add docstring.""" | |||
| if dora_event["type"] == "INPUT" and dora_event["id"] == "file": | |||
| input = dora_event["value"][0].as_py() | |||
| with open(input["path"], "r") as file: | |||
| with open(input["path"]) as file: | |||
| self.last_file = file.read() | |||
| self.last_path = input["path"] | |||
| self.last_metadata = dora_event["metadata"] | |||
| @@ -36,8 +37,8 @@ class Operator: | |||
| "raw": input["raw"], | |||
| "path": input["path"], | |||
| "origin": dora_event["id"], | |||
| } | |||
| ] | |||
| }, | |||
| ], | |||
| ), | |||
| dora_event["metadata"], | |||
| ) | |||
| @@ -1,8 +1,9 @@ | |||
| from pynput import keyboard | |||
| from pynput.keyboard import Key, Events | |||
| """TODO: Add docstring.""" | |||
| import pyarrow as pa | |||
| from dora import Node | |||
| from pynput import keyboard | |||
| from pynput.keyboard import Events, Key | |||
| node = Node() | |||
| buffer_text = "" | |||
| @@ -30,36 +31,35 @@ with keyboard.Events() as events: | |||
| cursor = 0 | |||
| buffer_text += event.key.char | |||
| node.send_output("buffer", pa.array([buffer_text])) | |||
| else: | |||
| if event.key == Key.backspace: | |||
| buffer_text = buffer_text[:-1] | |||
| node.send_output("buffer", pa.array([buffer_text])) | |||
| elif event.key == Key.esc: | |||
| buffer_text = "" | |||
| node.send_output("buffer", pa.array([buffer_text])) | |||
| elif event.key == Key.enter: | |||
| node.send_output("submitted", pa.array([buffer_text])) | |||
| first_word = buffer_text.split(" ")[0] | |||
| if first_word in NODE_TOPIC: | |||
| node.send_output(first_word, pa.array([buffer_text])) | |||
| submitted_text.append(buffer_text) | |||
| buffer_text = "" | |||
| elif event.key == Key.backspace: | |||
| buffer_text = buffer_text[:-1] | |||
| node.send_output("buffer", pa.array([buffer_text])) | |||
| elif event.key == Key.esc: | |||
| buffer_text = "" | |||
| node.send_output("buffer", pa.array([buffer_text])) | |||
| elif event.key == Key.enter: | |||
| node.send_output("submitted", pa.array([buffer_text])) | |||
| first_word = buffer_text.split(" ")[0] | |||
| if first_word in NODE_TOPIC: | |||
| node.send_output(first_word, pa.array([buffer_text])) | |||
| submitted_text.append(buffer_text) | |||
| buffer_text = "" | |||
| node.send_output("buffer", pa.array([buffer_text])) | |||
| elif event.key == Key.ctrl: | |||
| ctrl = True | |||
| elif event.key == Key.space: | |||
| buffer_text += " " | |||
| node.send_output("buffer", pa.array([buffer_text])) | |||
| elif event.key == Key.up: | |||
| if len(submitted_text) > 0: | |||
| cursor = max(cursor - 1, -len(submitted_text)) | |||
| buffer_text = submitted_text[cursor] | |||
| node.send_output("buffer", pa.array([buffer_text])) | |||
| elif event.key == Key.ctrl: | |||
| ctrl = True | |||
| elif event.key == Key.space: | |||
| buffer_text += " " | |||
| elif event.key == Key.down: | |||
| if len(submitted_text) > 0: | |||
| cursor = min(cursor + 1, 0) | |||
| buffer_text = submitted_text[cursor] | |||
| node.send_output("buffer", pa.array([buffer_text])) | |||
| elif event.key == Key.up: | |||
| if len(submitted_text) > 0: | |||
| cursor = max(cursor - 1, -len(submitted_text)) | |||
| buffer_text = submitted_text[cursor] | |||
| node.send_output("buffer", pa.array([buffer_text])) | |||
| elif event.key == Key.down: | |||
| if len(submitted_text) > 0: | |||
| cursor = min(cursor + 1, 0) | |||
| buffer_text = submitted_text[cursor] | |||
| node.send_output("buffer", pa.array([buffer_text])) | |||
| elif event is not None and isinstance(event, Events.Release): | |||
| if event.key == Key.ctrl: | |||
| ctrl = False | |||
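| The keyboard refactor above flattens a nested `else: if` ladder into one `elif` chain; behavior is unchanged. A minimal pynput loop in the same flattened style (Esc exits): | |||
| from pynput import keyboard | |||
| from pynput.keyboard import Events, Key | |||
| with keyboard.Events() as events: | |||
|     for event in events: | |||
|         if not isinstance(event, Events.Press): | |||
|             continue  # ignore releases in this sketch | |||
|         if hasattr(event.key, "char") and event.key.char is not None: | |||
|             print("char:", event.key.char) | |||
|         elif event.key == Key.backspace: | |||
|             print("backspace") | |||
|         elif event.key == Key.esc: | |||
|             break | |||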
| @@ -1,13 +1,15 @@ | |||
| from dora import DoraStatus | |||
| import pylcs | |||
| import os | |||
| import pyarrow as pa | |||
| from transformers import AutoModelForCausalLM, AutoTokenizer | |||
| import json | |||
| """TODO: Add docstring.""" | |||
| import json | |||
| import os | |||
| import re | |||
| import time | |||
| import pyarrow as pa | |||
| import pylcs | |||
| from dora import DoraStatus | |||
| from transformers import AutoModelForCausalLM, AutoTokenizer | |||
| MODEL_NAME_OR_PATH = "TheBloke/deepseek-coder-6.7B-instruct-GPTQ" | |||
| # MODEL_NAME_OR_PATH = "hanspeterlyngsoeraaschoujensen/deepseek-math-7b-instruct-GPTQ" | |||
| @@ -64,14 +66,17 @@ tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME_OR_PATH, use_fast=True) | |||
| def extract_python_code_blocks(text): | |||
| """ | |||
| Extracts Python code blocks from the given text that are enclosed in triple backticks with a python language identifier. | |||
| """Extract Python code blocks from the given text that are enclosed in triple backticks with a python language identifier. | |||
| Parameters: | |||
| - text: A string that may contain one or more Python code blocks. | |||
| Parameters | |||
| ---------- | |||
| text : str | |||
| A string that may contain one or more Python code blocks. | |||
| Returns: | |||
| Returns | |||
| ------- | |||
| - A list of strings, where each string is a block of Python code extracted from the text. | |||
| """ | |||
| pattern = r"```python\n(.*?)\n```" | |||
| matches = re.findall(pattern, text, re.DOTALL) | |||
| @@ -80,21 +85,24 @@ def extract_python_code_blocks(text): | |||
| matches = re.findall(pattern, text, re.DOTALL) | |||
| if len(matches) == 0: | |||
| return [text] | |||
| else: | |||
| matches = [remove_last_line(matches[0])] | |||
| matches = [remove_last_line(matches[0])] | |||
| return matches | |||
| def extract_json_code_blocks(text): | |||
| """ | |||
| Extracts json code blocks from the given text that are enclosed in triple backticks with a json language identifier. | |||
| """Extract json code blocks from the given text that are enclosed in triple backticks with a json language identifier. | |||
| Parameters: | |||
| - text: A string that may contain one or more json code blocks. | |||
| Parameters | |||
| ---------- | |||
| text : str | |||
| A string that may contain one or more json code blocks. | |||
| Returns | |||
| ------- | |||
| list of str | |||
| A list of strings, where each string is a block of json code extracted from the text. | |||
| Returns: | |||
| - A list of strings, where each string is a block of json code extracted from the text. | |||
| """ | |||
| pattern = r"```json\n(.*?)\n```" | |||
| matches = re.findall(pattern, text, re.DOTALL) | |||
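| Both extractors above use the same non-greedy capture with re.DOTALL, so the group can span newlines inside a fenced block. A minimal, runnable illustration: | |||
| import re | |||
| text = "intro\n```python\nprint('hi')\n```\noutro" | |||
| matches = re.findall(r"```python\n(.*?)\n```", text, re.DOTALL) | |||
| print(matches)  # ["print('hi')"] | |||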
| @@ -108,14 +116,18 @@ def extract_json_code_blocks(text): | |||
| def remove_last_line(python_code): | |||
| """ | |||
| Removes the last line from a given string of Python code. | |||
| """Remove the last line from a given string of Python code. | |||
| Parameters | |||
| ---------- | |||
| python_code : str | |||
| A string representing Python source code. | |||
| Parameters: | |||
| - python_code: A string representing Python source code. | |||
| Returns | |||
| ------- | |||
| str | |||
| A string with the last line removed. | |||
| Returns: | |||
| - A string with the last line removed. | |||
| """ | |||
| lines = python_code.split("\n") # Split the string into lines | |||
| if lines: # Check if there are any lines to remove | |||
| @@ -124,8 +136,8 @@ def remove_last_line(python_code): | |||
| def calculate_similarity(source, target): | |||
| """ | |||
| Calculate a similarity score between the source and target strings. | |||
| """Calculate a similarity score between the source and target strings. | |||
| This uses the edit distance relative to the length of the strings. | |||
| """ | |||
| edit_distance = pylcs.edit_distance(source, target) | |||
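| The hunk is cut before the return statement; a plausible completion, assuming the edit distance is normalized by the longer string's length so that 1.0 means identical: | |||
| import pylcs | |||
| def calculate_similarity(source, target): | |||
|     edit_distance = pylcs.edit_distance(source, target) | |||
|     # Assumed normalization; the original return line is not shown above. | |||
|     return 1.0 - edit_distance / max(len(source), len(target), 1) | |||
| print(calculate_similarity("send_output", "send_outputs"))  # ~0.92 | |||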
| @@ -136,10 +148,7 @@ def calculate_similarity(source, target): | |||
| def find_best_match_location(source_code, target_block): | |||
| """ | |||
| Find the best match for the target_block within the source_code by searching line by line, | |||
| considering blocks of varying lengths. | |||
| """ | |||
| """Find the best match for the target_block within the source_code by searching line by line, considering blocks of varying lengths.""" | |||
| source_lines = source_code.split("\n") | |||
| target_lines = target_block.split("\n") | |||
| @@ -167,9 +176,7 @@ def find_best_match_location(source_code, target_block): | |||
| def replace_code_in_source(source_code, replacement_block: str): | |||
| """ | |||
| Replace the best matching block in the source_code with the replacement_block, considering variable block lengths. | |||
| """ | |||
| """Replace the best matching block in the source_code with the replacement_block, considering variable block lengths.""" | |||
| replacement_block = extract_python_code_blocks(replacement_block)[0] | |||
| start_index, end_index = find_best_match_location(source_code, replacement_block) | |||
| if start_index != -1 and end_index != -1: | |||
| @@ -178,27 +185,28 @@ def replace_code_in_source(source_code, replacement_block: str): | |||
| source_code[:start_index] + replacement_block + source_code[end_index:] | |||
| ) | |||
| return new_source | |||
| else: | |||
| return source_code | |||
| return source_code | |||
| class Operator: | |||
| """TODO: Add docstring.""" | |||
| def on_event( | |||
| self, | |||
| dora_event, | |||
| send_output, | |||
| ) -> DoraStatus: | |||
| """TODO: Add docstring.""" | |||
| if dora_event["type"] == "INPUT" and dora_event["id"] == "code_modifier": | |||
| input = dora_event["value"][0].as_py() | |||
| with open(input["path"], "r", encoding="utf8") as f: | |||
| with open(input["path"], encoding="utf8") as f: | |||
| code = f.read() | |||
| user_message = input["user_message"] | |||
| start_llm = time.time() | |||
| output = self.ask_llm( | |||
| CODE_MODIFIER_TEMPLATE.format(code=code, user_message=user_message) | |||
| CODE_MODIFIER_TEMPLATE.format(code=code, user_message=user_message), | |||
| ) | |||
| source_code = replace_code_in_source(code, output) | |||
| @@ -212,8 +220,8 @@ class Operator: | |||
| "path": input["path"], | |||
| "response": output, | |||
| "prompt": input["user_message"], | |||
| } | |||
| ] | |||
| }, | |||
| ], | |||
| ), | |||
| dora_event["metadata"], | |||
| ) | |||
| @@ -226,7 +234,7 @@ class Operator: | |||
| elif dora_event["type"] == "INPUT" and dora_event["id"] == "message_sender": | |||
| user_message = dora_event["value"][0].as_py() | |||
| output = self.ask_llm( | |||
| MESSAGE_SENDER_TEMPLATE.format(user_message=user_message) | |||
| MESSAGE_SENDER_TEMPLATE.format(user_message=user_message), | |||
| ) | |||
| outputs = extract_json_code_blocks(output)[0] | |||
| try: | |||
| @@ -261,6 +269,7 @@ class Operator: | |||
| # Generate output | |||
| # prompt = PROMPT_TEMPLATE.format(system_message=system_message, prompt=prompt)) | |||
| """TODO: Add docstring.""" | |||
| input = tokenizer(prompt, return_tensors="pt") | |||
| input_ids = input.input_ids.cuda() | |||
| @@ -293,7 +302,7 @@ if __name__ == "__main__": | |||
| current_directory = os.path.dirname(current_file_path) | |||
| path = current_directory + "object_detection.py" | |||
| with open(path, "r", encoding="utf8") as f: | |||
| with open(path, encoding="utf8") as f: | |||
| raw = f.read() | |||
| op.on_event( | |||
| @@ -306,7 +315,7 @@ if __name__ == "__main__": | |||
| "path": path, | |||
| "user_message": "send a star ", | |||
| }, | |||
| ] | |||
| ], | |||
| ), | |||
| "metadata": [], | |||
| }, | |||
| @@ -1,7 +1,8 @@ | |||
| """TODO: Add docstring.""" | |||
| import numpy as np | |||
| import pyarrow as pa | |||
| import sounddevice as sd | |||
| from dora import DoraStatus | |||
| # Set the parameters for recording | |||
| @@ -10,15 +11,14 @@ MAX_DURATION = 5 | |||
| class Operator: | |||
| """ | |||
| Microphone operator that records the audio | |||
| """ | |||
| """Microphone operator that records the audio.""" | |||
| def on_event( | |||
| self, | |||
| dora_event, | |||
| send_output, | |||
| ) -> DoraStatus: | |||
| """TODO: Add docstring.""" | |||
| if dora_event["type"] == "INPUT": | |||
| audio_data = sd.rec( | |||
| int(SAMPLE_RATE * MAX_DURATION), | |||
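| sd.rec returns immediately with a pre-allocated buffer and sd.wait blocks until recording completes. A standalone sketch of the call above (parameter values assumed, since SAMPLE_RATE is defined outside this hunk): | |||
| import sounddevice as sd | |||
| SAMPLE_RATE = 16000 | |||
| MAX_DURATION = 5 | |||
| audio_data = sd.rec( | |||
|     int(SAMPLE_RATE * MAX_DURATION), | |||
|     samplerate=SAMPLE_RATE, | |||
|     channels=1, | |||
|     dtype="float32", | |||
| ) | |||
| sd.wait()  # block until the 5 s buffer is filled | |||
| print(audio_data.shape)  # (80000, 1) | |||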
| @@ -1,10 +1,10 @@ | |||
| """TODO: Add docstring.""" | |||
| import numpy as np | |||
| import pyarrow as pa | |||
| from dora import DoraStatus | |||
| from ultralytics import YOLO | |||
| CAMERA_WIDTH = 640 | |||
| CAMERA_HEIGHT = 480 | |||
| @@ -13,15 +13,14 @@ model = YOLO("yolov8n.pt") | |||
| class Operator: | |||
| """ | |||
| Inferring object from images | |||
| """ | |||
| """Inferring object from images.""" | |||
| def on_event( | |||
| self, | |||
| dora_event, | |||
| send_output, | |||
| ) -> DoraStatus: | |||
| """TODO: Add docstring.""" | |||
| if dora_event["type"] == "INPUT": | |||
| frame = ( | |||
| dora_event["value"].to_numpy().reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3)) | |||
| @@ -1,10 +1,11 @@ | |||
| """TODO: Add docstring.""" | |||
| import os | |||
| import cv2 | |||
| import cv2 | |||
| from dora import DoraStatus | |||
| from utils import LABELS | |||
| CI = os.environ.get("CI") | |||
| CAMERA_WIDTH = 640 | |||
| @@ -14,11 +15,10 @@ FONT = cv2.FONT_HERSHEY_SIMPLEX | |||
| class Operator: | |||
| """ | |||
| Plot image and bounding box | |||
| """ | |||
| """Plot image and bounding box.""" | |||
| def __init__(self): | |||
| """TODO: Add docstring.""" | |||
| self.bboxs = [] | |||
| self.buffer = "" | |||
| self.submitted = [] | |||
| @@ -29,6 +29,7 @@ class Operator: | |||
| dora_event, | |||
| send_output, | |||
| ): | |||
| """TODO: Add docstring.""" | |||
| if dora_event["type"] == "INPUT": | |||
| id = dora_event["id"] | |||
| value = dora_event["value"] | |||
| @@ -63,7 +64,7 @@ class Operator: | |||
| ) | |||
| cv2.putText( | |||
| image, self.buffer, (20, 14 + 21 * 14), FONT, 0.5, (190, 250, 0), 1 | |||
| image, self.buffer, (20, 14 + 21 * 14), FONT, 0.5, (190, 250, 0), 1, | |||
| ) | |||
| i = 0 | |||
| @@ -111,7 +112,7 @@ class Operator: | |||
| { | |||
| "role": id, | |||
| "content": value[0].as_py(), | |||
| } | |||
| }, | |||
| ] | |||
| return DoraStatus.CONTINUE | |||
| @@ -1,11 +1,12 @@ | |||
| from sentence_transformers import SentenceTransformer | |||
| from sentence_transformers import util | |||
| """TODO: Add docstring.""" | |||
| from dora import DoraStatus | |||
| import os | |||
| import sys | |||
| import torch | |||
| import pyarrow as pa | |||
| import torch | |||
| from dora import DoraStatus | |||
| from sentence_transformers import SentenceTransformer, util | |||
| SHOULD_BE_INCLUDED = [ | |||
| "webcam.py", | |||
| @@ -16,6 +17,7 @@ SHOULD_BE_INCLUDED = [ | |||
| ## Get all python files path in given directory | |||
| def get_all_functions(path): | |||
| """TODO: Add docstring.""" | |||
| raw = [] | |||
| paths = [] | |||
| for root, dirs, files in os.walk(path): | |||
| @@ -24,7 +26,7 @@ def get_all_functions(path): | |||
| if file not in SHOULD_BE_INCLUDED: | |||
| continue | |||
| path = os.path.join(root, file) | |||
| with open(path, "r", encoding="utf8") as f: | |||
| with open(path, encoding="utf8") as f: | |||
| ## add file folder to system path | |||
| sys.path.append(root) | |||
| ## import module from path | |||
| @@ -35,6 +37,7 @@ def get_all_functions(path): | |||
| def search(query_embedding, corpus_embeddings, paths, raw, k=5, file_extension=None): | |||
| """TODO: Add docstring.""" | |||
| cos_scores = util.cos_sim(query_embedding, corpus_embeddings)[0] | |||
| top_results = torch.topk(cos_scores, k=min(k, len(cos_scores)), sorted=True) | |||
| out = [] | |||
| @@ -44,10 +47,11 @@ def search(query_embedding, corpus_embeddings, paths, raw, k=5, file_extension=N | |||
| class Operator: | |||
| """ """ | |||
| """TODO: Add docstring.""" | |||
| def __init__(self): | |||
| ## TODO: Add a initialisation step | |||
| """TODO: Add docstring.""" | |||
| self.model = SentenceTransformer("BAAI/bge-large-en-v1.5") | |||
| self.encoding = [] | |||
| # file directory | |||
| @@ -62,6 +66,7 @@ class Operator: | |||
| dora_event, | |||
| send_output, | |||
| ) -> DoraStatus: | |||
| """TODO: Add docstring.""" | |||
| if dora_event["type"] == "INPUT": | |||
| if dora_event["id"] == "query": | |||
| values = dora_event["value"].to_pylist() | |||
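| The operator above encodes the whitelisted source files once, then ranks them by cosine similarity per query. A self-contained sketch of the same search step (a smaller model is substituted here to keep the example light): | |||
| import torch | |||
| from sentence_transformers import SentenceTransformer, util | |||
| model = SentenceTransformer("all-MiniLM-L6-v2")  # stand-in for BAAI/bge-large-en-v1.5 | |||
| corpus = ["webcam capture loop", "object detection operator", "microphone recording"] | |||
| corpus_embeddings = model.encode(corpus, convert_to_tensor=True) | |||
| query_embedding = model.encode("find the camera code", convert_to_tensor=True) | |||
| cos_scores = util.cos_sim(query_embedding, corpus_embeddings)[0] | |||
| top_results = torch.topk(cos_scores, k=2, sorted=True) | |||
| for score, idx in zip(top_results.values, top_results.indices): | |||
|     print(corpus[idx], float(score)) | |||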
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| LABELS = [ | |||
| "person", | |||
| "bicycle", | |||
| @@ -1,10 +1,11 @@ | |||
| """TODO: Add docstring.""" | |||
| import os | |||
| import time | |||
| import cv2 | |||
| import numpy as np | |||
| import pyarrow as pa | |||
| from dora import DoraStatus | |||
| CAMERA_WIDTH = 640 | |||
| @@ -16,11 +17,10 @@ font = cv2.FONT_HERSHEY_SIMPLEX | |||
| class Operator: | |||
| """ | |||
| Sending image from webcam to the dataflow | |||
| """ | |||
| """Sending image from webcam to the dataflow.""" | |||
| def __init__(self): | |||
| """TODO: Add docstring.""" | |||
| self.video_capture = cv2.VideoCapture(CAMERA_INDEX) | |||
| self.start_time = time.time() | |||
| self.video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, CAMERA_WIDTH) | |||
| @@ -32,6 +32,7 @@ class Operator: | |||
| dora_event: str, | |||
| send_output, | |||
| ) -> DoraStatus: | |||
| """TODO: Add docstring.""" | |||
| event_type = dora_event["type"] | |||
| if event_type == "INPUT": | |||
| ret, frame = self.video_capture.read() | |||
| @@ -39,22 +40,21 @@ class Operator: | |||
| frame = cv2.resize(frame, (CAMERA_WIDTH, CAMERA_HEIGHT)) | |||
| self.failure_count = 0 | |||
| ## Push an error image in case the camera is not available. | |||
| elif self.failure_count > 10: | |||
| frame = np.zeros((CAMERA_HEIGHT, CAMERA_WIDTH, 3), dtype=np.uint8) | |||
| cv2.putText( | |||
| frame, | |||
| "No Webcam was found at index %d" % (CAMERA_INDEX), | |||
| (30, 30), | |||
| font, | |||
| 0.75, | |||
| (255, 255, 255), | |||
| 2, | |||
| 1, | |||
| ) | |||
| else: | |||
| if self.failure_count > 10: | |||
| frame = np.zeros((CAMERA_HEIGHT, CAMERA_WIDTH, 3), dtype=np.uint8) | |||
| cv2.putText( | |||
| frame, | |||
| "No Webcam was found at index %d" % (CAMERA_INDEX), | |||
| (int(30), int(30)), | |||
| font, | |||
| 0.75, | |||
| (255, 255, 255), | |||
| 2, | |||
| 1, | |||
| ) | |||
| else: | |||
| self.failure_count += 1 | |||
| return DoraStatus.CONTINUE | |||
| self.failure_count += 1 | |||
| return DoraStatus.CONTINUE | |||
| send_output( | |||
| "image", | |||
| @@ -68,8 +68,8 @@ class Operator: | |||
| if time.time() - self.start_time < 20 or CI != "true": | |||
| return DoraStatus.CONTINUE | |||
| else: | |||
| return DoraStatus.STOP | |||
| return DoraStatus.STOP | |||
| def __del__(self): | |||
| """TODO: Add docstring.""" | |||
| self.video_capture.release() | |||
| @@ -1,22 +1,21 @@ | |||
| """TODO: Add docstring.""" | |||
| import pyarrow as pa | |||
| import whisper | |||
| from dora import DoraStatus | |||
| model = whisper.load_model("base") | |||
| class Operator: | |||
| """ | |||
| Transforming Speech to Text using OpenAI Whisper model | |||
| """ | |||
| """Transforming Speech to Text using OpenAI Whisper model.""" | |||
| def on_event( | |||
| self, | |||
| dora_event, | |||
| send_output, | |||
| ) -> DoraStatus: | |||
| """TODO: Add docstring.""" | |||
| if dora_event["type"] == "INPUT": | |||
| audio = dora_event["value"].to_numpy() | |||
| audio = whisper.pad_or_trim(audio) | |||
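| pad_or_trim fixes the clip to Whisper's 30-second window before transcription. A standalone sketch, using silence as a stand-in for microphone input: | |||
| import numpy as np | |||
| import whisper | |||
| model = whisper.load_model("base") | |||
| audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz | |||
| audio = whisper.pad_or_trim(audio)  # pad to the 30 s window, as above | |||
| result = model.transcribe(audio) | |||
| print(result["text"]) | |||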
| @@ -1,9 +1,11 @@ | |||
| #!/usr/bin/env python | |||
| # -*- coding: utf-8 -*- | |||
| """TODO: Add docstring.""" | |||
| import random | |||
| from dora import Node | |||
| import pyarrow as pa | |||
| from dora import Node | |||
| node = Node() | |||
| @@ -16,8 +18,8 @@ for i in range(500): | |||
| if event_id == "turtle_pose": | |||
| print( | |||
| f"""Pose: {event["value"].tolist()}""".replace("\r", "").replace( | |||
| "\n", " " | |||
| ) | |||
| "\n", " ", | |||
| ), | |||
| ) | |||
| elif event_id == "tick": | |||
| direction = { | |||
| @@ -1,5 +1,6 @@ | |||
| #!/usr/bin/env python | |||
| # -*- coding: utf-8 -*- | |||
| """TODO: Add docstring.""" | |||
| from dora import Node, Ros2Context, Ros2NodeOptions, Ros2QosPolicies | |||
| @@ -18,7 +19,7 @@ topic_qos = Ros2QosPolicies(reliable=True, max_blocking_time=0.1) | |||
| # Create a publisher to cmd_vel topic | |||
| turtle_twist_topic = ros2_node.create_topic( | |||
| "/turtle1/cmd_vel", "geometry_msgs/Twist", topic_qos | |||
| "/turtle1/cmd_vel", "geometry_msgs/Twist", topic_qos, | |||
| ) | |||
| twist_writer = ros2_node.create_publisher(turtle_twist_topic) | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| import json | |||
| import os | |||
| @@ -11,14 +13,17 @@ IMAGE_RESIZE_RATIO = float(os.getenv("IMAGE_RESIZE_RATIO", "1.0")) | |||
| def extract_bboxes(json_text): | |||
| """ | |||
| Extracts bounding boxes from a JSON string with markdown markers and returns them as a NumPy array. | |||
| """Extract bounding boxes from a JSON string with markdown markers and return them as a NumPy array. | |||
| Parameters: | |||
| json_text (str): JSON string containing bounding box data, including ```json markers. | |||
| Parameters | |||
| ---------- | |||
| json_text : str | |||
| JSON string containing bounding box data, including ```json markers. | |||
| Returns: | |||
| Returns | |||
| ------- | |||
| np.ndarray: NumPy array of bounding boxes. | |||
| """ | |||
| # Ensure all lines are stripped of whitespace and markers | |||
| lines = json_text.strip().splitlines() | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| import json | |||
| import os | |||
| @@ -11,14 +13,17 @@ IMAGE_RESIZE_RATIO = float(os.getenv("IMAGE_RESIZE_RATIO", "1.0")) | |||
| def extract_bboxes(json_text) -> (np.ndarray, np.ndarray): | |||
| """ | |||
| Extracts bounding boxes from a JSON string with markdown markers and returns them as a NumPy array. | |||
| """Extract bounding boxes from a JSON string with markdown markers and return them as a NumPy array. | |||
| Parameters: | |||
| json_text (str): JSON string containing bounding box data, including ```json markers. | |||
| Parameters | |||
| ---------- | |||
| json_text : str | |||
| JSON string containing bounding box data, including ```json markers. | |||
| Returns: | |||
| Returns | |||
| ------- | |||
| np.ndarray: NumPy array of bounding boxes. | |||
| """ | |||
| # Ensure all lines are stripped of whitespace and markers | |||
| lines = json_text.strip().splitlines() | |||
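| Each extract_bboxes variant strips the ```json fence lines before parsing. A minimal sketch of that step (the field names here are illustrative, not taken from the hunks above): | |||
| import json | |||
| import numpy as np | |||
| json_text = "```json\n[{\"bbox_2d\": [10, 20, 110, 220], \"label\": \"cup\"}]\n```" | |||
| lines = [ln for ln in json_text.strip().splitlines() if not ln.startswith("```")] | |||
| data = json.loads("\n".join(lines)) | |||
| bboxes = np.array([d["bbox_2d"] for d in data]) | |||
| labels = np.array([d["label"] for d in data]) | |||
| print(bboxes, labels)  # [[ 10  20 110 220]] ['cup'] | |||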
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| # State Machine | |||
| import json | |||
| import os | |||
| @@ -80,14 +82,17 @@ stop = True | |||
| def extract_bboxes(json_text) -> (np.ndarray, np.ndarray): | |||
| """ | |||
| Extracts bounding boxes from a JSON string with markdown markers and returns them as a NumPy array. | |||
| """Extract bounding boxes from a JSON string with markdown markers and return them as a NumPy array. | |||
| Parameters: | |||
| json_text (str): JSON string containing bounding box data, including ```json markers. | |||
| Parameters | |||
| ---------- | |||
| json_text : str | |||
| JSON string containing bounding box data, including ```json markers. | |||
| Returns: | |||
| Returns | |||
| ------- | |||
| np.ndarray: NumPy array of bounding boxes. | |||
| """ | |||
| # Ensure all lines are stripped of whitespace and markers | |||
| lines = json_text.strip().splitlines() | |||
| @@ -112,6 +117,7 @@ def extract_bboxes(json_text) -> (np.ndarray, np.ndarray): | |||
| def handle_speech(last_text): | |||
| """TODO: Add docstring.""" | |||
| global stop | |||
| words = last_text.lower().split() | |||
| if len(ACTIVATION_WORDS) > 0 and any(word in ACTIVATION_WORDS for word in words): | |||
| @@ -120,8 +126,8 @@ def handle_speech(last_text): | |||
| "text_vlm", | |||
| pa.array( | |||
| [ | |||
| f"Given the prompt: {cache['text']}. Output the two bounding boxes for the two objects" | |||
| ] | |||
| f"Given the prompt: {cache['text']}. Output the two bounding boxes for the two objects", | |||
| ], | |||
| ), | |||
| metadata={"image_id": "image_depth"}, | |||
| ) | |||
| @@ -135,7 +141,7 @@ def handle_speech(last_text): | |||
| def wait_for_event(id, timeout=None, cache={}): | |||
| """TODO: Add docstring.""" | |||
| while True: | |||
| event = node.next(timeout=timeout) | |||
| if event is None: | |||
| @@ -154,6 +160,7 @@ def wait_for_event(id, timeout=None, cache={}): | |||
| def wait_for_events(ids: list[str], timeout=None, cache={}): | |||
| """TODO: Add docstring.""" | |||
| response = {} | |||
| while True: | |||
| event = node.next(timeout=timeout) | |||
| @@ -174,18 +181,18 @@ def wait_for_events(ids: list[str], timeout=None, cache={}): | |||
| def get_prompt(): | |||
| """TODO: Add docstring.""" | |||
| text = wait_for_event(id="text", timeout=0.3) | |||
| if text is None: | |||
| return | |||
| return None | |||
| text = text[0].as_py() | |||
| words = text.lower().split() | |||
| if len(ACTIVATION_WORDS) > 0 and all( | |||
| word not in ACTIVATION_WORDS for word in words | |||
| ): | |||
| return | |||
| else: | |||
| return text | |||
| return None | |||
| return text | |||
| last_text = "" | |||
| @@ -205,7 +212,7 @@ while True: | |||
| metadata={"encoding": "jointstate", "duration": 1}, | |||
| ) | |||
| _, cache = wait_for_events( | |||
| ids=["response_r_arm", "response_l_arm"], timeout=2, cache=cache | |||
| ids=["response_r_arm", "response_l_arm"], timeout=2, cache=cache, | |||
| ) | |||
| # handle_speech(cache["text"]) | |||
| @@ -270,7 +277,7 @@ while True: | |||
| [x, y, -0.16, 0, 0, 0, 100], | |||
| [x, y, z, 0, 0, 0, 0], | |||
| [x, y, -0.16, 0, 0, 0, 0], | |||
| ] | |||
| ], | |||
| ).ravel() | |||
| if y < 0: | |||
| @@ -351,7 +358,7 @@ while True: | |||
| 0, | |||
| 0, | |||
| 100, | |||
| ] | |||
| ], | |||
| ), | |||
| metadata={"encoding": "xyzrpy", "duration": "0.75"}, | |||
| ) | |||
| @@ -388,7 +395,7 @@ while True: | |||
| 0, | |||
| 0, | |||
| 100, | |||
| ] | |||
| ], | |||
| ), | |||
| metadata={"encoding": "xyzrpy", "duration": "0.75"}, | |||
| ) | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| # State Machine | |||
| import os | |||
| @@ -76,6 +78,7 @@ l_release_closed_pose = [ | |||
| def wait_for_event(id, timeout=None): | |||
| """TODO: Add docstring.""" | |||
| while True: | |||
| event = node.next(timeout=timeout) | |||
| if event["type"] == "INPUT": | |||
| @@ -87,6 +90,7 @@ def wait_for_event(id, timeout=None): | |||
| def wait_for_events(ids: list[str], timeout=None): | |||
| """TODO: Add docstring.""" | |||
| response = {} | |||
| while True: | |||
| event = node.next(timeout=timeout) | |||
| @@ -187,7 +191,7 @@ while True: | |||
| [x, y, -0.16, 0, 0, 0, 100], | |||
| [x, y, z, 0, 0, 0, 0], | |||
| [x, y, -0.16, 0, 0, 0, 0], | |||
| ] | |||
| ], | |||
| ).ravel() | |||
| if y < 0: | |||
| @@ -1,14 +1,18 @@ | |||
| """TODO: Add docstring.""" | |||
| import os | |||
| import shutil | |||
| def clear_screen(): | |||
| # Clear the screen based on the operating system | |||
| """TODO: Add docstring.""" | |||
| os.system("cls" if os.name == "nt" else "clear") | |||
| def print_centered(texts): | |||
| # Get terminal size | |||
| """TODO: Add docstring.""" | |||
| terminal_size = shutil.get_terminal_size() | |||
| # Print newlines to move cursor to the middle vertically | |||
| @@ -33,7 +37,7 @@ for event in node: | |||
| if event["type"] == "INPUT": | |||
| # The sentence to be printed | |||
| sentence = event["value"][0].as_py() | |||
| if event["id"] not in previous_texts.keys(): | |||
| if event["id"] not in previous_texts: | |||
| previous_texts[event["id"]] = ["", "", sentence] | |||
| else: | |||
| @@ -1,7 +1,8 @@ | |||
| """TODO: Add docstring.""" | |||
| import numpy as np | |||
| import pyarrow as pa | |||
| # Marker Message Example | |||
| TEST_ARRAYS = [ | |||
| ("std_msgs", "UInt8", pa.array([{"data": np.uint8(2)}])), | |||
| @@ -23,12 +24,12 @@ TEST_ARRAYS = [ | |||
| "label": "a", | |||
| "size": np.uint32(10), | |||
| "stride": np.uint32(20), | |||
| } | |||
| }, | |||
| ], | |||
| "data_offset": np.uint32(30), | |||
| }, | |||
| } | |||
| ] | |||
| }, | |||
| ], | |||
| ), | |||
| ), | |||
| ( | |||
| @@ -44,12 +45,12 @@ TEST_ARRAYS = [ | |||
| "label": "a", | |||
| "size": np.uint32(10), | |||
| "stride": np.uint32(20), | |||
| } | |||
| }, | |||
| ], | |||
| "data_offset": np.uint32(30), | |||
| }, | |||
| } | |||
| ] | |||
| }, | |||
| ], | |||
| ), | |||
| ), | |||
| ( | |||
| @@ -99,7 +100,7 @@ TEST_ARRAYS = [ | |||
| "x": np.float64(1.0), # Numpy type | |||
| "y": np.float64(1.0), # Numpy type | |||
| "z": np.float64(1.0), # Numpy type | |||
| } | |||
| }, | |||
| ], | |||
| "colors": [ | |||
| { | |||
| @@ -107,15 +108,15 @@ TEST_ARRAYS = [ | |||
| "g": np.float32(1.0), # Numpy type | |||
| "b": np.float32(1.0), # Numpy type | |||
| "a": np.float32(1.0), # Numpy type (alpha) | |||
| } # Numpy array for colors | |||
| }, # Numpy array for colors | |||
| ], | |||
| "texture_resource": "", | |||
| "uv_coordinates": [{}], | |||
| "text": "", | |||
| "mesh_resource": "", | |||
| "mesh_use_embedded_materials": False, # Boolean type, no numpy equivalent | |||
| } | |||
| ] | |||
| }, | |||
| ], | |||
| ), | |||
| ), | |||
| ( | |||
| @@ -167,7 +168,7 @@ TEST_ARRAYS = [ | |||
| "x": np.float64(1.0), # Numpy type | |||
| "y": np.float64(1.0), # Numpy type | |||
| "z": np.float64(1.0), # Numpy type | |||
| } | |||
| }, | |||
| ], | |||
| "colors": [ | |||
| { | |||
| @@ -175,17 +176,17 @@ TEST_ARRAYS = [ | |||
| "g": np.float32(1.0), # Numpy type | |||
| "b": np.float32(1.0), # Numpy type | |||
| "a": np.float32(1.0), # Numpy type (alpha) | |||
| } # Numpy array for colors | |||
| }, # Numpy array for colors | |||
| ], | |||
| "texture_resource": "", | |||
| "uv_coordinates": [{}], | |||
| "text": "", | |||
| "mesh_resource": "", | |||
| "mesh_use_embedded_materials": False, # Boolean type, no numpy equivalent | |||
| } | |||
| ] | |||
| } | |||
| ] | |||
| }, | |||
| ], | |||
| }, | |||
| ], | |||
| ), | |||
| ), | |||
| ( | |||
| @@ -254,17 +255,15 @@ TEST_ARRAYS = [ | |||
| "a": np.float32(1.0), # 32-bit float | |||
| }, | |||
| ], | |||
| } | |||
| ] | |||
| }, | |||
| ], | |||
| ), | |||
| ), | |||
| ] | |||
| def is_subset(subset, superset): | |||
| """ | |||
| Check if subset is a subset of superset, to avoid false negatives linked to default values. | |||
| """ | |||
| """Check if subset is a subset of superset, to avoid false negatives linked to default values.""" | |||
| if isinstance(subset, pa.Array): | |||
| return is_subset(subset.to_pylist(), superset.to_pylist()) | |||
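| The body of is_subset is cut off after the pa.Array case; a plausible completion of the recursion over plain Python values, shown as an assumption rather than the original code: | |||
| import pyarrow as pa | |||
| def is_subset(subset, superset): | |||
|     """Assumed continuation: recurse through lists and dicts, compare leaves.""" | |||
|     if isinstance(subset, pa.Array): | |||
|         return is_subset(subset.to_pylist(), superset.to_pylist()) | |||
|     if isinstance(subset, dict): | |||
|         return all(k in superset and is_subset(v, superset[k]) for k, v in subset.items()) | |||
|     if isinstance(subset, list): | |||
|         return len(subset) <= len(superset) and all( | |||
|             is_subset(a, b) for a, b in zip(subset, superset) | |||
|         ) | |||
|     return subset == superset | |||
| print(is_subset(pa.array([{"x": 1}]), pa.array([{"x": 1, "y": 0}])))  # True | |||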
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| import os | |||
| # Define the path to the README file relative to the package directory | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| import os | |||
| os.environ["ARGOS_DEVICE_TYPE"] = "auto" | |||
| @@ -22,6 +24,7 @@ argostranslate.package.install_from_path(package_to_install.download()) | |||
| def main(): | |||
| """TODO: Add docstring.""" | |||
| node = Node() | |||
| while True: | |||
| event = node.next() | |||
| @@ -18,3 +18,8 @@ dev = ["pytest >=8.1.1", "ruff >=0.9.1"] | |||
| [project.scripts] | |||
| dora-argotranslate = "dora_argotranslate.main:main" | |||
| [tool.ruff.lint] | |||
| extend-select = [ | |||
| "D", # pydocstyle | |||
| ] | |||
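| Extending ruff with the pydocstyle ("D") rules is what drives the docstring churn throughout these diffs: every module, class, and function must carry a docstring in a recognized layout. A compliant numpydoc-style example: | |||
| def normalize(values): | |||
|     """Scale non-negative weights so they sum to one. | |||
|     Parameters | |||
|     ---------- | |||
|     values : list of float | |||
|         Raw, non-negative weights. | |||
|     Returns | |||
|     ------- | |||
|     list of float | |||
|         The rescaled weights. | |||
|     """ | |||
|     total = sum(values) | |||
|     return [v / total for v in values] | |||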
| @@ -1,7 +1,10 @@ | |||
| """TODO: Add docstring.""" | |||
| import pytest | |||
| def test_import_main(): | |||
| """TODO: Add docstring.""" | |||
| from dora_argotranslate.main import main | |||
| # Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow. | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| import os | |||
| # Define the path to the README file relative to the package directory | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| import os | |||
| import sys | |||
| from pathlib import Path | |||
| @@ -12,6 +14,7 @@ TRANSLATE = bool(os.getenv("TRANSLATE", "False") in ["True", "true"]) | |||
| def load_model(): | |||
| """TODO: Add docstring.""" | |||
| from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline | |||
| MODEL_NAME_OR_PATH = os.getenv("MODEL_NAME_OR_PATH", DEFAULT_PATH) | |||
| @@ -70,6 +73,7 @@ BAD_SENTENCES = [ | |||
| def cut_repetition(text, min_repeat_length=4, max_repeat_length=50): | |||
| """TODO: Add docstring.""" | |||
| if len(text) == 0: | |||
| return text | |||
| # Check if the text is primarily Chinese (you may need to adjust this threshold) | |||
| @@ -103,6 +107,7 @@ def cut_repetition(text, min_repeat_length=4, max_repeat_length=50): | |||
| def main(): | |||
| """TODO: Add docstring.""" | |||
| node = Node() | |||
| # For macos use mlx: | |||
| @@ -27,3 +27,8 @@ dev = ["pytest >=8.1.1", "ruff >=0.9.1"] | |||
| [project.scripts] | |||
| dora-distil-whisper = "dora_distil_whisper.main:main" | |||
| [tool.ruff.lint] | |||
| extend-select = [ | |||
| "D", # pydocstyle | |||
| ] | |||
| @@ -1,7 +1,10 @@ | |||
| """TODO: Add docstring.""" | |||
| import pytest | |||
| def test_import_main(): | |||
| """TODO: Add docstring.""" | |||
| from dora_distil_whisper.main import main | |||
| # Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow. | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| import os | |||
| # Define the path to the README file relative to the package directory | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| import argparse | |||
| import os | |||
| @@ -8,6 +10,7 @@ RUNNER_CI = True if os.getenv("CI") == "true" else False | |||
| def main(): | |||
| # Handle dynamic nodes, ask for the name of the node in the dataflow, and the same values as the ENV variables. | |||
| """TODO: Add docstring.""" | |||
| parser = argparse.ArgumentParser(description="Simple arrow sender") | |||
| parser.add_argument( | |||
| @@ -17,3 +17,8 @@ dev = ["pytest >=8.1.1", "ruff >=0.9.1"] | |||
| [project.scripts] | |||
| dora-echo = "dora_echo.main:main" | |||
| [tool.ruff.lint] | |||
| extend-select = [ | |||
| "D", # pydocstyle | |||
| ] | |||
| @@ -1,7 +1,10 @@ | |||
| """TODO: Add docstring.""" | |||
| import pytest | |||
| def test_import_main(): | |||
| """TODO: Add docstring.""" | |||
| from dora_echo.main import main | |||
| # Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow. | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| import os | |||
| # Define the path to the README file relative to the package directory | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| import os | |||
| import numpy as np | |||
| @@ -14,6 +16,7 @@ IMAGENET_STD = (0.229, 0.224, 0.225) | |||
| def build_transform(input_size): | |||
| """TODO: Add docstring.""" | |||
| MEAN, STD = IMAGENET_MEAN, IMAGENET_STD | |||
| transform = T.Compose( | |||
| [ | |||
| @@ -27,6 +30,7 @@ def build_transform(input_size): | |||
| def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size): | |||
| """TODO: Add docstring.""" | |||
| best_ratio_diff = float("inf") | |||
| best_ratio = (1, 1) | |||
| area = width * height | |||
| @@ -45,6 +49,7 @@ def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_ | |||
| def dynamic_preprocess( | |||
| image, min_num=1, max_num=12, image_size=448, use_thumbnail=False, | |||
| ): | |||
| """TODO: Add docstring.""" | |||
| orig_width, orig_height = image.size | |||
| aspect_ratio = orig_width / orig_height | |||
| @@ -89,6 +94,7 @@ def dynamic_preprocess( | |||
| def load_image(image_array: np.array, input_size=448, max_num=12): | |||
| """TODO: Add docstring.""" | |||
| image = Image.fromarray(image_array).convert("RGB") | |||
| transform = build_transform(input_size=input_size) | |||
| images = dynamic_preprocess( | |||
| @@ -101,6 +107,7 @@ def load_image(image_array: np.array, input_size=448, max_num=12): | |||
| def main(): | |||
| # Handle dynamic nodes, ask for the name of the node in the dataflow, and the same values as the ENV variables. | |||
| """TODO: Add docstring.""" | |||
| model_path = os.getenv("MODEL", "OpenGVLab/InternVL2-1B") | |||
| device = "cuda:0" if torch.cuda.is_available() else "cpu" | |||
| @@ -30,3 +30,8 @@ dev = ["pytest >=8.1.1", "ruff >=0.9.1"] | |||
| [project.scripts] | |||
| dora-internvl = "dora_internvl.main:main" | |||
| [tool.ruff.lint] | |||
| extend-select = [ | |||
| "D", # pydocstyle | |||
| ] | |||
| @@ -1,7 +1,10 @@ | |||
| """TODO: Add docstring.""" | |||
| import pytest | |||
| def test_import_main(): | |||
| """TODO: Add docstring.""" | |||
| from dora_internvl.main import main | |||
| # Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow. | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| import os | |||
| # Define the path to the README file relative to the package directory | |||
| @@ -5,7 +7,7 @@ readme_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "README.m | |||
| # Read the content of the README file | |||
| try: | |||
| with open(readme_path, "r", encoding="utf-8") as f: | |||
| with open(readme_path, encoding="utf-8") as f: | |||
| __doc__ = f.read() | |||
| except FileNotFoundError: | |||
| __doc__ = "README file not found." | |||
| @@ -1,5 +1,6 @@ | |||
| from .main import main | |||
| """TODO: Add docstring.""" | |||
| from .main import main | |||
| if __name__ == "__main__": | |||
| main() | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| from threading import Event | |||
| import cv2 | |||
| @@ -8,7 +10,10 @@ from record3d import Record3DStream | |||
| class DemoApp: | |||
| """TODO: Add docstring.""" | |||
| def __init__(self): | |||
| """TODO: Add docstring.""" | |||
| self.event = Event() | |||
| self.session = None | |||
| self.DEVICE_TYPE__TRUEDEPTH = 0 | |||
| @@ -16,25 +21,25 @@ class DemoApp: | |||
| self.stop = False | |||
| def on_new_frame(self): | |||
| """ | |||
| This method is called from non-main thread, therefore cannot be used for presenting UI. | |||
| """ | |||
| """on_new_frame method is called from non-main thread, therefore cannot be used for presenting UI.""" | |||
| self.event.set() # Notify the main thread to stop waiting and process new frame. | |||
| def on_stream_stopped(self): | |||
| """TODO: Add docstring.""" | |||
| self.stop = True | |||
| print("Stream stopped") | |||
| def connect_to_device(self, dev_idx): | |||
| """TODO: Add docstring.""" | |||
| print("Searching for devices") | |||
| devs = Record3DStream.get_connected_devices() | |||
| print("{} device(s) found".format(len(devs))) | |||
| print(f"{len(devs)} device(s) found") | |||
| for dev in devs: | |||
| print("\tID: {}\n".format(dev.product_id)) | |||
| print(f"\tID: {dev.product_id}\n") | |||
| if len(devs) <= dev_idx: | |||
| raise RuntimeError( | |||
| "Cannot connect to device #{}, try different index.".format(dev_idx) | |||
| f"Cannot connect to device #{dev_idx}, try different index.", | |||
| ) | |||
| dev = devs[dev_idx] | |||
| @@ -44,11 +49,13 @@ class DemoApp: | |||
| self.session.connect(dev) # Initiate connection and start capturing | |||
| def get_intrinsic_mat_from_coeffs(self, coeffs): | |||
| """TODO: Add docstring.""" | |||
| return np.array( | |||
| [[coeffs.fx, 0, coeffs.tx], [0, coeffs.fy, coeffs.ty], [0, 0, 1]] | |||
| [[coeffs.fx, 0, coeffs.tx], [0, coeffs.fy, coeffs.ty], [0, 0, 1]], | |||
| ) | |||
| def start_processing_stream(self): | |||
| """TODO: Add docstring.""" | |||
| node = Node() | |||
| for event in node: | |||
| @@ -62,7 +69,7 @@ class DemoApp: | |||
| depth = self.session.get_depth_frame() | |||
| rgb = self.session.get_rgb_frame() | |||
| intrinsic_mat = self.get_intrinsic_mat_from_coeffs( | |||
| self.session.get_intrinsic_mat() | |||
| self.session.get_intrinsic_mat(), | |||
| ) | |||
| if depth.shape != rgb.shape: | |||
| @@ -100,6 +107,7 @@ class DemoApp: | |||
| def main(): | |||
| """TODO: Add docstring.""" | |||
| app = DemoApp() | |||
| app.connect_to_device(dev_idx=0) | |||
| app.start_processing_stream() | |||
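| get_intrinsic_mat_from_coeffs assembles the pinhole matrix K from fx, fy and the principal point (called tx, ty above). With K, one depth pixel back-projects to a camera-space point; a sketch with made-up intrinsics: | |||
| import numpy as np | |||
| K = np.array([[600.0, 0.0, 320.0], [0.0, 600.0, 240.0], [0.0, 0.0, 1.0]])  # values assumed | |||
| u, v, depth = 400, 300, 1.5  # pixel coordinates and depth in meters | |||
| x = (u - K[0, 2]) * depth / K[0, 0] | |||
| y = (v - K[1, 2]) * depth / K[1, 1] | |||
| print((x, y, depth))  # (0.2, 0.15, 1.5) | |||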
| @@ -14,3 +14,8 @@ dev = ["pytest >=8.1.1", "ruff >=0.9.1"] | |||
| [project.scripts] | |||
| dora-ios-lidar = "dora_ios_lidar.main:main" | |||
| [tool.ruff.lint] | |||
| extend-select = [ | |||
| "D", # pydocstyle | |||
| ] | |||
| @@ -1,7 +1,10 @@ | |||
| """TODO: Add docstring.""" | |||
| import pytest | |||
| def test_import_main(): | |||
| """TODO: Add docstring.""" | |||
| from dora_ios_lidar.main import main | |||
| # Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow. | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| import os | |||
| # Define the path to the README file relative to the package directory | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| import pyarrow as pa | |||
| from dora import Node | |||
| from pynput import keyboard | |||
| @@ -5,6 +7,7 @@ from pynput.keyboard import Events | |||
| def main(): | |||
| """TODO: Add docstring.""" | |||
| node = Node() | |||
| always_none = node.next(timeout=0.001) is None | |||
| @@ -23,3 +23,8 @@ dev = ["pytest >=8.1.1", "ruff >=0.9.1"] | |||
| [project.scripts] | |||
| dora-keyboard = "dora_keyboard.main:main" | |||
| [tool.ruff.lint] | |||
| extend-select = [ | |||
| "D", # pydocstyle | |||
| ] | |||
| @@ -1,7 +1,10 @@ | |||
| """TODO: Add docstring.""" | |||
| import pytest | |||
| def test_import_main(): | |||
| """TODO: Add docstring.""" | |||
| from dora_keyboard.main import main | |||
| # Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow. | |||
| @@ -15,3 +15,8 @@ scripts = { "dora-kit-car" = "dora_kit_car:py_main" } | |||
| [tool.maturin] | |||
| features = ["python", "pyo3/extension-module"] | |||
| [tool.ruff.lint] | |||
| extend-select = [ | |||
| "D", # pydocstyle | |||
| ] | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| import os | |||
| # Define the path to the README file relative to the package directory | |||
| @@ -5,7 +7,7 @@ readme_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "README.m | |||
| # Read the content of the README file | |||
| try: | |||
| with open(readme_path, "r", encoding="utf-8") as f: | |||
| with open(readme_path, encoding="utf-8") as f: | |||
| __doc__ = f.read() | |||
| except FileNotFoundError: | |||
| __doc__ = "README file not found." | |||
| @@ -1,5 +1,6 @@ | |||
| from .main import main | |||
| """TODO: Add docstring.""" | |||
| from .main import main | |||
| if __name__ == "__main__": | |||
| main() | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| import pyarrow as pa | |||
| from dora import Node | |||
| from kokoro import KPipeline | |||
| @@ -6,6 +8,7 @@ pipeline = KPipeline(lang_code="a") # <= make sure lang_code matches voice | |||
| def main(): | |||
| """TODO: Add docstring.""" | |||
| node = Node() | |||
| for event in node: | |||
| @@ -14,3 +14,8 @@ dev = ["pytest >=8.1.1", "ruff >=0.9.1"] | |||
| [project.scripts] | |||
| dora-kokoro-tts = "dora_kokoro_tts.main:main" | |||
| [tool.ruff.lint] | |||
| extend-select = [ | |||
| "D", # pydocstyle | |||
| ] | |||
| @@ -1,7 +1,10 @@ | |||
| """TODO: Add docstring.""" | |||
| import pytest | |||
| def test_import_main(): | |||
| """TODO: Add docstring.""" | |||
| from dora_kokoro_tts.main import main | |||
| # Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow. | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| import os | |||
| # Define the path to the README file relative to the package directory | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| import os | |||
| import time as tm | |||
| @@ -12,6 +14,7 @@ SAMPLE_RATE = int(os.getenv("SAMPLE_RATE", "16000")) | |||
| def main(): | |||
| # Initialize buffer and recording flag | |||
| """TODO: Add docstring.""" | |||
| buffer = [] | |||
| start_recording_time = tm.time() | |||
| node = Node() | |||
| @@ -20,6 +23,7 @@ def main(): | |||
| finished = False | |||
| def callback(indata, frames, time, status): | |||
| """TODO: Add docstring.""" | |||
| nonlocal buffer, node, start_recording_time, finished | |||
| if tm.time() - start_recording_time > MAX_DURATION: | |||
| @@ -23,3 +23,8 @@ dev = ["pytest >=8.1.1", "ruff >=0.9.1"] | |||
| [project.scripts] | |||
| dora-microphone = "dora_microphone.main:main" | |||
| [tool.ruff.lint] | |||
| extend-select = [ | |||
| "D", # pydocstyle | |||
| ] | |||
| @@ -1,7 +1,10 @@ | |||
| """TODO: Add docstring.""" | |||
| import pytest | |||
| def test_import_main(): | |||
| """TODO: Add docstring.""" | |||
| from dora_microphone.main import main | |||
| # Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow. | |||
| @@ -14,3 +14,8 @@ scripts = { "dora-object-to-pose" = "dora_object_to_pose:py_main" } | |||
| [tool.maturin] | |||
| features = ["python", "pyo3/extension-module"] | |||
| [tool.ruff.lint] | |||
| extend-select = [ | |||
| "D", # pydocstyle | |||
| ] | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| import os | |||
| # Define the path to the README file relative to the package directory | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| import ast | |||
| import asyncio | |||
| from typing import List, Optional | |||
| @@ -13,11 +15,15 @@ app = FastAPI() | |||
| class ChatCompletionMessage(BaseModel): | |||
| """TODO: Add docstring.""" | |||
| role: str | |||
| content: str | |||
| class ChatCompletionRequest(BaseModel): | |||
| """TODO: Add docstring.""" | |||
| model: str | |||
| messages: List[ChatCompletionMessage] | |||
| temperature: Optional[float] = 1.0 | |||
| @@ -25,6 +31,8 @@ class ChatCompletionRequest(BaseModel): | |||
| class ChatCompletionResponse(BaseModel): | |||
| """TODO: Add docstring.""" | |||
| id: str | |||
| object: str | |||
| created: int | |||
| @@ -38,6 +46,7 @@ node = Node() # provide the name to connect to the dataflow if dynamic node | |||
| @app.post("/v1/chat/completions") | |||
| async def create_chat_completion(request: ChatCompletionRequest): | |||
| """TODO: Add docstring.""" | |||
| data = next( | |||
| (msg.content for msg in request.messages if msg.role == "user"), | |||
| "No user message found.", | |||
| @@ -95,6 +104,7 @@ async def create_chat_completion(request: ChatCompletionRequest): | |||
| @app.get("/v1/models") | |||
| async def list_models(): | |||
| """TODO: Add docstring.""" | |||
| return { | |||
| "object": "list", | |||
| "data": [ | |||
| @@ -109,6 +119,7 @@ async def list_models(): | |||
| async def run_fastapi(): | |||
| """TODO: Add docstring.""" | |||
| config = uvicorn.Config(app, host="0.0.0.0", port=8000, log_level="info") | |||
| server = uvicorn.Server(config) | |||
| @@ -121,6 +132,7 @@ async def run_fastapi(): | |||
| def main(): | |||
| """TODO: Add docstring.""" | |||
| asyncio.run(run_fastapi()) | |||
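| Once run_fastapi is serving on port 8000, the endpoints above speak the OpenAI chat-completions shape; a hypothetical client call (the model name is arbitrary here): | |||
| import requests | |||
| resp = requests.post( | |||
|     "http://localhost:8000/v1/chat/completions", | |||
|     json={ | |||
|         "model": "dora", | |||
|         "messages": [{"role": "user", "content": "Hello"}], | |||
|     }, | |||
| ) | |||
| print(resp.json()) | |||
| print(requests.get("http://localhost:8000/v1/models").json()) | |||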
| @@ -27,6 +27,6 @@ dev = ["pytest >=8.1.1", "ruff >=0.9.1"] | |||
| [project.scripts] | |||
| dora-openai-server = "dora_openai_server.main:main" | |||
| [tool.ruff.lint] | |||
| extend-select = ["I"] | |||
| extend-select = ["I","D"] | |||
| @@ -1,2 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| def test_import_main(): | |||
| """TODO: Add docstring.""" | |||
| pass | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| import os | |||
| # Define the path to the README file relative to the package directory | |||
| @@ -1,3 +1,5 @@ | |||
| """TODO: Add docstring.""" | |||
| import os | |||
| from pathlib import Path | |||
| @@ -26,6 +28,7 @@ model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME_OR_PATH) | |||
| def cut_repetition(text, min_repeat_length=4, max_repeat_length=50): | |||
| # Check if the text is primarily Chinese (you may need to adjust this threshold) | |||
| """TODO: Add docstring.""" | |||
| if sum(1 for char in text if "\u4e00" <= char <= "\u9fff") / len(text) > 0.5: | |||
| # Chinese text processing | |||
| for repeat_length in range( | |||
@@ -54,6 +57,7 @@ def cut_repetition(text, min_repeat_length=4, max_repeat_length=50):
def main():
    """Run the node event loop."""
    node = Node()
    while True:
        event = node.next()
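
For readers unfamiliar with dora's Python API, the loop above follows the standard node pattern. A minimal self-contained sketch (the output id and echo behavior are illustrative, not this node's actual I/O):

    from dora import Node

    node = Node()
    while True:
        event = node.next()
        if event is None or event["type"] == "STOP":
            break
        if event["type"] == "INPUT":
            # Echo each input back out; a real node transforms event["value"] first.
            node.send_output("text", event["value"], event["metadata"])
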
@@ -27,3 +27,8 @@ dev = ["pytest >=8.1.1", "ruff >=0.9.1"]
[project.scripts]
dora-opus = "dora_opus.main:main"

[tool.ruff.lint]
extend-select = [
    "D",  # pydocstyle
]
@@ -1,7 +1,10 @@
"""Tests for the dora-opus node."""

import pytest


def test_import_main():
    """Import the node entry point and expect the dora runtime error."""
    from dora_opus.main import main
    # Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow.
@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

# Define the path to the README file relative to the package directory
@@ -1,3 +1,5 @@
"""Package entry point re-exporting the node's main function."""

from .main import main

if __name__ == "__main__":
@@ -1,3 +1,5 @@
"""Dora node synthesizing speech with OuteTTS."""

import argparse
import os
import pathlib
@@ -15,6 +17,7 @@ torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
def load_interface():
    """Load the OuteTTS interface for the backend selected by the INTERFACE env var."""
    if os.getenv("INTERFACE", "HF") == "HF":
        model_config = outetts.HFModelConfig_v1(
            model_path="OuteAI/OuteTTS-0.2-500M",
@@ -39,6 +42,7 @@ def load_interface():
def create_speaker(interface, path):
    """Create a speaker profile from a reference audio file."""
    speaker = interface.create_speaker(
        audio_path=path,
        # If transcript is not provided, it will be automatically transcribed using Whisper
@@ -53,6 +57,7 @@ def create_speaker(interface, path):
def main(arg_list: list[str] | None = None):
    """Parse CLI arguments and run the requested action."""
    parser = argparse.ArgumentParser(description="Dora Outetts Node")
    parser.add_argument("--create-speaker", type=str, help="Path to audio file")
    parser.add_argument("--test", action="store_true", help="Run tests")
@@ -1,3 +1,5 @@
"""Tests for the dora-outtetts node."""

import os

import pytest
@@ -7,11 +9,13 @@ CI = os.getenv("CI", "false") in ["True", "true"]
def test_import_main():
    """Run main outside a dataflow and expect a RuntimeError."""
    with pytest.raises(RuntimeError):
        main([])
def test_load_interface():
    """Check that load_interface runs or fails with a RuntimeError."""
    try:
        interface = load_interface()
    except RuntimeError:
@@ -27,3 +27,8 @@ llama-cpp-python = [
[project.scripts]
dora-outtetts = "dora_outtetts.main:main"

[tool.ruff.lint]
extend-select = [
    "D",  # pydocstyle
]
@@ -1,3 +1,5 @@
"""TODO: Add docstring."""

import os

# Define the path to the README file relative to the package directory
@@ -1,3 +1,5 @@
"""Dora node streaming text-to-speech with Parler-TTS."""

import os
import time
from pathlib import Path
@@ -58,6 +60,7 @@ stream = p.open(format=pyaudio.paInt16, channels=1, rate=sampling_rate, output=True)
def play_audio(audio_array):
    """Scale an audio array to int16 range and play it on the output stream."""
    if np.issubdtype(audio_array.dtype, np.floating):
        max_val = np.max(np.abs(audio_array))
        audio_array = (audio_array / max_val) * 32767
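
A quick, self-contained check of that float-to-int16 scaling (illustrative values only):

    import numpy as np

    x = np.array([0.0, -0.5, 1.0], dtype=np.float32)
    y = (x / np.max(np.abs(x))) * 32767
    print(y.astype(np.int16))  # [     0 -16383  32767]
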
@@ -67,16 +70,21 @@ def play_audio(audio_array):
class InterruptStoppingCriteria(StoppingCriteria):
    """Stopping criteria that ends generation once a stop signal is set."""

    def __init__(self):
        """Initialize with the stop signal cleared."""
        super().__init__()
        self.stop_signal = False

    def __call__(
        self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs,
    ) -> bool:
        """Return True when generation should stop."""
        return self.stop_signal

    def stop(self):
        """Request that generation stop."""
        self.stop_signal = True
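
For context, this is the standard way to make Hugging Face `generate()` interruptible. A sketch assuming `model`, `tokenizer` inputs, and the class above from the surrounding node:

    from transformers import StoppingCriteriaList

    stopper = InterruptStoppingCriteria()
    generation = model.generate(
        **inputs,
        stopping_criteria=StoppingCriteriaList([stopper]),
    )
    # Calling stopper.stop() from another thread aborts generation at the next step.
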
@@ -86,6 +94,7 @@ def generate_base(
    description=default_description,
    play_steps_in_s=0.5,
):
    """Stream speech generation and play audio chunks as they are produced."""
    prev_time = time.time()
    play_steps = int(frame_rate * play_steps_in_s)
    inputs = tokenizer(description, return_tensors="pt").to(device)
@@ -133,6 +142,7 @@ def generate_base(
def main():
    """Warm up the model, then run the node event loop."""
    generate_base(None, "Ready !", default_description, 0.5)
    node = Node()
    while True:
@@ -29,3 +29,8 @@ dev = ["pytest >=8.1.1", "ruff >=0.9.1"]
[project.scripts]
dora-parler = "dora_parler.main:main"

[tool.ruff.lint]
extend-select = [
    "D",  # pydocstyle
]