
Final demo code

remote-reachy2-improvements
haixuanTao committed 8 months ago
commit 8eeca6a3b9
14 changed files with 451 additions and 116 deletions
 1. examples/reachy2-remote/boot_reachy.sh                +7   -0
 2. examples/reachy2-remote/dataflow_reachy.yml           +23  -3
 3. examples/reachy2-remote/no_torque copy.yml            +45  -0
 4. examples/reachy2-remote/no_torque.yml                 +45  -0
 5. examples/reachy2-remote/no_torque_remote.yml          +45  -0
 6. examples/reachy2-remote/parse_bbox.py                 +1   -1
 7. examples/reachy2-remote/parse_pose.py                 +176 -65
 8. examples/reachy2-remote/parse_whisper.py              +67  -12
 9. node-hub/dora-kokoro-tts/dora_kokoro_tts/main.py      +2   -2
10. node-hub/dora-object-to-pose/src/lib.rs               +1   -1
11. node-hub/dora-reachy2/dora_reachy2/head.py            +2   -1
12. node-hub/dora-reachy2/dora_reachy2/left_arm.py        +16  -14
13. node-hub/dora-reachy2/dora_reachy2/mobile_base.py     +4   -1
14. node-hub/dora-reachy2/dora_reachy2/right_arm.py       +17  -16

examples/reachy2-remote/boot_reachy.sh  +7 -0

@@ -0,0 +1,7 @@
ssh bedrock@reachy2-pvt02.local


docker exec -it core bash


ros2 service call /SetZuuuSafety zuuu_interfaces/srv/SetZuuuSafety "{safety_on: False}"
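The script is a manual runbook: SSH to the robot, enter the core container, and switch off the ZUUU mobile-base safety so the translate_base commands elsewhere in this commit can move the base freely. A minimal sketch that runs the same service call non-interactively, assuming key-based SSH to the host named in the script and a running `core` container (the helper name is hypothetical):

import subprocess

SAFETY_CALL = (
    'ros2 service call /SetZuuuSafety '
    'zuuu_interfaces/srv/SetZuuuSafety "{safety_on: False}"'
)

def set_zuuu_safety(host: str = "bedrock@reachy2-pvt02.local") -> None:
    """Disable the mobile-base safety through the robot's core container."""
    subprocess.run(
        ["ssh", host, f"docker exec core bash -lc '{SAFETY_CALL}'"],
        check=True,
    )

if __name__ == "__main__":
    set_zuuu_safety()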

examples/reachy2-remote/dataflow_reachy.yml  +23 -3

@@ -108,6 +108,8 @@ nodes:
- follow_pose
- raise_arm_pose
- look_ahead
- emotion
- speech
env:
IMAGE_RESIZE_RATIO: "1.0"

@@ -133,7 +135,7 @@ nodes:
points: parse_whisper/points
outputs:
- bbox_track
- - bbox_grab
+ - bbox_pick
env:
IMAGE_RESIZE_RATIO: "1.0"

@@ -142,7 +144,7 @@ nodes:
path: dora-sam2
inputs:
image_depth: camera_depth/image_depth
- boxes2d: parse_bbox/bbox_grab
+ boxes2d: parse_bbox/bbox_pick
outputs:
- masks

@@ -179,6 +181,7 @@ nodes:
raise_arm_pose: parse_whisper/raise_arm_pose
look_ahead: parse_whisper/look_ahead
translate_base: reachy-mobile-base/response_base
emotion: parse_whisper/emotion
outputs:
- action_r_arm
- action_l_arm
@@ -206,9 +209,10 @@ nodes:
head/image: camera/image_left
torso/image: camera_depth/image_depth
torso/depth: camera_depth/depth
- torso/boxes2d: parse_bbox/bbox_grab
+ torso/boxes2d: parse_bbox/bbox_pick
torso/masks: sam2/masks
original_text: dora-distil-whisper/text
emotion_text: parse_whisper/emotion
parsed_text: parse_whisper/text
qwenvl_text: dora-qwenvl/text
cotracker/tracked_image: tracker/tracked_image
@@ -216,3 +220,19 @@ nodes:
env:
RERUN_MEMORY_LIMIT: 25%
CAMERA_PITCH: 2.40

- id: dora-kokoro-tts
build: pip install -e ../../node-hub/dora-kokoro-tts
path: dora-kokoro-tts
inputs:
text: parse_whisper/speech
outputs:
- audio
env:
VOICE: bm_fable

- id: dora-pyaudio
build: pip install -e ../../node-hub/dora-pyaudio
path: dora-pyaudio
inputs:
audio: dora-kokoro-tts/audio
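These two appended nodes close the speech loop: parse_whisper emits a speech string, dora-kokoro-tts synthesizes it into an audio array, and dora-pyaudio plays it back. A minimal sketch of a downstream consumer in the same style as the commit's Python nodes (the sample_rate metadata key is an assumption, not confirmed by this diff):

from dora import Node

node = Node()

for event in node:
    if event["type"] == "INPUT" and event["id"] == "audio":
        # Audio arrives as a pyarrow array; convert for playback or logging.
        samples = event["value"].to_numpy()
        rate = int(event["metadata"].get("sample_rate", 24000))
        print(f"received {len(samples)} samples at {rate} Hz")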

examples/reachy2-remote/no_torque copy.yml  +45 -0

@@ -0,0 +1,45 @@
nodes:
  - id: camera
    build: pip install -e ../../node-hub/dora-reachy2
    path: dora-reachy2-camera
    _unstable_deploy:
      machine: macbook
    inputs:
      image_left: dora/timer/millis/20
    outputs:
      #- image_depth
      #- depth
      - image_left
    env:
      # IMAGE_WIDTH: 640
      # IMAGE_HEIGHT: 480
      ROBOT_IP: 10.42.0.242

  - id: camera_depth
    build: pip install -e ../../node-hub/dora-reachy2
    path: dora-reachy2-camera
    _unstable_deploy:
      machine: macbook
    inputs:
      depth: dora/timer/millis/20
    outputs:
      - image_depth
      - depth
    env:
      # IMAGE_WIDTH: 640
      # IMAGE_HEIGHT: 480
      ROBOT_IP: 10.42.0.242

  - id: plot
    build: pip install -e ../../node-hub/dora-rerun
    path: dora-rerun
    _unstable_deploy:
      machine: macbook
    inputs:
      torso/depth: camera_depth/depth
      torso/image: camera_depth/image_depth
      head/image_left: camera/image_left
      #torso/depth: camera/depth
    env:
      RERUN_MEMORY_LIMIT: 5%
      CAMERA_PITCH: 2.47

examples/reachy2-remote/no_torque.yml  +45 -0

@@ -0,0 +1,45 @@
nodes:
  - id: camera
    build: pip install -e ../../node-hub/dora-reachy2
    path: dora-reachy2-camera
    _unstable_deploy:
      machine: macbook
    inputs:
      image_left: dora/timer/millis/20
    outputs:
      #- image_depth
      #- depth
      - image_left
    env:
      # IMAGE_WIDTH: 640
      # IMAGE_HEIGHT: 480
      ROBOT_IP: 172.18.131.66

  - id: camera_depth
    build: pip install -e ../../node-hub/dora-reachy2
    path: dora-reachy2-camera
    _unstable_deploy:
      machine: macbook
    inputs:
      depth: dora/timer/millis/20
    outputs:
      - image_depth
      - depth
    env:
      # IMAGE_WIDTH: 640
      # IMAGE_HEIGHT: 480
      ROBOT_IP: 172.18.131.66

  - id: plot
    build: pip install -e ../../node-hub/dora-rerun
    path: dora-rerun
    _unstable_deploy:
      machine: macbook
    inputs:
      torso/depth: camera_depth/depth
      torso/image: camera_depth/image_depth
      head/image_left: camera/image_left
      #torso/depth: camera/depth
    env:
      RERUN_MEMORY_LIMIT: 5%
      CAMERA_PITCH: 2.40

examples/reachy2-remote/no_torque_remote.yml  +45 -0

@@ -0,0 +1,45 @@
nodes:
  - id: camera
    build: pip install -e ../../node-hub/dora-reachy2
    path: dora-reachy2-camera
    _unstable_deploy:
      machine: macbook
    inputs:
      image_left: dora/timer/millis/20
    outputs:
      #- image_depth
      #- depth
      - image_left
    env:
      # IMAGE_WIDTH: 640
      # IMAGE_HEIGHT: 480
      ROBOT_IP: 10.42.0.242

  - id: camera_depth
    build: pip install -e ../../node-hub/dora-reachy2
    path: dora-reachy2-camera
    _unstable_deploy:
      machine: macbook
    inputs:
      depth: dora/timer/millis/20
    outputs:
      - image_depth
      - depth
    env:
      # IMAGE_WIDTH: 640
      # IMAGE_HEIGHT: 480
      ROBOT_IP: 10.42.0.242

  - id: plot
    build: pip install -e ../../node-hub/dora-rerun
    path: dora-rerun
    _unstable_deploy:
      machine: macbook
    inputs:
      torso/depth: camera_depth/depth
      torso/image: camera_depth/image_depth
      head/image_left: camera/image_left
      #torso/depth: camera/depth
    env:
      RERUN_MEMORY_LIMIT: 5%
      CAMERA_PITCH: 2.47

examples/reachy2-remote/parse_bbox.py  +1 -1

@@ -70,7 +70,7 @@ for event in node:
)
elif image_id == "image_depth":
node.send_output(
"bbox_grab",
"bbox_pick",
pa.array(bboxes.ravel()),
metadata,
)

examples/reachy2-remote/parse_pose.py  +176 -65

@@ -1,8 +1,6 @@
"""TODO: Add docstring."""

- import json
- import os

import numpy as np
import pyarrow as pa
from dora import Node
@@ -34,7 +32,6 @@ r_init_pose = [
]



def wait_for_event(id, timeout=None, cache={}):
"""TODO: Add docstring."""
while True:
@@ -74,30 +71,24 @@ for event in node:
node.send_output("look", pa.array([x, y, z]))

match action:
case "grab":
case "pick":
if len(values) == 0:
continue
x = x + 0.05
z = z + 0.03

- ## Clip the Maximum and minim values for the height of the arm to avoid collision or weird movement.
- trajectory = np.array(
- [
- [x, y, z + 0.1, 0, 0, 0, 100],
- [x, y, z, 0, 0, 0, 0],
- [x, y, z + 0.1, 0, 0, 0, 0],
- ],
- ).ravel()

- if y < 0:
+ if y < 0: # right arm
node.send_output("look", pa.array([x, y, z]))

# Send a mobile base command to move slightly left to facilitate the grasp
node.send_output(
"translate_base",
pa.array([0.0, y + 0.3, 0, 0, 0, 0]),
metadata={"encoding": "xyzrpy", "duration": "0.6"},
metadata={"encoding": "xyzrpy", "duration": "0.3"},
)
event = wait_for_event(id="translate_base")
node.send_output("look", pa.array([x, -0.3, z]))

trajectory = np.array(
[
[x, -0.3, z + 0.1, 0, 0, 0, 100],
@@ -106,55 +97,167 @@ for event in node:
[0.3, -0.3, -0.16, 0, 0, 0, 0],
],
).ravel()
node.send_output(
"action_r_arm",
pa.array(trajectory),
metadata={"encoding": "xyzrpy", "duration": "0.3"},
)
event = wait_for_event(id="response_r_arm")

node.send_output("look", pa.array([x, -0.3, z]))

node.send_output(
"action_r_arm",
"translate_base",
pa.array([0.0, -(y + 0.3), 0, 0, 0, 0]),
metadata={"encoding": "xyzrpy", "duration": "0.3"},
)
event = wait_for_event(id="translate_base")
arm_holding_object = "right"
node.send_output("look", pa.array([0.4, 0, 0]))
node.send_output("success", pa.array([True]))

else: # left arm
# Send a mobile base command to move slightly left to facilitate the grasp
node.send_output("look", pa.array([x, y, z]))

## Move the base
node.send_output(
"translate_base",
pa.array([0.0, y - 0.3, 0, 0, 0, 0]),
metadata={"encoding": "xyzrpy", "duration": "0.3"},
)
event = wait_for_event(id="translate_base")

node.send_output("look", pa.array([x, 0.3, z]))
trajectory = np.array(
[
[x, 0.3, z + 0.1, 0, 0, 0, 100],
[x, 0.3, z, 0, 0, 0, 0],
[x, 0.3, z + 0.1, 0, 0, 0, 0],
[0.3, 0.3, -0.16, 0, 0, 0, 0],
],
).ravel()
node.send_output("look", pa.array([x, 0.3, z]))

node.send_output(
"action_l_arm",
pa.array(trajectory),
metadata={"encoding": "xyzrpy", "duration": "0.3"},
)

event = wait_for_event(id="response_l_arm")
if not (
event is not None
and event[0] is not None
and event[0][0].as_py()
):
node.send_output(
"action_l_arm",
pa.array(trajectory),
metadata={"encoding": "xyzrpy", "duration": "0.6"},
metadata={"encoding": "xyzrpy", "duration": "0.3"},
)

event = wait_for_event(id="response_l_arm")
node.send_output(
"translate_base",
pa.array([0.0, -(y - 0.3), 0, 0, 0, 0]),
metadata={"encoding": "xyzrpy", "duration": "0.3"},
)
event = wait_for_event(id="translate_base")
node.send_output("look", pa.array([0.4, 0.0, 0.0]))

arm_holding_object = "left"
node.send_output("success", pa.array([True]))
case "flip":
if len(values) == 0:
continue
x = x + 0.05
z = z + 0.03

if y < 0: # right arm
node.send_output("look", pa.array([x, y, z]))

# Send a mobile base command to move slightly left to facilitate the grasp
node.send_output(
"translate_base",
pa.array([0.0, y + 0.3, 0, 0, 0, 0]),
metadata={"encoding": "xyzrpy", "duration": "0.3"},
)
event = wait_for_event(id="translate_base")
node.send_output("look", pa.array([x, -0.3, z]))

trajectory = np.array(
[
[x, -0.3, z + 0.1, 0, 0, 0, 100],
[x, -0.3, z, 0, 0, 0, 0],
[x, -0.3, z + 0.1, 0, 0, 0, 0],
[x, -0.3, z + 0.1, -np.pi, 0, 0, 0],
[x, -0.3, z, -np.pi, 0, 0, 100],
[x, -0.3, z + 0.1, -np.pi, 0, 0, 100],
[x, -0.3, z + 0.1, 0, 0, 0, 100],
],
).ravel()
node.send_output(
"action_r_arm",
pa.array(trajectory),
metadata={"encoding": "xyzrpy", "duration": "0.8"},
)
event = wait_for_event(id="response_r_arm")

node.send_output("look", pa.array([x, -0.3, z]))

node.send_output(
"translate_base",
pa.array([0.0, -(y + 0.3), 0, 0, 0, 0]),
metadata={"encoding": "xyzrpy", "duration": "0.6"},
metadata={"encoding": "xyzrpy", "duration": "0.3"},
)
event = wait_for_event(id="translate_base")
arm_holding_object = "right"
node.send_output("look", pa.array([0.4, 0, 0]))
node.send_output("success", pa.array([True]))

- else:
+ else: # left arm
# Send a mobile base command to move slightly left to facilitate the grasp
node.send_output("look", pa.array([x, y, z]))

## Move the base
node.send_output(
"translate_base",
pa.array([0.0, y - 0.3, 0, 0, 0, 0]),
metadata={"encoding": "xyzrpy", "duration": "0.6"},
metadata={"encoding": "xyzrpy", "duration": "0.3"},
)
event = wait_for_event(id="translate_base")

node.send_output("look", pa.array([x, 0.3, z]))
trajectory = np.array(
[
[x, 0.3, z + 0.1, 0, 0, 0, 100],
[x, 0.3, z, 0, 0, 0, 0],
[x, 0.3, z + 0.1, 0, 0, 0, 0],
[0.3, 0.3, -0.16, 0, 0, 0, 0],
[x, 0.3, z + 0.1, -np.pi, 0, 0, 0],
[x, 0.3, z, -np.pi, 0, 0, 100],
[x, 0.3, z + 0.1, -np.pi, 0, 0, 100],
[x, 0.3, z + 0.1, 0, 0, 0, 100],
],
).ravel()
node.send_output("look", pa.array([x, 0.3, z]))

node.send_output(
"action_l_arm",
pa.array(trajectory),
metadata={"encoding": "xyzrpy", "duration": "0.6"},
metadata={"encoding": "xyzrpy", "duration": "0.8"},
)

event = wait_for_event(id="response_l_arm")

node.send_output(
"translate_base",
pa.array([0.0, -(y - 0.3), 0, 0, 0, 0]),
metadata={"encoding": "xyzrpy", "duration": "0.6"},
metadata={"encoding": "xyzrpy", "duration": "0.3"},
)
event = wait_for_event(id="translate_base")
node.send_output("look", pa.array([0.4, 0.0, 0.0]))

arm_holding_object = "left"
node.send_output("success", pa.array([True]))

@@ -165,13 +268,6 @@ for event in node:
z = z + 0.13

## Clip the maximum and minimum values for the height of the arm to avoid collision or weird movement.
- trajectory = np.array(
- [
- [x, y, z + 0.1, 0, 0, 0, 0],
- [x, y, z, 0, 0, 0, 100],
- [x, y, z + 0.1, 0, 0, 0, 100],
- ],
- ).ravel()
node.send_output("look", pa.array([x, y, z]))
if abs(y) > 0.5:
continue
@@ -179,12 +275,11 @@ for event in node:
print("No arm holding object!!!")
continue
elif arm_holding_object == "right":
- # Send a mobile base command to move slightly left to facilitate the grasp
node.send_output(
"translate_base",
pa.array([0.0, y + 0.3, 0, 0, 0, 0]),
metadata={"encoding": "xyzrpy", "duration": "0.6"},
metadata={"encoding": "xyzrpy", "duration": "0.3"},
)
event = wait_for_event(id="translate_base", timeout=10)
node.send_output("look", pa.array([x, -0.3, z]))
@@ -199,29 +294,48 @@ for event in node:
node.send_output(
"action_r_arm",
pa.array(trajectory),
metadata={"encoding": "xyzrpy", "duration": "0.6"},
metadata={"encoding": "xyzrpy", "duration": "0.3"},
)
event = wait_for_event(id="response_r_arm", timeout=10)
if not (
event is not None
and event[0] is not None
and event[0][0].as_py()
):
trajectory = np.array(
[
[x, 0.3, -0.2, 0, 0, 0, 0],
[x, 0.3, -0.2, 0, 0, 0, 100],
[x, 0.3, -0.2, 0, 0, 0, 100],
],
).ravel()

node.send_output(
"action_l_arm",
pa.array(trajectory),
metadata={"encoding": "xyzrpy", "duration": "0.3"},
)
node.send_output(
"action_r_arm",
pa.array(r_init_pose),
metadata={"encoding": "jointstate", "duration": "0.6"},
metadata={"encoding": "jointstate", "duration": "0.7"},
)

event = wait_for_event(id="response_r_arm", timeout=10)
node.send_output(
"translate_base",
pa.array([0.0, -(y + 0.3), 0, 0, 0, 0]),
metadata={"encoding": "xyzrpy", "duration": "0.6"},
metadata={"encoding": "xyzrpy", "duration": "0.3"},
)
event = wait_for_event(id="translate_base", timeout=10)
node.send_output("success", pa.array([True]))
node.send_output("look", pa.array([0.4, 0.0, 0.0]))

else:
node.send_output(
"translate_base",
pa.array([0.0, y - 0.3, 0, 0, 0, 0]),
metadata={"encoding": "xyzrpy", "duration": "0.6"},
metadata={"encoding": "xyzrpy", "duration": "0.3"},
)
event = wait_for_event(id="translate_base")
trajectory = np.array(
@@ -236,25 +350,43 @@ for event in node:
node.send_output(
"action_l_arm",
pa.array(trajectory),
metadata={"encoding": "xyzrpy", "duration": "0.6"},
metadata={"encoding": "xyzrpy", "duration": "0.3"},
)
event = wait_for_event(id="response_l_arm")
if not (
event is not None
and event[0] is not None
and event[0][0].as_py()
):
trajectory = np.array(
[
[x, 0.3, -0.2, 0, 0, 0, 0],
[x, 0.3, -0.2, 0, 0, 0, 100],
[x, 0.3, -0.2, 0, 0, 0, 100],
],
).ravel()

node.send_output(
"action_l_arm",
pa.array(trajectory),
metadata={"encoding": "xyzrpy", "duration": "0.3"},
)

node.send_output(
"action_l_arm",
pa.array(l_init_pose),
metadata={"encoding": "jointstate", "duration": "0.6"},
metadata={"encoding": "jointstate", "duration": "0.7"},
)
event = wait_for_event(id="response_l_arm")

node.send_output(
"translate_base",
pa.array([0.0, -(y - 0.3), 0, 0, 0, 0]),
metadata={"encoding": "xyzrpy", "duration": "0.6"},
metadata={"encoding": "xyzrpy", "duration": "0.3"},
)
event = wait_for_event(id="translate_base")
node.send_output("success", pa.array([True]))

node.send_output("look", pa.array([0.4, 0.0, 0.0]))

elif event["id"] == "release_right":
node.send_output(
@@ -279,29 +411,6 @@ for event in node:
pa.array(r_init_pose),
metadata={"encoding": "jointstate", "duration": 1},
)
elif event["id"] == "release_left":
node.send_output(
"action_l_arm",
pa.array(
[
0.4,
0,
-0.16,
0,
0,
0,
100,
],
),
metadata={"encoding": "xyzrpy", "duration": "0.75"},
)
event, cache = wait_for_event(id="response_l_arm", cache=cache)
if event is not None and event[0]:
node.send_output(
"action_l_arm",
pa.array(l_init_pose),
metadata={"encoding": "jointstate", "duration": 1},
)

elif event["id"] == "follow_pose":
node.send_output(
@@ -330,11 +439,13 @@ for event in node:
metadata={"encoding": "jointstate", "duration": 2},
)
event = wait_for_event(id="response_r_arm", timeout=10)

node.send_output(
"action_l_arm",
pa.array(l_init_pose),
metadata={"encoding": "jointstate", "duration": 2},
)
event = wait_for_event(id="response_l_arm", timeout=10)

elif event["id"] == "look_ahead":
node.send_output("look", pa.array([0.7, 0, 0]))
node.send_output("look", pa.array([0.7, 0, 0]))

examples/reachy2-remote/parse_whisper.py  +67 -12

@@ -1,14 +1,13 @@
"""TODO: Add docstring."""

import json
import os
import time
+ import re

import numpy as np
import pyarrow as pa
from dora import Node

- import re

node = Node()

@@ -18,6 +17,7 @@ queue = []

last_prompt = ""


def handle_event(text: str):
global queue
if "stop" in text:
@@ -31,31 +31,87 @@ def handle_event(text: str):
elif "raise your arms" in text:
node.send_output("raise_arm_pose", pa.array([1.0]))

elif ("grab" in text and "drop" in text) or ("make a hot dog" in text):
elif (
("pick" in text and "place" in text)
or ("make a hot dog" in text)
or ("cook" in text)
):
if "make a hot dog" in text:
text = "grab the sausage, drop it on the black grill, grab the salad, drop it on the bread, grab the sausage, drop it on the bread"
text = "pick the sausage, place it on the black grill, wait, flip the sausage, wait, pick the sausage, place it on the bread, speak it's ready!"
elif "cook" in text:
# Match 'cook' followed by two words
match = re.search(r"\bcook\b\s+(\w+)\s+(\w+)", text)

if match:
word1 = match.group(1)
word2 = match.group(2)
grilled_item = word1 + " " + word2
text = f"pick {grilled_item}, place it on the black grill, wait, flip {grilled_item}, wait, pick {grilled_item} again, place it on the white plate, speak {grilled_item} is ready!"

if "," or "." in text:
prompts = re.split(r"[,.]",text)
prompts = re.split(r"[,.]", text)
queue = prompts
first_prompt = queue[0]
queue = queue[1:]
handle_event(first_prompt)
elif "grab " in text:
text = f"Given the prompt: {text}. Output the bounding boxes for the given grabbed object"

elif "pick " in text:
text = text.replace("can you", "")
text = text.replace("please", "")
text = text.replace("reachy", "")

node.send_output("speech", pa.array(["I'm going to " + text]))

text = f"Given the prompt: {text}. Output the bounding boxes for the given picked object"

node.send_output(
"text",
pa.array([text]),
{"image_id": "image_depth", "action": "grab"},
{"image_id": "image_depth", "action": "pick"},
)
elif "drop " in text:
text = f"Given the prompt: {text}. Output the bounding boxes for the place to drop the object"
elif "place " in text:
text = text.replace("can you", "")
text = text.replace("please", "")
text = text.replace("reachy", "")

node.send_output("speech", pa.array(["I'm going to " + text]))

text = f"Given the prompt: {text}. Output the bounding boxes for the place to place the object"
node.send_output(
"text",
pa.array([text]),
{"image_id": "image_depth", "action": "release"},
)
elif " wait" in text:
node.send_output("speech", pa.array(["I'm going to wait for 5 seconds."]))

time.sleep(5)
if len(queue) > 0:
first_prompt = queue[0]
queue = queue[1:]
handle_event(first_prompt)
elif " speak" in text:
node.send_output("speech", pa.array([text.replace("speak ", "")]))

if len(queue) > 0:
first_prompt = queue[0]
queue = queue[1:]
handle_event(first_prompt)
elif " flip" in text:
text = text.replace("can you", "")
text = text.replace("please", "")
text = text.replace("reachy", "")

node.send_output("speech", pa.array(["I'm going to " + text]))

text = f"Given the prompt: {text}. Output the bounding boxes for the flipped object"
node.send_output(
"text",
pa.array([text]),
{"image_id": "image_depth", "action": "flip"},
)
# elif "flip " in text:
# node.send_output("flip", pa.array([True]))
elif "release left" in text:
node.send_output("action_release_left", pa.array([1.0]))
elif "release right" in text:
@@ -90,7 +146,6 @@ def handle_event(text: str):
node.send_output("points", pa.array([]))



for event in node:
if event["type"] == "INPUT":
if event["id"] == "text":


node-hub/dora-kokoro-tts/dora_kokoro_tts/main.py  +2 -2

@@ -7,7 +7,7 @@ from dora import Node
from kokoro import KPipeline

LANGUAGE = os.getenv("LANGUAGE", "en")
VOICE = os.getenv("VOICE", "af_heart")
def main():
"""TODO: Add docstring."""
if LANGUAGE in ["en", "english"]:
@@ -31,7 +31,7 @@ def main():

generator = pipeline(
text,
voice="af_heart", # <= change voice here
voice=VOICE, # <= change voice here
speed=1.2,
split_pattern=r"\n+",
)


node-hub/dora-object-to-pose/src/lib.rs  +1 -1

@@ -82,7 +82,7 @@ fn points_to_pose(points: &[(f32, f32, f32)]) -> Vec<f32> {
_z_max,
) = points
.iter()
- .filter(|(_x, _y, z)| (z - mean_z).abs() < 0.07)
+ .filter(|(_x, _y, z)| (z - mean_z).abs() < 0.05)
.fold(
(
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, -10.0, 10.0, -10.0, 10., -10.0,


node-hub/dora-reachy2/dora_reachy2/head.py  +2 -1

@@ -50,8 +50,9 @@ def main():
[roll, _pitch, yaw] = reachy.head.get_current_positions()
if "look" in event["id"]:
[x, y, z] = event["value"].to_numpy()
duration = event["metadata"].get("duration", 0.5)
reachy.head.cancel_all_goto()
- reachy.head.look_at(x, y, z, duration=0.5)
+ reachy.head.look_at(x, y, z, duration=float(duration))

if reachy.head is not None:
reachy.head.turn_off()
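The head node now honors a duration passed in the message metadata instead of the hard-coded 0.5 s. A sender-side sketch for any node wired to the head's look input (target values illustrative):

import pyarrow as pa
from dora import Node

node = Node()

# Look at a point in the robot frame, taking 0.8 s to get there.
node.send_output(
    "look",
    pa.array([0.4, 0.0, 0.0]),     # x, y, z target
    metadata={"duration": "0.8"},  # parsed with float() on the head side
)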


node-hub/dora-reachy2/dora_reachy2/left_arm.py  +16 -14

@@ -12,44 +12,46 @@ from scipy.spatial.transform import Rotation
ROBOT_IP = os.getenv("ROBOT_IP", "127.0.0.1")


- def l_arm_go_to_mixed_angles(reachy, x, y, z):
+ def l_arm_go_to_mixed_angles(reachy, x, y, z, roll):
"""TODO: Add docstring."""
- for theta in range(-46, -46, 10):
+ theta = -60
r = Rotation.from_euler("zyx", [0, theta, 0], degrees=True)
transform = np.eye(4)
transform[:3, :3] = r.as_matrix()
transform[:3, 3] = [x, y, z]

try:
- return reachy.l_arm.inverse_kinematics(transform)
+ joint = reachy.l_arm.inverse_kinematics(transform)
+ if roll:
+ joint[-1] = joint[-1] - 180
+ return joint
except ValueError:
continue

for yaw in range(0, 30, 10):

## First try turning left
- pitch = -46
+ pitch = -60

r = Rotation.from_euler("ZYX", (-yaw, 0, 0), degrees=True) * Rotation.from_euler(
"ZYX",
(0, pitch, 0),
degrees=True,
)
transform = np.eye(4)
transform[:3, :3] = r.as_matrix()
transform[:3, 3] = [x, y, z]

try:
- return reachy.l_arm.inverse_kinematics(transform)
+ joint = reachy.l_arm.inverse_kinematics(transform)
+ if roll:
+ joint[-1] = joint[-1] - 180
+ return joint
except ValueError:
pass

try:
return reachy.l_arm.inverse_kinematics(transform)

except ValueError:
continue

print("Left arm: No solution found for x, y, z: ", x, y, z)
return []

@@ -103,11 +105,11 @@ def main():
x = value[0]
y = value[1]
z = value[2]
- _r = value[3]
+ r = value[3]
_p = value[4]
_y = value[5]
gripper = value[6]
- joints = l_arm_go_to_mixed_angles(reachy, x, y, z)
+ joints = l_arm_go_to_mixed_angles(reachy, x, y, z, np.rad2deg(r))
response_ik = len(joints) > 0
if response_ik:
joint_values.append((joints, gripper))


node-hub/dora-reachy2/dora_reachy2/mobile_base.py  +4 -1

@@ -27,7 +27,10 @@ def main():
reachy.mobile_base.send_speed_command()
elif event["id"] == "translate_base":
[x, y, _z, _rx, _ry, rz] = event["value"].to_numpy()
- reachy.mobile_base.translate_by(x, y, wait=True)
+ duration = float(event["metadata"].get("duration", 1))
+
+ if abs(y) < 1:
+ reachy.mobile_base.translate_by(x, y, wait=True)

node.send_output("response_base", pa.array([True]))
elif event["id"] == "action_whisper":


node-hub/dora-reachy2/dora_reachy2/right_arm.py  +17 -16

@@ -9,47 +9,48 @@ from dora import Node
from reachy2_sdk import ReachySDK
from scipy.spatial.transform import Rotation

ROBOT_IP = os.getenv("ROBOT_IP", "10.42.0.242")
ROBOT_IP = os.getenv("ROBOT_IP", "127.0.0.1")


- def r_arm_go_to_mixed_angles(reachy, x, y, z):
+ def r_arm_go_to_mixed_angles(reachy, x, y, z, roll):
"""TODO: Add docstring."""
- for theta in range(-46, -46, 10):
+ theta = -60

r = Rotation.from_euler("zyx", [0, theta, 0], degrees=True)
transform = np.eye(4)
transform[:3, :3] = r.as_matrix()
transform[:3, 3] = [x, y, z]

try:
- return reachy.r_arm.inverse_kinematics(transform)
+ joint = reachy.r_arm.inverse_kinematics(transform)
+ if roll:
+ joint[-1] = joint[-1] + 180
+ return joint
except ValueError:
continue

for yaw in range(0, 30, 10):

## First try turning left
- pitch = -46
+ pitch = -60
r = Rotation.from_euler("ZYX", (yaw, 0, 0), degrees=True) * Rotation.from_euler(
"ZYX",
(0, pitch, 0),
degrees=True,
)
transform = np.eye(4)
transform[:3, :3] = r.as_matrix()
transform[:3, 3] = [x, y, z]

try:
return reachy.r_arm.inverse_kinematics(transform)
except ValueError:
pass

try:
- return reachy.r_arm.inverse_kinematics(transform)
+ joint = reachy.r_arm.inverse_kinematics(transform)
+ if roll:
+ joint[-1] = joint[-1] + 180
+ return joint

except ValueError:
- continue

+ pass

print("Right arm: No solution found for x, y, z: ", x, y, z)
return []
@@ -104,11 +105,11 @@ def main():
x = value[0]
y = value[1]
z = value[2]
- _r = value[3]
+ r = value[3]
_p = value[4]
_y = value[5]
gripper = value[6]
- joints = r_arm_go_to_mixed_angles(reachy, x, y, z)
+ joints = r_arm_go_to_mixed_angles(reachy, x, y, z, np.rad2deg(r))
response_ik = len(joints) > 0
if response_ik:
joint_values.append((joints, gripper))
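Both arm nodes now thread the commanded roll through inverse kinematics: the pitch attempt is fixed at -60 degrees, and a non-zero roll flips the wrist by offsetting the last joint 180 degrees (added on the right arm, subtracted on the left), which is what the new flip trajectories in parse_pose rely on. A condensed sketch of the shared pattern (the fallback yaw sweep from the diff is elided, and the function name is hypothetical):

import numpy as np
from scipy.spatial.transform import Rotation


def arm_ik_with_roll(arm, x, y, z, roll_deg, wrist_sign=1):
    """IK at a fixed -60 deg pitch; flip the wrist 180 deg when roll is set.

    wrist_sign is +1 for reachy.r_arm and -1 for reachy.l_arm, as in the diff.
    """
    r = Rotation.from_euler("zyx", [0, -60, 0], degrees=True)
    transform = np.eye(4)
    transform[:3, :3] = r.as_matrix()
    transform[:3, 3] = [x, y, z]
    try:
        joints = arm.inverse_kinematics(transform)
    except ValueError:
        return []  # the real nodes then sweep yaw before giving up
    if roll_deg:
        joints[-1] = joints[-1] + wrist_sign * 180
    return joints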

