
Added N flag inside node-hub; E flag is pending
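The "N flag" here is presumably the pep8-naming ("N") rule family of the project's Python linter, with pycodestyle's "E" rules left for a follow-up; the linter configuration change itself is not visible in this diff. Every hunk below applies the same convention, sketched here with illustrative names only:

    import os

    DEFAULT_PATH = "checkpoints/model"  # module-level constant: UPPER_CASE is allowed


    def load_model():
        # N806 rejects UPPER_CASE names bound inside a function, e.g.
        #   MODEL_NAME_OR_PATH = os.getenv("MODEL_NAME_OR_PATH", DEFAULT_PATH)
        # so the local becomes snake_case while the env var name stays as-is:
        model_name_or_path = os.getenv("MODEL_NAME_OR_PATH", DEFAULT_PATH)
        return model_name_or_path

Function and argument names get the same treatment (N802/N803), which is why `set_P(self, motor_id, P)` becomes `set_p(self, motor_id, p)` in the first file below.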

tags/v0.3.11-rc1
Somay (DIIN) 10 months ago
parent
commit
59e196d232
17 changed files with 94 additions and 94 deletions
  1. +8  -8   examples/aloha/benchmark/python/dynamixel.py
  2. +5  -5   examples/piper/convert.py
  3. +3  -3   node-hub/dora-argotranslate/dora_argotranslate/main.py
  4. +5  -5   node-hub/dora-distil-whisper/dora_distil_whisper/main.py
  5. +7  -7   node-hub/dora-internvl/dora_internvl/main.py
  6. +10 -10  node-hub/dora-magma/dora_magma/main.py
  7. +2  -2   node-hub/dora-piper/dora_piper/main.py
  8. +8  -8   node-hub/dora-pyrealsense/dora_pyrealsense/main.py
  9. +8  -8   node-hub/dora-rdt-1b/dora_rdt_1b/main.py
  10. +11 -11 node-hub/dora-rdt-1b/tests/test_dora_rdt_1b.py
  11. +4  -4  node-hub/dora-reachy1/dora_reachy1/main.py
  12. +4  -4  node-hub/dora-reachy1/dora_reachy1_vision/main.py
  13. +5  -5  node-hub/dora-reachy2/dora_reachy2/camera.py
  14. +6  -6  node-hub/dora-reachy2/dora_reachy2/head.py
  15. +3  -3  node-hub/dora-reachy2/dora_reachy2/left_arm.py
  16. +2  -2  node-hub/dora-reachy2/dora_reachy2/mobile_base.py
  17. +3  -3  node-hub/dora-reachy2/dora_reachy2/right_arm.py

+8 -8  examples/aloha/benchmark/python/dynamixel.py

@@ -377,29 +377,29 @@ class Dynamixel:
         )
         self._process_response(dxl_comm_result, dxl_error, motor_id)

-    def set_P(self, motor_id: int, P: int):
+    def set_p(self, motor_id: int, p: int):
         """Set the position P gain for the specified servo.

         Args:
-            motor_id: ID of the servo to control.
-            P: Position P gain value to set.
+            motor_id (int): The ID of the servo motor
+            p (int): The position P gain value to set

         """
         dxl_comm_result, dxl_error = self.packetHandler.write2ByteTxRx(
-            self.portHandler, motor_id, self.POSITION_P, P,
+            self.portHandler, motor_id, self.POSITION_P, p,
         )
         self._process_response(dxl_comm_result, dxl_error, motor_id)

-    def set_I(self, motor_id: int, I: int):
+    def set_i(self, motor_id: int, i: int):
         """Set the position I gain for the specified servo.

         Args:
-            motor_id: ID of the servo to control.
-            I: Position I gain value to set.
+            motor_id (int): The ID of the servo motor
+            i (int): The position I gain value to set

         """
         dxl_comm_result, dxl_error = self.packetHandler.write2ByteTxRx(
-            self.portHandler, motor_id, self.POSITION_I, I,
+            self.portHandler, motor_id, self.POSITION_I, i,
        )
         self._process_response(dxl_comm_result, dxl_error, motor_id)
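Since `set_P` and `set_I` are public methods of `Dynamixel`, this rename is API-breaking and any call sites need the same treatment. A hypothetical call site (the constructor arguments, servo ID, and gain values are illustrative, not taken from this commit):

    dxl = Dynamixel()             # construction details assumed for illustration
    dxl.set_p(motor_id=1, p=400)  # was: dxl.set_P(1, 400)
    dxl.set_i(motor_id=1, i=0)    # was: dxl.set_I(1, 0)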




+5 -5  examples/piper/convert.py

@@ -1,29 +1,29 @@
 """TODO: Add docstring."""

 import numpy as np
-from scipy.spatial.transform import Rotation as R
+from scipy.spatial.transform import Rotation


 def convert_quaternion_to_euler(quat):
     """Convert Quaternion (xyzw) to Euler angles (rpy)."""
     # Normalize
     quat = quat / np.linalg.norm(quat)
-    return R.from_quat(quat).as_euler("xyz")
+    return Rotation.from_quat(quat).as_euler("xyz")


 def convert_euler_to_quaternion(euler):
     """Convert Euler angles (rpy) to Quaternion (xyzw)."""
-    return R.from_euler("xyz", euler).as_quat()
+    return Rotation.from_euler("xyz", euler).as_quat()


 def convert_euler_to_rotation_matrix(euler):
     """Convert Euler angles (rpy) to rotation matrix (3x3)."""
-    return R.from_euler("xyz", euler).as_matrix()
+    return Rotation.from_euler("xyz", euler).as_matrix()


 def convert_rotation_matrix_to_euler(rotmat):
     """Convert rotation matrix (3x3) to Euler angles (rpy)."""
-    r = R.from_matrix(rotmat)
+    r = Rotation.from_matrix(rotmat)
     return r.as_euler("xyz", degrees=False)
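Dropping the `Rotation as R` alias (the sort of acronym-style import alias the N rules flag) makes each call site longer but self-describing. A quick round-trip sanity check of the renamed helpers, runnable on its own:

    import numpy as np
    from scipy.spatial.transform import Rotation

    quat = np.array([0.0, 0.0, 0.0, 1.0])               # identity quaternion (xyzw)
    euler = Rotation.from_quat(quat).as_euler("xyz")    # -> array([0., 0., 0.])
    back = Rotation.from_euler("xyz", euler).as_quat()  # -> array([0., 0., 0., 1.])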






+3 -3  node-hub/dora-argotranslate/dora_argotranslate/main.py

@@ -32,15 +32,15 @@ def main():
             break
         if event["type"] == "INPUT" and event["id"] == "text":
             text = event["value"][0].as_py()
-            translatedText = argostranslate.translate.translate(
+            translated_text = argostranslate.translate.translate(
                 text,
                 from_code,
                 to_code,
             )
             print(text, flush=True)
-            print("translated: " + translatedText, flush=True)
+            print("translated: " + translated_text, flush=True)
             node.send_output(
                 "text",
-                pa.array([translatedText]),
+                pa.array([translated_text]),
                 {"language": to_code},
             )

+5 -5  node-hub/dora-distil-whisper/dora_distil_whisper/main.py

@@ -90,26 +90,26 @@ def load_model():
     """TODO: Add docstring."""
     from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline

-    MODEL_NAME_OR_PATH = os.getenv("MODEL_NAME_OR_PATH", DEFAULT_PATH)
+    model_name_or_path = os.getenv("MODEL_NAME_OR_PATH", DEFAULT_PATH)

     if bool(os.getenv("USE_MODELSCOPE_HUB") in ["True", "true"]):
         from modelscope import snapshot_download

-        if not Path(MODEL_NAME_OR_PATH).exists():
-            MODEL_NAME_OR_PATH = snapshot_download(MODEL_NAME_OR_PATH)
+        if not Path(model_name_or_path).exists():
+            model_name_or_path = snapshot_download(model_name_or_path)

     device = "cuda:0" if torch.cuda.is_available() else "cpu"
     torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

     model = AutoModelForSpeechSeq2Seq.from_pretrained(
-        MODEL_NAME_OR_PATH,
+        model_name_or_path,
         torch_dtype=torch_dtype,
         low_cpu_mem_usage=True,
         use_safetensors=True,
     )
     model.to(device)

-    processor = AutoProcessor.from_pretrained(MODEL_NAME_OR_PATH)
+    processor = AutoProcessor.from_pretrained(model_name_or_path)
     return pipeline(
         "automatic-speech-recognition",
         model=model,


+7 -7  node-hub/dora-internvl/dora_internvl/main.py

@@ -5,9 +5,9 @@ import os
 import numpy as np
 import pyarrow as pa
 import torch
-import torchvision.transforms as T
 from dora import Node
 from PIL import Image
+from torchvision import transforms
 from torchvision.transforms.functional import InterpolationMode
 from transformers import AutoModel, AutoTokenizer

@@ -17,13 +17,13 @@ IMAGENET_STD = (0.229, 0.224, 0.225)

 def build_transform(input_size):
     """TODO: Add docstring."""
-    MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
-    return T.Compose(
+    mean, std = IMAGENET_MEAN, IMAGENET_STD
+    return transforms.Compose(
         [
-            T.Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
-            T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
-            T.ToTensor(),
-            T.Normalize(mean=MEAN, std=STD),
+            transforms.Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
+            transforms.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
+            transforms.ToTensor(),
+            transforms.Normalize(mean=mean, std=std),
         ],
     )




+10 -10  node-hub/dora-magma/dora_magma/main.py

@@ -22,33 +22,33 @@ magma_dir = current_dir.parent / "Magma" / "magma"

 def load_magma_models():
     """TODO: Add docstring."""
-    DEFAULT_PATH = str(magma_dir.parent / "checkpoints" / "Magma-8B")
-    if not os.path.exists(DEFAULT_PATH):
-        DEFAULT_PATH = str(magma_dir.parent)
-        if not os.path.exists(DEFAULT_PATH):
+    default_path = str(magma_dir.parent / "checkpoints" / "Magma-8B")
+    if not os.path.exists(default_path):
+        default_path = str(magma_dir.parent)
+        if not os.path.exists(default_path):
             logger.warning(
                 "Warning: Magma submodule not found, falling back to HuggingFace version",
             )
-            DEFAULT_PATH = "microsoft/Magma-8B"
+            default_path = "microsoft/Magma-8B"

-    MODEL_NAME_OR_PATH = os.getenv("MODEL_NAME_OR_PATH", DEFAULT_PATH)
-    logger.info(f"Loading Magma model from: {MODEL_NAME_OR_PATH}")
+    model_name_or_path = os.getenv("MODEL_NAME_OR_PATH", default_path)
+    logger.info(f"Loading Magma model from: {model_name_or_path}")

     try:
         model = AutoModelForCausalLM.from_pretrained(
-            MODEL_NAME_OR_PATH,
+            model_name_or_path,
             trust_remote_code=True,
             torch_dtype=torch.bfloat16,
             device_map="auto",
         )
         processor = AutoProcessor.from_pretrained(
-            MODEL_NAME_OR_PATH, trust_remote_code=True,
+            model_name_or_path, trust_remote_code=True,
         )
     except Exception as e:
         logger.error(f"Failed to load model: {e}")
         raise

-    return model, processor, MODEL_NAME_OR_PATH
+    return model, processor, model_name_or_path


 model, processor, MODEL_NAME_OR_PATH = load_magma_models()
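Note the asymmetry the last context line preserves: every binding inside `load_magma_models` is now snake_case, while the module-level `model, processor, MODEL_NAME_OR_PATH = load_magma_models()` keeps its old name. Assuming these are the pep8-naming rules, that is expected, since N806 ("variable in function should be lowercase") only fires inside a function body. A sketch of the scoping distinction (not code from the repository):

    MODEL_NAME_OR_PATH = "microsoft/Magma-8B"      # module scope: N806 does not apply


    def load():
        MODEL_NAME_OR_PATH = "microsoft/Magma-8B"  # function scope: N806 flags this
        return MODEL_NAME_OR_PATH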


+2 -2  node-hub/dora-piper/dora_piper/main.py

@@ -47,8 +47,8 @@ def enable_fun(piper: C_PiperInterface):
 def main():
     """TODO: Add docstring."""
     elapsed_time = time.time()
-    CAN_BUS = os.getenv("CAN_BUS", "")
-    piper = C_PiperInterface(CAN_BUS)
+    can_bus = os.getenv("CAN_BUS", "")
+    piper = C_PiperInterface(can_bus)
     piper.ConnectPort()

     if not TEACH_MODE:
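The same two-line pattern recurs in the pyrealsense and reachy nodes below: the environment variable keeps its UPPER_CASE shell-convention name, and only the Python local receiving its value is renamed. In miniature:

    import os

    can_bus = os.getenv("CAN_BUS", "")  # env var name unchanged; local now snake_case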


+8 -8  node-hub/dora-pyrealsense/dora_pyrealsense/main.py

@@ -14,8 +14,8 @@ RUNNER_CI = True if os.getenv("CI") == "true" else False

 def main():
     """TODO: Add docstring."""
-    FLIP = os.getenv("FLIP", "")
-    DEVICE_SERIAL = os.getenv("DEVICE_SERIAL", "")
+    flip = os.getenv("FLIP", "")
+    device_serial = os.getenv("DEVICE_SERIAL", "")
     image_height = int(os.getenv("IMAGE_HEIGHT", "480"))
     image_width = int(os.getenv("IMAGE_WIDTH", "640"))
     encoding = os.getenv("ENCODING", "rgb8")

@@ -26,15 +26,15 @@ def main():

     # Serial list
     serials = [device.get_info(rs.camera_info.serial_number) for device in devices]
-    if DEVICE_SERIAL and (DEVICE_SERIAL in serials):
+    if device_serial and (device_serial in serials):
         raise ConnectionError(
-            f"Device with serial {DEVICE_SERIAL} not found within: {serials}.",
+            f"Device with serial {device_serial} not found within: {serials}.",
         )

     pipeline = rs.pipeline()

     config = rs.config()
-    config.enable_device(DEVICE_SERIAL)
+    config.enable_device(device_serial)
     config.enable_stream(rs.stream.color, image_width, image_height, rs.format.rgb8, 30)
     config.enable_stream(rs.stream.depth, image_width, image_height)

@@ -79,11 +79,11 @@ def main():

     ## Change rgb to bgr

-    if FLIP == "VERTICAL":
+    if flip == "VERTICAL":
         frame = cv2.flip(frame, 0)
-    elif FLIP == "HORIZONTAL":
+    elif flip == "HORIZONTAL":
         frame = cv2.flip(frame, 1)
-    elif FLIP == "BOTH":
+    elif flip == "BOTH":
         frame = cv2.flip(frame, -1)

     # resize the frame


+8 -8  node-hub/dora-rdt-1b/dora_rdt_1b/main.py

@@ -81,7 +81,7 @@ def get_language_embeddings():

     return lang_embeddings.unsqueeze(
         0,
-    )  # Size: (B, L_lang, D) or None, language condition tokens (variable length), dimension D is assumed to be the same as the hidden size.
+    )  # Size: (b, L_lang, D) or None, language condition tokens (variable length), dimension D is assumed to be the same as the hidden size.


 def expand2square(pil_img, background_color):

@@ -144,7 +144,7 @@ def process_image(rgbs_lst, image_processor, vision_encoder):
 def get_states(proprio):
     # suppose you control in 7DOF joint position
     """TODO: Add docstring."""
-    STATE_INDICES = [
+    state_indices = [
         STATE_VEC_IDX_MAPPING["left_arm_joint_0_pos"],
         STATE_VEC_IDX_MAPPING["left_arm_joint_1_pos"],
         STATE_VEC_IDX_MAPPING["left_arm_joint_2_pos"],

@@ -161,32 +161,32 @@ def get_states(proprio):
         STATE_VEC_IDX_MAPPING["right_gripper_open"],
     ]

-    B, N = 1, 1  # batch size and state history size
+    b, n = 1, 1  # batch size and state history size
     states = torch.zeros(
-        (B, N, config["model"]["state_token_dim"]), device=DEVICE, dtype=DTYPE,
+        (b, n, config["model"]["state_token_dim"]), device=DEVICE, dtype=DTYPE,
     )
     # suppose you do not have proprio
     # it's kind of tricky, I strongly suggest adding proprio as input and further fine-tuning
     proprio = torch.tensor(proprio, device=DEVICE, dtype=DTYPE).reshape(
         (1, 1, -1),
-    )  # B, N = 1, 1 # batch size and state history size
+    )  # b, n = 1, 1 # batch size and state history size

     # if you have proprio, you can do like this
     # format like this: [arm_joint_0_pos, arm_joint_1_pos, arm_joint_2_pos, arm_joint_3_pos, arm_joint_4_pos, arm_joint_5_pos, arm_joint_6_pos, gripper_open]
     # proprio = torch.tensor([0, 1, 2, 3, 4, 5, 6, 0.5]).reshape((1, 1, -1))
-    states[:, :, STATE_INDICES] = proprio
+    states[:, :, state_indices] = proprio

     state_elem_mask = torch.zeros(
         (1, config["model"]["state_token_dim"]), device=DEVICE, dtype=torch.bool,
     )

-    state_elem_mask[:, STATE_INDICES] = True
+    state_elem_mask[:, state_indices] = True
     states, state_elem_mask = (
         states.to(DEVICE, dtype=DTYPE),
         state_elem_mask.to(DEVICE, dtype=DTYPE),
     )
     states = states[:, -1:, :]  # only use the last state
-    return states, state_elem_mask, STATE_INDICES
+    return states, state_elem_mask, state_indices


 def main():


+11 -11  node-hub/dora-rdt-1b/tests/test_dora_rdt_1b.py

@@ -172,9 +172,9 @@ def test_dummy_states():

     # suppose you do not have proprio
     # it's kind of tricky, I strongly suggest adding proprio as input and further fine-tuning
-    B, N = 1, 1  # batch size and state history size
+    b, n = 1, 1  # batch size and state history size
     states = torch.zeros(
-        (B, N, config["model"]["state_token_dim"]),
+        (b, n, config["model"]["state_token_dim"]),
         device=DEVICE,
         dtype=DTYPE,
     )

@@ -182,10 +182,10 @@ def test_dummy_states():
     # if you have proprio, you can do like this
     # format like this: [arm_joint_0_pos, arm_joint_1_pos, arm_joint_2_pos, arm_joint_3_pos, arm_joint_4_pos, arm_joint_5_pos, arm_joint_6_pos, gripper_open]
     # proprio = torch.tensor([0, 1, 2, 3, 4, 5, 6, 0.5]).reshape((1, 1, -1))
-    # states[:, :, STATE_INDICES] = proprio
+    # states[:, :, state_indices] = proprio

     state_elem_mask = torch.zeros(
-        (B, config["model"]["state_token_dim"]),
+        (b, config["model"]["state_token_dim"]),
         device=DEVICE,
         dtype=torch.bool,
     )

@@ -194,7 +194,7 @@ def test_dummy_states():
     )

     # suppose you control in 7DOF joint position
-    STATE_INDICES = [
+    state_indices = [
         STATE_VEC_IDX_MAPPING["arm_joint_0_pos"],
         STATE_VEC_IDX_MAPPING["arm_joint_1_pos"],
         STATE_VEC_IDX_MAPPING["arm_joint_2_pos"],

@@ -205,7 +205,7 @@ def test_dummy_states():
         STATE_VEC_IDX_MAPPING["gripper_open"],
     ]

-    state_elem_mask[:, STATE_INDICES] = True
+    state_elem_mask[:, state_indices] = True
     states, state_elem_mask = (
         states.to(DEVICE, dtype=DTYPE),
         state_elem_mask.to(DEVICE, dtype=DTYPE),

@@ -213,7 +213,7 @@ def test_dummy_states():
     states = states[:, -1:, :]  # only use the last state
     pytest.states = states
     pytest.state_elem_mask = state_elem_mask
-    pytest.STATE_INDICES = STATE_INDICES
+    pytest.state_indices = state_indices


 def test_dummy_input():

@@ -227,7 +227,7 @@ def test_dummy_input():
     image_embeds = pytest.image_embeds
     state_elem_mask = pytest.state_elem_mask
     states = pytest.states
-    STATE_INDICES = pytest.STATE_INDICES
+    state_indices = pytest.state_indices

     actions = rdt.predict_action(
         lang_tokens=lang_embeddings,

@@ -242,10 +242,10 @@ def test_dummy_input():
         ctrl_freqs=torch.tensor([25.0], device=DEVICE),  # would this default work?
     )  # (1, chunk_size, 128)

-    # select the meaning action via STATE_INDICES
+    # select the meaning action via state_indices
     action = actions[
         :,
         :,
-        STATE_INDICES,
-    ]  # (1, chunk_size, len(STATE_INDICES)) = (1, chunk_size, 7+ 1)
+        state_indices,
+    ]  # (1, chunk_size, len(state_indices)) = (1, chunk_size, 7+ 1)
     print(action)

+4 -4  node-hub/dora-reachy1/dora_reachy1/main.py

@@ -10,7 +10,7 @@ from reachy_sdk.trajectory import goto

 def r_arm_inverse_kinematics(reachy, pose, action) -> list:
     """TODO: Add docstring."""
-    A = np.array(
+    a = np.array(
         [
             [0, 0, -1, pose[0] + action[0]],
             [0, 1, 0, pose[1] + action[1]],

@@ -18,7 +18,7 @@ def r_arm_inverse_kinematics(reachy, pose, action) -> list:
             [0, 0, 0, 1],
         ],
     )
-    return reachy.r_arm.inverse_kinematics(A)
+    return reachy.r_arm.inverse_kinematics(a)


 def happy_antennas(reachy):

@@ -54,9 +54,9 @@ def main():
     """TODO: Add docstring."""
     node = Node()

-    ROBOT_IP = os.getenv("ROBOT_IP", "10.42.0.24")
+    robot_ip = os.getenv("ROBOT_IP", "10.42.0.24")

-    reachy = ReachySDK(ROBOT_IP, with_mobile_base=False)
+    reachy = ReachySDK(robot_ip, with_mobile_base=False)
     reachy.turn_on("r_arm")
     reachy.turn_on("head")




+4 -4  node-hub/dora-reachy1/dora_reachy1_vision/main.py

@@ -11,14 +11,14 @@ def main():
     """TODO: Add docstring."""
     node = Node()

-    ROBOT_IP = os.getenv("ROBOT_IP", "10.42.0.24")
-    CAMERA = os.getenv("CAMERA", "right")
+    robot_ip = os.getenv("ROBOT_IP", "10.42.0.24")
+    camera = os.getenv("CAMERA", "right")

-    reachy = ReachySDK(ROBOT_IP, with_mobile_base=False)
+    reachy = ReachySDK(robot_ip, with_mobile_base=False)

     for event in node:
         if event["type"] == "INPUT":
-            if CAMERA == "right":
+            if camera == "right":
                 frame = reachy.right_camera.last_frame
             else:
                 frame = reachy.left_camera.last_frame


+5 -5  node-hub/dora-reachy2/dora_reachy2/camera.py

@@ -11,10 +11,10 @@ from reachy2_sdk.media.camera import CameraView

 def main():
     """TODO: Add docstring."""
-    ROBOT_IP = os.getenv("ROBOT_IP", "10.42.0.80")
+    robot_ip = os.getenv("ROBOT_IP", "10.42.0.80")

     for _ in range(10):
-        reachy = ReachySDK(ROBOT_IP)
+        reachy = ReachySDK(robot_ip)
         try:
             reachy.cameras.teleop.get_frame(view=CameraView.LEFT)
             params = reachy.cameras.depth.get_parameters(view=CameraView.DEPTH)

@@ -28,7 +28,7 @@ def main():

     reachy.cameras.teleop.get_frame(view=CameraView.LEFT)
     params = reachy.cameras.depth.get_parameters(view=CameraView.DEPTH)
-    height, width, _distortion_model, _D, K, _R, _P = params
+    height, width, _distortion_model, _d, k, _r, _p = params

     node = Node()

@@ -90,8 +90,8 @@ def main():
                 metadata={
                     "width": width,
                     "height": height,
-                    "focal": [int(K[0, 0]), int(K[1, 1])],
-                    "resolution": [int(K[0, 2]), int(K[1, 2])],
+                    "focal": [int(k[0, 0]), int(k[1, 1])],
+                    "resolution": [int(k[0, 2]), int(k[1, 2])],
                 },
             )
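The intrinsics unpacking keeps the leading underscore on the values the node never reads; under the N rules those throwaway locals are lowercased like any other function-scope binding. A tiny illustration (the tuple contents are made up):

    params = (720, 960, "plumb_bob", None, None, None, None)  # hypothetical values
    height, width, _distortion_model, _d, k, _r, _p = params  # _-prefix marks unused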




+6 -6  node-hub/dora-reachy2/dora_reachy2/head.py

@@ -9,17 +9,17 @@ from reachy2_sdk import ReachySDK

 def main():
     """TODO: Add docstring."""
-    ROBOT_IP = os.getenv("ROBOT_IP", "10.42.0.80")
+    robot_ip = os.getenv("ROBOT_IP", "10.42.0.80")

     for _i in range(5):
-        reachy = ReachySDK(ROBOT_IP)
+        reachy = ReachySDK(robot_ip)

         if reachy.head is not None:
             reachy.head.turn_on()
             reachy.head.goto([0, 0, 0])
             break
-    FOV_H = 107
-    FOV_V = 91
+    fov_h = 107
+    fov_v = 91
     resolution = [720, 960]

     roll, _pitch, yaw = reachy.head.get_current_positions()

@@ -37,8 +37,8 @@ def main():
                 x_min + x_max
             ) / 2 - 10  # Deviate a bit to take into account the off centered camera
             y = (3 * y_min + y_max) / 4
-            ry = (x - resolution[1] / 2) * FOV_H / 2 / resolution[1]
-            rz = (y - resolution[0] / 2) * FOV_V / 2 / resolution[0]
+            ry = (x - resolution[1] / 2) * fov_h / 2 / resolution[1]
+            rz = (y - resolution[0] / 2) * fov_v / 2 / resolution[0]
             if np.abs(yaw) > 45 and yaw * -ry > 0:
                 reachy.head.cancel_all_goto()
                 roll, _pitch, yaw = reachy.head.get_current_positions()


+3 -3  node-hub/dora-reachy2/dora_reachy2/left_arm.py

@@ -7,7 +7,7 @@ import numpy as np
 import pyarrow as pa
 from dora import Node
 from reachy2_sdk import ReachySDK
-from scipy.spatial.transform import Rotation as R
+from scipy.spatial.transform import Rotation

 ROBOT_IP = os.getenv("ROBOT_IP", "10.42.0.80")

@@ -26,7 +26,7 @@ l_default_pose = [
 def l_arm_go_to_mixed_angles(reachy, x, y, z):
     """TODO: Add docstring."""
     for theta in range(-80, -60, 10):
-        r = R.from_euler("zyx", [0, theta, 0], degrees=True)
+        r = Rotation.from_euler("zyx", [0, theta, 0], degrees=True)
         transform = np.eye(4)
         transform[:3, :3] = r.as_matrix()
         transform[:3, 3] = [x, y, z]

@@ -41,7 +41,7 @@ def l_arm_go_to_mixed_angles(reachy, x, y, z):

     ## First try turning left
     pitch = -90
-    r = R.from_euler("ZYX", (-yaw, 0, 0), degrees=True) * R.from_euler(
+    r = Rotation.from_euler("ZYX", (-yaw, 0, 0), degrees=True) * Rotation.from_euler(
         "ZYX",
         (0, pitch, 0),
         degrees=True,
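The second hunk composes two rotations with `*`; under the un-aliased import the chain reads longer but is the same scipy operation. A self-contained sketch of that composition (the angles here are arbitrary):

    from scipy.spatial.transform import Rotation

    yaw, pitch = 30.0, -90.0  # degrees, illustrative only
    r = Rotation.from_euler("ZYX", (-yaw, 0, 0), degrees=True) * Rotation.from_euler(
        "ZYX", (0, pitch, 0), degrees=True,
    )
    print(r.as_euler("ZYX", degrees=True))  # the composed intrinsic rotation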


+2 -2  node-hub/dora-reachy2/dora_reachy2/mobile_base.py

@@ -11,9 +11,9 @@ from reachy2_sdk import ReachySDK

 def main():
     """TODO: Add docstring."""
-    ROBOT_IP = os.getenv("ROBOT_IP", "10.42.0.80")
+    robot_ip = os.getenv("ROBOT_IP", "10.42.0.80")

-    reachy = ReachySDK(ROBOT_IP)
+    reachy = ReachySDK(robot_ip)

     if reachy.mobile_base is not None:
         reachy.mobile_base.turn_on()


+3 -3  node-hub/dora-reachy2/dora_reachy2/right_arm.py

@@ -7,7 +7,7 @@ import numpy as np
 import pyarrow as pa
 from dora import Node
 from reachy2_sdk import ReachySDK
-from scipy.spatial.transform import Rotation as R
+from scipy.spatial.transform import Rotation

 ROBOT_IP = os.getenv("ROBOT_IP", "10.42.0.80")

@@ -25,7 +25,7 @@ r_default_pose = [
 def r_arm_go_to_mixed_angles(reachy, x, y, z):
     """TODO: Add docstring."""
     for theta in range(-80, -60, 10):
-        r = R.from_euler("zyx", [0, theta, 0], degrees=True)
+        r = Rotation.from_euler("zyx", [0, theta, 0], degrees=True)
         transform = np.eye(4)
         transform[:3, :3] = r.as_matrix()
         transform[:3, 3] = [x, y, z]

@@ -40,7 +40,7 @@ def r_arm_go_to_mixed_angles(reachy, x, y, z):

     ## First try turning left
     pitch = -90
-    r = R.from_euler("ZYX", (yaw, 0, 0), degrees=True) * R.from_euler(
+    r = Rotation.from_euler("ZYX", (yaw, 0, 0), degrees=True) * Rotation.from_euler(
         "ZYX",
         (0, pitch, 0),
         degrees=True,

