
Add ios lidar (#762)

This PR adds support for working with the iOS LiDAR, as well as textured RGB-D images, within rerun.

<img width="1800" alt="Screenshot 2025-01-22 at 09 20 50"
src="https://github.com/user-attachments/assets/97ec6efe-db79-437b-9b4b-6f254da38333"
/>
Haixuan Xavier Tao · 1 year ago · parent commit 18d26d137c · tag v0.3.9-rc1
12 changed files with 288 additions and 63 deletions
1. examples/depth_camera/ios-dev.yaml (+16, -0)
2. examples/depth_camera/ios.yaml (+16, -0)
3. examples/depth_camera/realsense-dev.yaml (+2, -5)
4. examples/depth_camera/realsense.yaml (+16, -0)
5. examples/realsense/camera.yaml (+0, -20)
6. node-hub/dora-ios-lidar/README.md (+40, -0)
7. node-hub/dora-ios-lidar/dora_ios_lidar/__init__.py (+11, -0)
8. node-hub/dora-ios-lidar/dora_ios_lidar/__main__.py (+5, -0)
9. node-hub/dora-ios-lidar/dora_ios_lidar/main.py (+109, -0)
10. node-hub/dora-ios-lidar/pyproject.toml (+20, -0)
11. node-hub/dora-ios-lidar/tests/test_dora_ios_lidar.py (+9, -0)
12. node-hub/dora-rerun/src/lib.rs (+44, -38)

examples/depth_camera/ios-dev.yaml (+16, -0)

@@ -0,0 +1,16 @@

```yaml
nodes:
  - id: camera
    build: pip install -e ../../node-hub/dora-ios-lidar
    path: dora-ios-lidar
    inputs:
      tick: dora/timer/millis/20
    outputs:
      - image
      - depth

  - id: plot
    build: pip install -e ../../node-hub/dora-rerun
    path: dora-rerun
    inputs:
      image: camera/image
      depth: camera/depth
```

examples/depth_camera/ios.yaml (+16, -0)

@@ -0,0 +1,16 @@

```yaml
nodes:
  - id: camera
    build: pip install dora-ios-lidar
    path: dora-ios-lidar
    inputs:
      tick: dora/timer/millis/20
    outputs:
      - image
      - depth

  - id: plot
    build: pip install dora-rerun
    path: dora-rerun
    inputs:
      image: camera/image
      depth: camera/depth
```
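Same dataflow as `ios-dev.yaml` above, but installing the released `dora-ios-lidar` and `dora-rerun` packages from PyPI instead of the local editable checkouts.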

examples/realsense/camera_rerun.yaml → examples/depth_camera/realsense-dev.yaml (+2, -5)

```diff
@@ -1,15 +1,13 @@
 nodes:
   - id: camera
     build: pip install -e ../../node-hub/dora-pyrealsense
-    path: dora-pyrealsense
+    path: sudo
+    args: dora-pyrealsense
     inputs:
       tick: dora/timer/millis/20
     outputs:
       - image
       - depth
-    env:
-      IMAGE_WIDTH: 640
-      IMAGE_HEIGHT: 480
 
   - id: plot
     build: pip install -e ../../node-hub/dora-rerun
@@ -17,4 +15,3 @@ nodes:
     inputs:
       image: camera/image
       world/camera/depth: camera/depth
-
```
examples/depth_camera/realsense.yaml (+16, -0)

@@ -0,0 +1,16 @@

```yaml
nodes:
  - id: camera
    build: pip install dora-pyrealsense
    path: dora-pyrealsense
    inputs:
      tick: dora/timer/millis/20
    outputs:
      - image
      - depth

  - id: plot
    build: pip install dora-rerun
    path: dora-rerun
    inputs:
      image: camera/image
      depth: camera/depth
```

examples/realsense/camera.yaml (+0, -20)

@@ -1,20 +0,0 @@

```yaml
nodes:
  - id: camera
    build: pip install -e ../../node-hub/dora-pyrealsense
    path: dora-pyrealsense
    inputs:
      tick: dora/timer/millis/20
    outputs:
      - image
      - depth
    env:
      IMAGE_WIDTH: 640
      IMAGE_HEIGHT: 480

  - id: plot
    build: pip install ../../node-hub/opencv-plot
    path: opencv-plot
    inputs:
      image:
        source: camera/image
        queue_size: 1
```

node-hub/dora-ios-lidar/README.md (+40, -0)

@@ -0,0 +1,40 @@
# dora-ios-lidar

## Getting started

- Install it with pip:

```bash
pip install -e .
```

- Make sure to install the Record3D app on your iOS device.
- Buy the USB connection package in the app to enable streaming over USB; the snippet below checks the connection.
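
A minimal connectivity check, reusing the same `record3d` calls as this node's `main.py`:

```python
# List Record3D-capable devices visible over USB (sketch).
from record3d import Record3DStream

devs = Record3DStream.get_connected_devices()
print("{} device(s) found".format(len(devs)))
for dev in devs:
    print("\tID: {}".format(dev.product_id))
```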

## Contribution Guide

- Format with [ruff](https://docs.astral.sh/ruff/):

```bash
ruff check . --fix
```

- Lint with ruff:

```bash
ruff check .
```

- Test with [pytest](https://github.com/pytest-dev/pytest)

```bash
pytest . # Test
```

## YAML Specification

## Examples

## License

dora-ios-lidar's code is released under the MIT License.

node-hub/dora-ios-lidar/dora_ios_lidar/__init__.py (+11, -0)

@@ -0,0 +1,11 @@

```python
import os

# Define the path to the README file relative to the package directory
readme_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "README.md")

# Read the content of the README file
try:
    with open(readme_path, "r", encoding="utf-8") as f:
        __doc__ = f.read()
except FileNotFoundError:
    __doc__ = "README file not found."
```

node-hub/dora-ios-lidar/dora_ios_lidar/__main__.py (+5, -0)

@@ -0,0 +1,5 @@

```python
from .main import main


if __name__ == "__main__":
    main()
```
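This makes the package runnable as `python -m dora_ios_lidar`, mirroring the `dora-ios-lidar` console script declared in `pyproject.toml` below.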

node-hub/dora-ios-lidar/dora_ios_lidar/main.py (+109, -0)

@@ -0,0 +1,109 @@

```python
from threading import Event

import cv2
import numpy as np
import pyarrow as pa
from dora import Node
from record3d import Record3DStream


class DemoApp:
    def __init__(self):
        self.event = Event()
        self.session = None
        self.DEVICE_TYPE__TRUEDEPTH = 0
        self.DEVICE_TYPE__LIDAR = 1
        self.stop = False

    def on_new_frame(self):
        """Called from a non-main thread, so it cannot be used for presenting UI."""
        self.event.set()  # Notify the main thread to stop waiting and process the new frame.

    def on_stream_stopped(self):
        self.stop = True
        print("Stream stopped")

    def connect_to_device(self, dev_idx):
        print("Searching for devices")
        devs = Record3DStream.get_connected_devices()
        print("{} device(s) found".format(len(devs)))
        for dev in devs:
            print("\tID: {}\n".format(dev.product_id))

        if len(devs) <= dev_idx:
            raise RuntimeError(
                "Cannot connect to device #{}, try a different index.".format(dev_idx)
            )

        dev = devs[dev_idx]
        self.session = Record3DStream()
        self.session.on_new_frame = self.on_new_frame
        self.session.on_stream_stopped = self.on_stream_stopped
        self.session.connect(dev)  # Initiate connection and start capturing

    def get_intrinsic_mat_from_coeffs(self, coeffs):
        return np.array(
            [[coeffs.fx, 0, coeffs.tx], [0, coeffs.fy, coeffs.ty], [0, 0, 1]]
        )

    def start_processing_stream(self):
        node = Node()

        for event in node:
            if self.stop:
                break
            if event["type"] == "INPUT":
                if event["id"] == "tick":  # input name as declared in the dataflow YAML
                    self.event.wait()  # Wait for a new frame to arrive

                    # Copy the newly arrived RGBD frame
                    depth = self.session.get_depth_frame()
                    rgb = self.session.get_rgb_frame()
                    intrinsic_mat = self.get_intrinsic_mat_from_coeffs(
                        self.session.get_intrinsic_mat()
                    )

                    # rgb is (H, W, 3) and depth is (H, W); match their spatial dims.
                    if depth.shape[:2] != rgb.shape[:2]:
                        rgb = cv2.resize(rgb, (depth.shape[1], depth.shape[0]))

                    node.send_output(
                        "image",
                        pa.array(rgb.ravel()),
                        metadata={
                            "encoding": "rgb8",
                            "width": rgb.shape[1],
                            "height": rgb.shape[0],
                        },
                    )

                    node.send_output(
                        "depth",
                        pa.array(depth.ravel().astype(np.float64)),
                        metadata={
                            "width": depth.shape[1],
                            "height": depth.shape[0],
                            "encoding": "CV_64F",
                            "focal": [
                                int(intrinsic_mat[0, 0]),
                                int(intrinsic_mat[1, 1]),
                            ],
                            "resolution": [
                                int(intrinsic_mat[0, 2]),
                                int(intrinsic_mat[1, 2]),
                            ],
                        },
                    )

                    self.event.clear()


def main():
    app = DemoApp()
    app.connect_to_device(dev_idx=0)
    app.start_processing_stream()


if __name__ == "__main__":
    main()
```
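
Not part of this diff: a minimal sketch of a downstream consumer that rebuilds arrays from these outputs, assuming the dora Python API's event dict (`event["value"]` as a pyarrow array, `event["metadata"]` as a dict) and the metadata fields sent above:

```python
# Hypothetical consumer sketch (not in this PR): reshape the flattened
# buffers emitted by dora-ios-lidar using the attached metadata.
from dora import Node

node = Node()
for event in node:
    if event["type"] != "INPUT":
        continue
    meta = event["metadata"]
    h, w = int(meta["height"]), int(meta["width"])
    if event["id"] == "image":
        rgb = event["value"].to_numpy().reshape((h, w, 3))  # uint8, "rgb8" encoding
    elif event["id"] == "depth":
        depth = event["value"].to_numpy().reshape((h, w))  # float64, "CV_64F" encoding
```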

node-hub/dora-ios-lidar/pyproject.toml (+20, -0)

@@ -0,0 +1,20 @@

```toml
[project]
name = "dora-ios-lidar"
version = "0.0.0"
authors = [{ name = "Your Name", email = "email@email.com" }]
description = "dora-ios-lidar"
license = { text = "MIT" }
readme = "README.md"
requires-python = ">=3.8"

dependencies = [
    "dora-rs >= 0.3.6",
    "opencv-python>=4.11.0.86",
    "record3d>=1.4",
]

[dependency-groups]
dev = ["pytest >=8.1.1", "ruff >=0.9.1"]

[project.scripts]
dora-ios-lidar = "dora_ios_lidar.main:main"
```

node-hub/dora-ios-lidar/tests/test_dora_ios_lidar.py (+9, -0)

@@ -0,0 +1,9 @@

```python
import pytest


def test_import_main():
    from dora_ios_lidar.main import main

    # Check that everything is working, and catch the dora runtime exception,
    # as we're not running inside a dora dataflow.
    with pytest.raises(RuntimeError):
        main()
```

node-hub/dora-rerun/src/lib.rs (+44, -38)

```diff
@@ -1,6 +1,6 @@
 //! Demonstrates the most barebone usage of the Rerun SDK.
 
-use std::env::VarError;
+use std::{collections::HashMap, env::VarError};
 
 use dora_node_api::{
     arrow::{
@@ -12,7 +12,8 @@ use dora_node_api::{
 use eyre::{eyre, Context, ContextCompat, Result};
 
 use rerun::{
-    components::ImageBuffer, external::re_types::ArrowBuffer, ImageFormat, SpawnOptions, Text,
+    components::ImageBuffer, external::re_types::ArrowBuffer, ImageFormat, Points3D, SpawnOptions,
+    Text,
 };
 pub mod series;
 pub mod urdf;
@@ -41,6 +42,9 @@ pub fn lib_main() -> Result<()> {
 
     options.memory_limit = memory_limit;
 
+    // Setup an image cache to paint depth images.
+    let mut image_cache = HashMap::new();
+
     let rec = rerun::RecordingStreamBuilder::new("dora-rerun")
         .spawn_opts(&options, None)
         .context("Could not spawn rerun visualization")?;
@@ -94,6 +98,7 @@ pub fn lib_main() -> Result<()> {
                 // Transpose values from BGR to RGB
                 let buffer: Vec<u8> =
                     buffer.chunks(3).flat_map(|x| [x[2], x[1], x[0]]).collect();
+                image_cache.insert(id.clone(), buffer.clone());
                 let buffer = ArrowBuffer::from(buffer);
                 let image_buffer = ImageBuffer::try_from(buffer)
                     .context("Could not convert buffer to image buffer")?;
@@ -107,6 +112,7 @@ pub fn lib_main() -> Result<()> {
                     .context("could not log image")?;
             } else if encoding == "rgb8" {
                 let buffer: &UInt8Array = data.as_any().downcast_ref().unwrap();
+                image_cache.insert(id.clone(), buffer.values().to_vec());
                 let buffer: &[u8] = buffer.values();
                 let buffer = ArrowBuffer::from(buffer);
                 let image_buffer = ImageBuffer::try_from(buffer)
@@ -127,49 +133,49 @@ pub fn lib_main() -> Result<()> {
                     .context("could not log image")?;
             };
         } else if id.as_str().contains("depth") {
-            let height =
-                if let Some(Parameter::Integer(height)) = metadata.parameters.get("height") {
-                    height
-                } else {
-                    &480
-                };
             let width =
                 if let Some(Parameter::Integer(width)) = metadata.parameters.get("width") {
                     width
                 } else {
                     &640
                 };
-            // let focal_length = if let Some(Parameter::ListInt(focals)) =
-            //     metadata.parameters.get("focal_length")
-            // {
-            //     focals
-            // } else {
-            //     &vec![605, 605]
-            // };
-            // let resolutions = if let Some(Parameter::ListInt(resolutions)) =
-            //     metadata.parameters.get("resolutions")
-            // {
-            //     resolutions
-            // } else {
-            //     &vec![640, 480]
-            // };
+            let focal_length =
+                if let Some(Parameter::ListInt(focals)) = metadata.parameters.get("focal") {
+                    focals.to_vec()
+                } else {
+                    vec![605, 605]
+                };
+            let resolution = if let Some(Parameter::ListInt(resolution)) =
+                metadata.parameters.get("resolution")
+            {
+                resolution.to_vec()
+            } else {
+                vec![640, 480]
+            };
             let buffer: &Float64Array = data.as_any().downcast_ref().unwrap();
-            let buffer: &[u8] = bytemuck::cast_slice(buffer.values());
-            let image_buffer = ImageBuffer::try_from(buffer.to_vec())
-                .context("Could not convert buffer to image buffer")?;
-            let image_format =
-                ImageFormat::depth([*width as _, *height as _], rerun::ChannelDatatype::F64);
-
-            let depth_image = rerun::DepthImage::new(image_buffer, image_format)
-                .with_colormap(rerun::components::Colormap::Inferno);
-
-            rec.log(
-                id.as_str().replace("/depth", ""),
-                &rerun::Pinhole::from_focal_length_and_resolution(&[605., 605.], &[640., 480.]),
-            )?;
-
-            // If we log a pinhole camera model, the depth gets automatically back-projected to 3D
-            rec.log(id.as_str(), &depth_image)?;
+            let points_3d = buffer.iter().enumerate().map(|(i, z)| {
+                let u = (i % *width as usize) as f32; // column index (u)
+                let v = (i / *width as usize) as f32; // row index (v); integer division keeps points on their row
+                let z = z.unwrap_or_default() as f32;
+
+                (
+                    (u - resolution[0] as f32) * z / focal_length[0] as f32,
+                    (v - resolution[1] as f32) * z / focal_length[1] as f32,
+                    z,
+                )
+            });
+            let points_3d = Points3D::new(points_3d);
+            if let Some(color_buffer) = image_cache.get(&id.replace("depth", "image")) {
+                let colors = color_buffer
+                    .chunks(3)
+                    .map(|x| rerun::Color::from_rgb(x[0], x[1], x[2]))
+                    .collect::<Vec<_>>();
+                rec.log(id.as_str(), &points_3d.with_colors(colors))
+                    .context("could not log points")?;
+            } else {
+                rec.log(id.as_str(), &points_3d)
+                    .context("could not log points")?;
+            }
         } else if id.as_str().contains("text") {
             let buffer: StringArray = data.to_data().into();
             buffer.iter().try_for_each(|string| -> Result<()> {
```
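
The per-point computation in the new depth branch is the standard pinhole back-projection, x = (u - cx)·z/fx and y = (v - cy)·z/fy, where this node ships [fx, fy] in the `focal` metadata field and the principal point [cx, cy] in the (somewhat misleadingly named) `resolution` field. A vectorized numpy sketch of the same math, with function and parameter names of my own choosing:

```python
# Vectorized pinhole back-projection; mirrors the Rust loop above.
# `depth_to_points`, `focal`, and `resolution` are illustrative names.
import numpy as np


def depth_to_points(depth: np.ndarray, focal, resolution) -> np.ndarray:
    """Back-project an (H, W) depth map into an (H*W, 3) point cloud."""
    fx, fy = focal
    cx, cy = resolution  # principal point, as sent in the `resolution` metadata
    h, w = depth.shape
    v, u = np.mgrid[0:h, 0:w].astype(np.float32)  # row (v) and column (u) indices
    z = depth.astype(np.float32)
    x = (u - cx) * z / fx
    y = (v - cy) * z / fy
    return np.stack([x, y, z], axis=-1).reshape(-1, 3)
```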

