diff --git a/robodriver/robots/robodriver-robot-deeprobotics-x30-ros1/.gitignore b/robodriver/robots/robodriver-robot-deeprobotics-x30-ros1/.gitignore new file mode 100644 index 0000000..9244879 --- /dev/null +++ b/robodriver/robots/robodriver-robot-deeprobotics-x30-ros1/.gitignore @@ -0,0 +1,209 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[codz] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py.cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. 
+#Pipfile.lock + +# UV +# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +uv.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock +#poetry.toml + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python. +# https://pdm-project.org/en/latest/usage/project/#working-with-version-control +#pdm.lock +#pdm.toml +.pdm-python +.pdm-build/ + +# pixi +# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control. +#pixi.lock +# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one +# in the .venv directory. It is recommended not to include this directory in version control. +.pixi + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.envrc +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ + +# Abstra +# Abstra is an AI-powered process automation framework. +# Ignore directories containing user credentials, local state, and settings. +# Learn more at https://abstra.io/docs +.abstra/ + +# Visual Studio Code +# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore +# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore +# and can be added to the global gitignore or merged into this file. However, if you prefer, +# you could uncomment the following to ignore the entire vscode folder +# .vscode/ + +# Ruff stuff: +.ruff_cache/ + +# PyPI configuration file +.pypirc + +# Cursor +# Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to +# exclude from AI features like autocomplete and code analysis. 
+### 2.2 在云深处X30机器狗开发板上运行ros1_to_zmq.py
+
+```bash
+python ros1_to_zmq.py
+```
+
+### 2.3 Launch RoboXStudio / 启动 RoboXStudio
["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "robodriver_robot_deeprobotics_x30_ros1" +version = "0.1.0" +readme = "README.md" +requires-python = ">=3.10" +license = { text = "Apache-2.0" } +keywords = ["robotics", "lerobot", "deeprobotics_x30_ros1"] + +dependencies = [ + "logging_mp", + "numpy", + "pyzmq", + "opencv-python", +] + +[tool.setuptools.packages.find] +include = ["robodriver_robot_deeprobotics_x30_ros1"] + diff --git a/robodriver/robots/robodriver-robot-deeprobotics-x30-ros1/robodriver_robot_deeprobotics_x30_ros1/__init__.py b/robodriver/robots/robodriver-robot-deeprobotics-x30-ros1/robodriver_robot_deeprobotics_x30_ros1/__init__.py new file mode 100644 index 0000000..d2df119 --- /dev/null +++ b/robodriver/robots/robodriver-robot-deeprobotics-x30-ros1/robodriver_robot_deeprobotics_x30_ros1/__init__.py @@ -0,0 +1,2 @@ +from .robot import DeeproboticsX30Ros1Robot +from .config import DeeproboticsX30Ros1RobotConfig diff --git a/robodriver/robots/robodriver-robot-deeprobotics-x30-ros1/robodriver_robot_deeprobotics_x30_ros1/calibrate.py b/robodriver/robots/robodriver-robot-deeprobotics-x30-ros1/robodriver_robot_deeprobotics_x30_ros1/calibrate.py new file mode 100644 index 0000000..dd513ce --- /dev/null +++ b/robodriver/robots/robodriver-robot-deeprobotics-x30-ros1/robodriver_robot_deeprobotics_x30_ros1/calibrate.py @@ -0,0 +1,2 @@ +def calibrate(): + pass diff --git a/robodriver/robots/robodriver-robot-deeprobotics-x30-ros1/robodriver_robot_deeprobotics_x30_ros1/config.py b/robodriver/robots/robodriver-robot-deeprobotics-x30-ros1/robodriver_robot_deeprobotics_x30_ros1/config.py new file mode 100644 index 0000000..37e36b1 --- /dev/null +++ b/robodriver/robots/robodriver-robot-deeprobotics-x30-ros1/robodriver_robot_deeprobotics_x30_ros1/config.py @@ -0,0 +1,74 @@ +from typing import Dict +from dataclasses import dataclass, field + +from lerobot.robots.config import RobotConfig +from lerobot.cameras import CameraConfig 
from lerobot.cameras.opencv import OpenCVCameraConfig
from lerobot.motors import Motor, MotorNormMode


def _x30_leg_motors(norm_mode) -> Dict[str, Motor]:
    """Return the 12 leg-joint motors of the X30 quadruped (3 per leg).

    Motor IDs run 1-12 in leg order: front-left, front-right, hind-left,
    hind-right; each leg has hipx, hipy, knee.
    """
    return {
        # front left
        "fl_hipx": Motor(1, "robot_motor", norm_mode),
        "fl_hipy": Motor(2, "robot_motor", norm_mode),
        "fl_knee": Motor(3, "robot_motor", norm_mode),
        # front right
        "fr_hipx": Motor(4, "robot_motor", norm_mode),
        "fr_hipy": Motor(5, "robot_motor", norm_mode),
        "fr_knee": Motor(6, "robot_motor", norm_mode),
        # hind left
        "hl_hipx": Motor(7, "robot_motor", norm_mode),
        "hl_hipy": Motor(8, "robot_motor", norm_mode),
        "hl_knee": Motor(9, "robot_motor", norm_mode),
        # hind right
        "hr_hipx": Motor(10, "robot_motor", norm_mode),
        "hr_hipy": Motor(11, "robot_motor", norm_mode),
        # BUG FIX: was Motor(11, ...), duplicating hr_hipy's motor ID.
        # IDs are sequential per joint, so hr_knee is 12.
        "hr_knee": Motor(12, "robot_motor", norm_mode),
    }


@RobotConfig.register_subclass("deeprobotics_x30_ros1")
@dataclass
class DeeproboticsX30Ros1RobotConfig(RobotConfig):
    """Configuration for the DeepRobotics X30 quadruped (ROS1/ZMQ bridge)."""

    # Report joint positions in degrees; otherwise normalized to [-100, 100].
    use_degrees = True
    norm_mode_body = (
        MotorNormMode.DEGREES if use_degrees else MotorNormMode.RANGE_M100_100
    )

    # Grouped by component: { comp_id: { joint_name: Motor, ... }, ... }.
    # The lambda default argument captures norm_mode_body at class-creation
    # time, exactly like the original per-field lambdas did.
    leader_motors: Dict[str, Dict[str, Motor]] = field(
        default_factory=lambda nm=norm_mode_body: {
            "leader_joints": _x30_leg_motors(nm),
        }
    )

    follower_motors: Dict[str, Dict[str, Motor]] = field(
        default_factory=lambda nm=norm_mode_body: {
            "follower_joint": _x30_leg_motors(nm),
        }
    )

    cameras: Dict[str, CameraConfig] = field(
        default_factory=lambda: {
            "image_top": OpenCVCameraConfig(
                index_or_path=0, fps=30, width=640, height=480
            ),
        }
    )

    # Encode camera streams as video when recording datasets.
    use_videos: bool = True

    # No microphones configured by default.
    microphones: Dict[str, int] = field(default_factory=dict)
class DeeproboticsX30Ros1Node:
    """
    Part 2 of the bridge: ZMQ -> local data store.

    - No ROS1/ROS2 dependency on this side.
    - Subscribes via a ZMQ SUB socket to the data published by the on-robot
      process (ros1_to_zmq.py).
    - Exposes ``recv_images`` / ``recv_follower`` / ``recv_leader`` for the
      Robot class to poll.
    """

    def __init__(
        self,
        zmq_endpoint: str = "tcp://192.168.1.106:5555",
        leader_joint_topics: Dict[str, Dict[str, str]] = NODE_CONFIG["leader_joint_topics"],
        follower_joint_topics: Dict[str, Dict[str, str]] = NODE_CONFIG["follower_joint_topics"],
        camera_topics: Dict[str, Dict[str, str]] = NODE_CONFIG["camera_topics"],
    ):
        self.leader_joint_cfgs = leader_joint_topics or {}
        self.follower_joint_cfgs = follower_joint_topics or {}
        self.camera_cfgs = camera_topics or {}

        # Latest data per component name.  The *_status entries are
        # frame-based freshness counters, reset to CONNECT_TIMEOUT_FRAME on
        # every received message.
        self.recv_images: Dict[str, np.ndarray] = {}
        self.recv_images_status: Dict[str, int] = {}

        # FIX: joint entries hold the raw decoded JSON dict when fed by
        # _spin_loop (ndarray only via _handle_payload), so annotate as Any.
        self.recv_follower: Dict[str, Any] = {}
        self.recv_follower_status: Dict[str, int] = {}

        self.recv_leader: Dict[str, Any] = {}
        self.recv_leader_status: Dict[str, int] = {}

        # Guards the recv_* dicts: they are written by the receiver thread
        # and read by the Robot from another thread.
        self.lock = threading.Lock()
        self.running = False

        self._ctx = zmq.Context.instance()
        self._socket = self._ctx.socket(zmq.SUB)
        self._socket.connect(zmq_endpoint)
        self._socket.setsockopt_string(zmq.SUBSCRIBE, "")

        logger.info(f"[ZMQ Node] connected to {zmq_endpoint}")

        self.spin_thread = None

    def start(self):
        """Start the ZMQ receiver thread (keeps the original start() API)."""
        if self.running:
            return

        self.running = True
        self.spin_thread = threading.Thread(target=self._spin_loop, daemon=True)
        self.spin_thread.start()

        logger.info("[ZMQ Node] Node started (receiver thread running)")

    def _spin_loop(self):
        """Receiver loop: poll the SUB socket and dispatch incoming messages.

        Protocol (matches ros1_to_zmq.py):
        - single-part message  -> "<topic> <json>" joint state
        - two-part message     -> "<topic> <metadata-json>" + JPEG bytes
        """
        poller = zmq.Poller()
        poller.register(self._socket, zmq.POLLIN)

        while self.running:
            try:
                socks = dict(poller.poll(timeout=100))  # 100 ms
            except zmq.ZMQError as e:
                logger.error(f"[ZMQ Node] Poll error: {e}")
                break

            if self._socket in socks and socks[self._socket] == zmq.POLLIN:
                try:
                    message_parts = self._socket.recv_multipart()
                    if len(message_parts) < 2:
                        # Joint-state message: "<prefix> <json payload>".
                        if message_parts:
                            message = message_parts[0]
                            if b' ' in message:
                                prefix, json_bytes = message.split(b' ', 1)

                                try:
                                    json_str = json_bytes.decode('utf-8')
                                    raw_data = json.loads(json_str)

                                    # BUG FIX: update the shared dicts under
                                    # self.lock — the Robot reads them from
                                    # another thread (matches
                                    # _handle_payload's locking).
                                    with self.lock:
                                        self.recv_leader["leader_joints"] = raw_data
                                        self.recv_leader_status["leader_joints"] = CONNECT_TIMEOUT_FRAME

                                        self.recv_follower["follower_joint"] = raw_data
                                        self.recv_follower_status["follower_joint"] = CONNECT_TIMEOUT_FRAME

                                except UnicodeDecodeError as e:
                                    logger.debug(f"UTF-8解码失败: {e}")
                                except json.JSONDecodeError as e:
                                    logger.debug(f"JSON解析失败: {e}")
                                    logger.debug(f"原始JSON: {json_str}")
                                except Exception as e:
                                    logger.debug(f"处理状态数据失败: {e}")
                    else:
                        # Camera message: metadata frame + JPEG frame.
                        topic_metadata = message_parts[0].decode('utf-8')
                        jpeg_data = message_parts[1]

                        if " " in topic_metadata:
                            _, metadata_str = topic_metadata.split(" ", 1)

                            try:
                                # Parsed but currently unused; kept for
                                # forward compatibility with the publisher.
                                metadata = json.loads(metadata_str)

                                frame = self.decode_camera_image(jpeg_data)

                                if frame is not None:
                                    with self.lock:
                                        self.recv_images["image_top"] = frame
                                        self.recv_images_status["image_top"] = CONNECT_TIMEOUT_FRAME

                            except Exception as e:
                                logger.debug(f"处理相机数据失败: {e}")
                except zmq.Again:
                    continue
                except Exception as e:
                    logger.error(f"相机接收错误: {e}")

    def decode_camera_image(self, jpeg_data: bytes) -> Optional[np.ndarray]:
        """Decode JPEG bytes into an RGB ndarray; return None on failure."""
        try:
            img_array = np.frombuffer(jpeg_data, dtype=np.uint8)
            frame = cv2.imdecode(img_array, cv2.IMREAD_COLOR)

            if frame is not None:
                # OpenCV decodes to BGR; datasets expect RGB.
                return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        except Exception as e:
            logger.warning(f"解码图像失败: {e}")

        return None

    def stop(self):
        """Stop the receiver thread and close the socket (original stop() API)."""
        if not self.running:
            return

        self.running = False

        if self.spin_thread is not None:
            self.spin_thread.join(timeout=1.0)

        try:
            self._socket.close(0)
        except Exception:
            pass

        logger.info("[ZMQ Node] Node stopped.")

    def _handle_payload(self, payload: dict):
        """Dispatch a decoded payload dict into the recv_* stores.

        NOTE(review): not currently called by _spin_loop; kept for the
        pickled-payload protocol variant.
        """
        kind = payload.get("kind")
        name = payload.get("name")

        if kind is None or name is None:
            return

        with self.lock:
            if kind == "camera":
                frame = payload.get("frame")
                if frame is not None:
                    self.recv_images[name] = frame
                    self.recv_images_status[name] = CONNECT_TIMEOUT_FRAME

            elif kind.startswith("follower_"):
                values = payload.get("values")
                if values is not None:
                    self.recv_follower[name] = np.asarray(values, dtype=float)
                    self.recv_follower_status[name] = CONNECT_TIMEOUT_FRAME

            elif kind.startswith("leader_"):
                values = payload.get("values")
                if values is not None:
                    self.recv_leader[name] = np.asarray(values, dtype=float)
                    self.recv_leader_status[name] = CONNECT_TIMEOUT_FRAME

            else:
                logger.debug(f"[ZMQ Node] Unhandled kind={kind}, name={name}")
class DeeproboticsX30Ros1Robot(Robot):
    """LeRobot Robot wrapper for the DeepRobotics X30 quadruped.

    All sensing arrives through a background ZMQ node
    (DeeproboticsX30Ros1Node); this class only polls its recv_* stores.
    """

    config_class = DeeproboticsX30Ros1RobotConfig
    name = "deeprobotics_x30_ros1"

    def __init__(self, config: DeeproboticsX30Ros1RobotConfig):
        super().__init__(config)
        self.config = config
        self.robot_type = self.config.type
        self.use_videos = self.config.use_videos
        self.microphones = self.config.microphones

        self.leader_motors = config.leader_motors
        self.follower_motors = config.follower_motors
        self.cameras = make_cameras_from_configs(self.config.cameras)

        # Cameras that connect() must not wait for.
        self.connect_excluded_cameras = ["image_pika_pose"]

        # Background ZMQ receiver (name kept for API compatibility even
        # though the transport is ZMQ, not ROS2).
        self.robot_ros2_node = DeeproboticsX30Ros1Node()
        self.robot_ros2_node.start()

        self.connected = False
        self.logs = {}

    # ========= features =========

    @property
    def _follower_motors_ft(self) -> dict[str, type]:
        """One float feature per follower joint: 'follower_<joint>.pos'."""
        return {
            f"follower_{joint_name}.pos": float
            for comp_name, joints in self.follower_motors.items()
            for joint_name in joints.keys()
        }

    @property
    def _leader_motors_ft(self) -> dict[str, type]:
        """One float feature per leader joint: 'leader_<joint>.pos'."""
        return {
            f"leader_{joint_name}.pos": float
            for comp_name, joints in self.leader_motors.items()
            for joint_name in joints.keys()
        }

    @property
    def _cameras_ft(self) -> dict[str, tuple]:
        """(H, W, 3) image shape per configured camera."""
        return {
            cam: (
                self.config.cameras[cam].height,
                self.config.cameras[cam].width,
                3,
            )
            for cam in self.cameras
        }

    @cached_property
    def observation_features(self) -> dict[str, Any]:
        return {**self._follower_motors_ft, **self._cameras_ft}

    @cached_property
    def action_features(self) -> dict[str, Any]:
        return self._leader_motors_ft

    @property
    def is_connected(self) -> bool:
        return self.connected

    # ========= connect / disconnect =========

    def connect(self):
        """Block until camera images, leader and follower joint data have all
        arrived from the ZMQ node; raise TimeoutError after 20 s."""
        timeout = 20
        start_time = time.perf_counter()

        if self.connected:
            raise DeviceAlreadyConnectedError(f"{self} already connected")

        node = self.robot_ros2_node

        # (ready-predicate, missing-items-getter, timeout message) triples.
        conditions = [
            # camera images
            (
                lambda: all(
                    name in node.recv_images
                    for name in self.cameras
                    if name not in self.connect_excluded_cameras
                ),
                lambda: [
                    name
                    for name in self.cameras
                    if name not in node.recv_images
                    and name not in self.connect_excluded_cameras
                ],
                "等待摄像头图像超时",
            ),
            # leader joints
            (
                lambda: all(
                    any(name in key for key in node.recv_leader)
                    for name in self.leader_motors
                ),
                lambda: [
                    name
                    for name in self.leader_motors
                    if not any(name in key for key in node.recv_leader)
                ],
                "等待主臂数据超时",
            ),
            # follower joints
            (
                lambda: all(
                    any(name in key for key in node.recv_follower)
                    for name in self.follower_motors
                ),
                lambda: [
                    name
                    for name in self.follower_motors
                    if not any(name in key for key in node.recv_follower)
                ],
                "等待从臂数据超时",
            ),
        ]

        completed = [False] * len(conditions)

        while True:
            for i, (cond, _get_missing, _msg) in enumerate(conditions):
                if not completed[i] and cond():
                    completed[i] = True

            if all(completed):
                break

            if time.perf_counter() - start_time > timeout:
                failed_messages = []
                for i, (cond, get_missing, base_msg) in enumerate(conditions):
                    if completed[i]:
                        continue

                    missing = get_missing()
                    if cond() or not missing:
                        completed[i] = True
                        continue

                    if i == 0:
                        received = [
                            name
                            for name in self.cameras
                            if name not in missing
                        ]
                    elif i == 1:
                        received = [
                            name
                            for name in self.leader_motors
                            if name not in missing
                        ]
                    else:
                        received = [
                            name
                            for name in self.follower_motors
                            if name not in missing
                        ]

                    msg = (
                        f"{base_msg}: 未收到 [{', '.join(missing)}]; "
                        f"已收到 [{', '.join(received)}]"
                    )
                    failed_messages.append(msg)

                if not failed_messages:
                    break

                # BUG FIX: the original f-string used doubled braces
                # ({{...}}), so it printed the literal expression text
                # instead of the joined failure messages.
                raise TimeoutError(
                    f"连接超时,未满足的条件: {'; '.join(failed_messages)}"
                )

            time.sleep(0.01)

        # Success logging.
        success_messages = []

        if conditions[0][0]():
            cam_received = [
                name
                for name in self.cameras
                if name in node.recv_images
                and name not in self.connect_excluded_cameras
            ]
            success_messages.append(f"摄像头: {', '.join(cam_received)}")

        if conditions[1][0]():
            leader_received = [
                name
                for name in self.leader_motors
                if any(name in key for key in node.recv_leader)
            ]
            success_messages.append(f"主臂数据: {', '.join(leader_received)}")

        if conditions[2][0]():
            follower_received = [
                name
                for name in self.follower_motors
                if any(name in key for key in node.recv_follower)
            ]
            success_messages.append(f"从臂数据: {', '.join(follower_received)}")

        log_message = "\n[连接成功] 所有设备已就绪:\n"
        log_message += "\n".join(f"  - {msg}" for msg in success_messages)
        log_message += f"\n  总耗时: {time.perf_counter() - start_time:.2f} 秒\n"
        logger.info(log_message)

        self.connected = True

    def disconnect(self):
        if not self.connected:
            raise DeviceNotConnectedError()
        self.connected = False

    def __del__(self):
        # Best-effort cleanup; guard with getattr because __del__ may run
        # on a partially initialized instance.
        if getattr(self, "connected", False):
            self.disconnect()

    # ========= calibrate / configure =========

    def calibrate(self):
        pass

    def configure(self):
        pass

    @property
    def is_calibrated(self):
        return True

    # ========= obs / action =========

    def get_observation(self) -> dict[str, Any]:
        """Return follower joint positions and latest camera frames."""
        if not self.connected:
            raise DeviceNotConnectedError(f"{self} is not connected.")

        start = time.perf_counter()
        obs_dict: dict[str, Any] = {}

        node = self.robot_ros2_node

        # Follower joints: the node stores the raw JointState-like dict
        # ({"name": [...], "position": [...]}) per component.
        for comp_name, joints in self.follower_motors.items():
            state_dict = node.recv_follower.get(comp_name)
            if state_dict:
                position = state_dict.get("position", [])
                name = state_dict.get("name", [])

                for joint_name, pos in zip(name, position):
                    obs_dict[f"follower_{joint_name}.pos"] = float(pos)
        dt_ms = (time.perf_counter() - start) * 1e3
        logger.debug(f"{self} read follower state: {dt_ms:.1f} ms")

        # Camera images: match by exact or substring key.
        for cam_key, _cam in self.cameras.items():
            start = time.perf_counter()
            for name, val in node.recv_images.items():
                if cam_key == name or cam_key in name:
                    obs_dict[cam_key] = val
                    break
            dt_ms = (time.perf_counter() - start) * 1e3
            logger.debug(f"{self} read {cam_key}: {dt_ms:.1f} ms")

        return obs_dict

    def get_action(self) -> dict[str, Any]:
        """Return leader joint positions as the teleop action."""
        if not self.connected:
            raise DeviceNotConnectedError(f"{self} is not connected.")

        start = time.perf_counter()
        act_dict: dict[str, Any] = {}

        node = self.robot_ros2_node

        for comp_name, joints in self.leader_motors.items():
            state_dict = node.recv_leader.get(comp_name)
            if state_dict:
                position = state_dict.get("position", [])
                name = state_dict.get("name", [])

                for joint_name, pos in zip(name, position):
                    act_dict[f"leader_{joint_name}.pos"] = float(pos)

        dt_ms = (time.perf_counter() - start) * 1e3
        logger.debug(f"{self} read action: {dt_ms:.1f} ms")

        return act_dict

    # ========= send_action =========

    def send_action(self, action: dict[str, Any]) -> dict[str, Any]:
        """The provided action is expected to be a vector."""
        if not self.is_connected:
            raise DeviceNotConnectedError(
                f"{self} is not connected. You need to run `robot.connect()`."
            )

        goal_joint = [val for _key, val in action.items()]
        goal_joint_numpy = np.array(goal_joint, dtype=np.float32)

        # The ZMQ node has no downlink yet; if one is added (e.g. a ZMQ PUB
        # with a ros2_send method), forward the goal there.
        if hasattr(self.robot_ros2_node, "ros2_send"):
            self.robot_ros2_node.ros2_send("action_joint", goal_joint_numpy)

        return {f"{arm_motor}.pos": val for arm_motor, val in action.items()}
class ROSZeroMQBridge:
    """ROS1 -> ZeroMQ bridge node, shared-port design.

    A single PUB socket on one port carries both joint states
    ("robot/joint_states ...") and camera frames ("robot/camera ...").
    """

    def __init__(self, host="0.0.0.0", port=5555, rtsp_url="rtsp://192.168.1.105:8554/test"):
        # ROS init
        rospy.init_node('ros_zeromq_bridge_shared')

        # Configuration
        self.host = host
        self.port = port
        self.rtsp_url = rtsp_url

        # Running flag (also guards stop() idempotence, see stop()).
        self.running = True

        # Shared ZeroMQ PUB socket
        self._setup_zmq()

        # Publishers sharing the socket
        self.joint_pub = JointStatePublisher(self.socket, self.host, self.port)
        self.camera_pub = RTSPCameraPublisher(self.socket, self.host, self.port, self.rtsp_url)

        # Logging
        self._setup_logging()

        rospy.loginfo("=" * 60)
        rospy.loginfo("ROS1->ZeroMQ桥接节点 (共享端口) 已启动")
        rospy.loginfo(f"绑定地址: tcp://{self.host}:{self.port}")
        rospy.loginfo("发布主题:")
        rospy.loginfo("  Joint State: robot/joint_states")
        rospy.loginfo("  Camera: robot/camera")
        rospy.loginfo("=" * 60)

    def _setup_zmq(self):
        """Create and bind the shared PUB socket."""
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.PUB)
        self.socket.setsockopt(zmq.SNDHWM, 5)   # avoid backlog
        self.socket.setsockopt(zmq.LINGER, 0)   # close immediately

        bind_address = f"tcp://{self.host}:{self.port}"
        self.socket.bind(bind_address)
        rospy.loginfo(f"ZeroMQ PUB socket绑定到 {bind_address}")

    def _setup_logging(self):
        """Configure logging (currently only a debug marker)."""
        rospy.logdebug("日志系统初始化完成")

    def start(self):
        """Start both publishers; stop and re-raise on failure."""
        try:
            self.joint_pub.start()
            time.sleep(0.5)

            self.camera_pub.start()

            rospy.loginfo("所有发布器已启动")

        except Exception as e:
            rospy.logerr(f"启动失败: {e}")
            self.stop()
            raise

    def stop(self):
        """Stop all publishers and release ZMQ resources (idempotent).

        BUG FIX: stop() is reached twice on shutdown — once from
        _signal_handler and again from run()'s finally after sys.exit()
        unwinds through rospy.spin() — so guard against double shutdown.
        """
        if not getattr(self, "running", False):
            return

        rospy.loginfo("正在停止所有发布器...")
        self.running = False

        self.joint_pub.stop()
        self.camera_pub.stop()

        # Tear down ZeroMQ.
        if hasattr(self, 'socket') and self.socket:
            self.socket.close()
        if hasattr(self, 'context') and self.context:
            self.context.term()

        rospy.loginfo("所有发布器已停止")

    def run(self):
        """Main loop: start publishers, spin ROS, always stop on exit."""
        self.start()

        # Install signal handlers for clean shutdown.
        signal.signal(signal.SIGINT, self._signal_handler)
        signal.signal(signal.SIGTERM, self._signal_handler)

        try:
            # rospy.spin keeps the node alive.
            rospy.spin()
        except KeyboardInterrupt:
            rospy.loginfo("收到键盘中断信号")
        finally:
            self.stop()

    def _signal_handler(self, signum, frame):
        """SIGINT/SIGTERM handler: stop publishers and exit."""
        rospy.loginfo(f"收到信号 {signum}, 正在关闭...")
        self.stop()
        sys.exit(0)
class JointStatePublisher:
    """Joint-state publisher using the bridge's shared PUB socket.

    Subscribes to a ROS1 JointState topic, keeps only the latest message,
    and republishes it as JSON on the "robot/joint_states" ZMQ topic at a
    bounded rate.
    """

    def __init__(self, socket, host, port, topic="/joint_states", rate=30):
        self.socket = socket
        self.host = host
        self.port = port
        self.topic = topic
        self.rate = rate

        # State
        self.running = False
        self.publish_thread = None

        # Statistics / rate limiting
        self.frame_count = 0
        self.last_publish_time = 0
        self.min_interval = 1.0 / rate

        # Logging
        self.logger = logging.getLogger('JointStatePublisher')

        # ROS subscription state: only the latest message is retained.
        self.subscriber = None
        self.latest_msg = None
        self.msg_lock = threading.Lock()

    def start(self):
        """Subscribe to ROS and start the publish thread."""
        try:
            self._init_ros_subscriber()

            self.running = True
            self.publish_thread = threading.Thread(target=self._publish_loop, daemon=True)
            self.publish_thread.start()

            rospy.loginfo(f"JointState发布器已启动 (主题: {self.topic}, 频率: {self.rate}Hz)")

        except Exception as e:
            rospy.logerr(f"启动JointState发布器失败: {e}")
            self.stop()
            raise

    def _init_ros_subscriber(self):
        """Create the ROS subscriber (queue_size=1: latest message only)."""
        self.subscriber = rospy.Subscriber(
            self.topic,
            JointState,
            self._joint_callback,
            queue_size=1
        )
        rospy.loginfo(f"已订阅ROS话题: {self.topic}")

    def _joint_callback(self, msg):
        """ROS callback: retain only the newest JointState."""
        with self.msg_lock:
            self.latest_msg = msg

    def _publish_loop(self):
        """Publish loop: forward the latest message at ~self.rate Hz."""
        rospy.logdebug("JointState发布循环已启动")

        while self.running and not rospy.is_shutdown():
            try:
                current_time = time.time()

                # Rate limiting.
                if current_time - self.last_publish_time < self.min_interval:
                    time.sleep(0.001)
                    continue

                # Consume the latest message.
                with self.msg_lock:
                    msg = self.latest_msg
                    self.latest_msg = None

                if msg is not None:
                    self._publish_joint_state(msg)
                    self.last_publish_time = current_time

                time.sleep(0.001)

            except Exception as e:
                rospy.logerr(f"JointState发布循环错误: {e}")
                time.sleep(0.1)

        rospy.logdebug("JointState发布循环已停止")

    def _publish_joint_state(self, msg):
        """Serialize a JointState message to JSON and publish it via ZMQ."""
        try:
            data = {
                "header": {
                    "seq": msg.header.seq,
                    "stamp": {
                        "secs": msg.header.stamp.secs,
                        "nsecs": msg.header.stamp.nsecs
                    },
                    "frame_id": msg.header.frame_id
                },
                "name": msg.name,
                "position": list(msg.position) if msg.position else [],
                "velocity": list(msg.velocity) if msg.velocity else [],
                "effort": list(msg.effort) if msg.effort else [],
                "_timestamp": datetime.now().isoformat(),
                "_frame_count": self.frame_count
            }

            json_data = json.dumps(data, default=self._json_serializer)

            # Publish on the shared socket under the "robot/joint_states" topic.
            full_message = f"robot/joint_states {json_data}"

            # Non-blocking send: if the HWM is reached, drop this frame.
            try:
                self.socket.send_string(full_message, flags=zmq.NOBLOCK)
                self.frame_count += 1

                if self.frame_count % 100 == 0:
                    rospy.logdebug(f"已发布 {self.frame_count} 帧关节数据")

            except zmq.Again:
                rospy.logdebug("JointState发送队列满,跳过此帧")
            except Exception as e:
                rospy.logerr(f"JointState发送错误: {e}")

        except Exception as e:
            rospy.logerr(f"处理关节数据失败: {e}")

    def _json_serializer(self, obj):
        """Fallback serializer for json.dumps: __dict__, numpy tolist, or str."""
        if hasattr(obj, '__dict__'):
            return obj.__dict__
        elif hasattr(obj, 'tolist'):  # numpy arrays
            return obj.tolist()
        else:
            try:
                return str(obj)
            # BUG FIX: was a bare `except:`, which also swallows
            # SystemExit/KeyboardInterrupt.
            except Exception:
                return None

    def stop(self):
        """Unsubscribe and stop the publish thread."""
        rospy.loginfo("正在停止JointState发布器...")
        self.running = False

        if self.subscriber:
            self.subscriber.unregister()

        if self.publish_thread:
            self.publish_thread.join(timeout=2.0)

        rospy.loginfo(f"JointState统计: 总发布帧数={self.frame_count}")
{self.rtsp_url}, 频率: {self.rate}Hz)") + + except Exception as e: + rospy.logerr(f"启动RTSP摄像头发布器失败: {e}") + self.stop() + raise + + def _capture_loop(self): + """RTSP捕获循环""" + rospy.loginfo(f"RTSP捕获线程启动: {self.rtsp_url}") + + reconnect_delay = 1.0 + + while self.running: + try: + # 打开RTSP流 + if self.cap is None: + self.cap = cv2.VideoCapture(self.rtsp_url) + self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 1) # 最小缓冲 + self.cap.set(cv2.CAP_PROP_FPS, self.rate) + + if not self.cap.isOpened(): + raise Exception("无法打开RTSP流") + + rospy.loginfo("RTSP连接成功") + reconnect_delay = 1.0 # 重置重连延迟 + + # 读取帧 + ret, frame = self.cap.read() + + if not ret: + rospy.logwarn("RTSP读取失败,准备重连...") + self._release_capture() + time.sleep(reconnect_delay) + reconnect_delay = min(reconnect_delay * 2, 10) # 指数退避 + with self.stats_lock: + self.stats['reconnects'] += 1 + continue + + # 更新统计 + with self.stats_lock: + self.stats['frames_captured'] += 1 + + # 放入队列(如果队列满则丢弃最旧的) + frame_data = { + 'frame': frame, + 'timestamp': time.time(), + 'frame_num': self.stats['frames_captured'] + } + + try: + # 非阻塞放入,如果队列满则丢弃旧帧 + if self.frame_queue.full(): + try: + self.frame_queue.get_nowait() # 丢弃最旧的 + with self.stats_lock: + self.stats['queue_drops'] += 1 + except Empty: + pass + + self.frame_queue.put_nowait(frame_data) + + except Full: + with self.stats_lock: + self.stats['queue_drops'] += 1 + + # 控制捕获速率(可选) + time.sleep(1.0 / (self.rate * 1.5)) # 略高于目标发布率 + + except Exception as e: + rospy.logerr(f"RTSP捕获错误: {e}") + self._release_capture() + time.sleep(reconnect_delay) + reconnect_delay = min(reconnect_delay * 2, 10) + + self._release_capture() + rospy.loginfo("RTSP捕获线程已退出") + + def _publish_loop(self): + """发布循环""" + rospy.logdebug("RTSP发布循环已启动") + + publish_interval = 1.0 / self.rate + + while self.running: + try: + loop_start = time.time() + + # 获取最新帧 + try: + frame_data = self.frame_queue.get(timeout=publish_interval) + except Empty: + # 没有新帧,跳过本次发布 + continue + + if frame_data: + self._publish_frame(frame_data) 
+ + # 控制发布速率 + elapsed = time.time() - loop_start + sleep_time = max(0, publish_interval - elapsed) + if sleep_time > 0: + time.sleep(sleep_time) + + except Exception as e: + rospy.logerr(f"RTSP发布循环错误: {e}") + time.sleep(0.01) + + rospy.logdebug("RTSP发布循环已停止") + + def _publish_frame(self, frame_data): + """发布单帧图像""" + try: + frame = frame_data['frame'] + capture_time = frame_data['timestamp'] + frame_num = frame_data['frame_num'] + + # 计算延迟 + current_time = time.time() + latency = current_time - capture_time + + # 更新平均延迟 + with self.stats_lock: + self.stats['avg_latency'] = self.stats['avg_latency'] * 0.9 + latency * 0.1 + + # 编码为JPEG(可调节质量平衡带宽和画质) + encode_params = [cv2.IMWRITE_JPEG_QUALITY, 80] + success, buffer = cv2.imencode('.jpg', frame, encode_params) + + if not success: + rospy.logwarn("图像编码失败") + return + + # 准备元数据 + metadata = { + "timestamp": datetime.now().isoformat(), + "capture_time": capture_time, + "frame_id": frame_num, + "width": frame.shape[1], + "height": frame.shape[0], + "channels": frame.shape[2] if len(frame.shape) > 2 else 1, + "encoding": "jpeg", + "size_bytes": len(buffer), + "latency_ms": round(latency * 1000, 2), + "fps": self.rate + } + + # 发送元数据 - 使用robot/camera主题 + try: + self.socket.send_string( + f"robot/camera {json.dumps(metadata)}", + flags=zmq.SNDMORE | zmq.NOBLOCK + ) + # 发送图像数据 + self.socket.send(buffer.tobytes(), copy=False, flags=zmq.NOBLOCK) + + with self.stats_lock: + self.stats['frames_published'] += 1 + + # 定期输出统计信息 + if frame_num % 100 == 0: + self._log_stats() + + except zmq.Again: + with self.stats_lock: + self.stats['publish_errors'] += 1 + if self.stats['publish_errors'] % 50 == 0: + rospy.logwarn("ZMQ发送队列满,正在丢帧") + except Exception as e: + rospy.logerr(f"发送图像失败: {e}") + with self.stats_lock: + self.stats['publish_errors'] += 1 + + except Exception as e: + rospy.logerr(f"处理图像帧失败: {e}") + + def _release_capture(self): + """释放捕获资源""" + if self.cap is not None: + self.cap.release() + self.cap = None + + def 
_log_stats(self): + """记录统计信息""" + with self.stats_lock: + stats = self.stats.copy() + + stats_str = ( + f"RTSP统计: " + f"捕获={stats['frames_captured']}, " + f"发布={stats['frames_published']}, " + f"延迟={stats['avg_latency']*1000:.1f}ms, " + f"丢帧={stats['queue_drops']}, " + f"重连={stats['reconnects']}, " + f"队列={self.frame_queue.qsize()}" + ) + rospy.loginfo(stats_str) + + def stop(self): + """停止发布器""" + rospy.loginfo("正在停止RTSP摄像头发布器...") + self.running = False + + # 释放捕获 + self._release_capture() + + # 清空队列 + while not self.frame_queue.empty(): + try: + self.frame_queue.get_nowait() + except Empty: + break + + # 等待线程 + if self.capture_thread: + self.capture_thread.join(timeout=3.0) + if self.publish_thread: + self.publish_thread.join(timeout=2.0) + + self._log_stats() + rospy.loginfo(f"RTSP摄像头发布器已停止,总发布帧数: {self.stats['frames_published']}") + + +def main(): + """主函数""" + # 创建并运行桥接节点 + bridge = ROSZeroMQBridge( + host="0.0.0.0", + port=5555, + rtsp_url="rtsp://192.168.1.105:8554/test" + ) + + try: + bridge.run() + except Exception as e: + rospy.logerr(f"桥接节点运行失败: {e}") + import traceback + traceback.print_exc() + sys.exit(1) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/robodriver/robots/robodriver-robot-dobot-nova2-ros2/.gitignore b/robodriver/robots/robodriver-robot-dobot-nova2-ros2/.gitignore new file mode 100644 index 0000000..9244879 --- /dev/null +++ b/robodriver/robots/robodriver-robot-dobot-nova2-ros2/.gitignore @@ -0,0 +1,209 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[codz] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py.cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# UV +# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +uv.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock +#poetry.toml + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python. 
+# https://pdm-project.org/en/latest/usage/project/#working-with-version-control +#pdm.lock +#pdm.toml +.pdm-python +.pdm-build/ + +# pixi +# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control. +#pixi.lock +# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one +# in the .venv directory. It is recommended not to include this directory in version control. +.pixi + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.envrc +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ + +# Abstra +# Abstra is an AI-powered process automation framework. +# Ignore directories containing user credentials, local state, and settings. +# Learn more at https://abstra.io/docs +.abstra/ + +# Visual Studio Code +# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore +# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore +# and can be added to the global gitignore or merged into this file. 
However, if you prefer, +# you could uncomment the following to ignore the entire vscode folder +# .vscode/ + +# Ruff stuff: +.ruff_cache/ + +# PyPI configuration file +.pypirc + +# Cursor +# Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to +# exclude from AI features like autocomplete and code analysis. Recommended for sensitive data +# refer to https://docs.cursor.com/context/ignore-files +.cursorignore +.cursorindexingignore + +# Marimo +marimo/_static/ +marimo/_lsp/ +__marimo__/ + +**out/ diff --git a/robodriver/robots/robodriver-robot-dobot-nova2-ros2/README.md b/robodriver/robots/robodriver-robot-dobot-nova2-ros2/README.md new file mode 100644 index 0000000..89bd670 --- /dev/null +++ b/robodriver/robots/robodriver-robot-dobot-nova2-ros2/README.md @@ -0,0 +1,46 @@ +# robodriver-robot-dobot-nova2-ros2 + +## 1. Get Start / 快速开始 + +### 1.1 Install the project to RoboDriver / 安装到 RoboDriver 环境 + +Make sure you are in the `robodriver` conda env. +确保你已经进入 `robodriver` 的 conda 环境: + +```bash +conda activate robodriver +pip install -e . +``` + +## 2. Start Collecting / 开始采集数据 + +### 2.1 Activate environment / 激活环境 + +```bash +conda activate robodriver +``` +### 2.2 启动realsense相机ros-sdk +```bash +https://github.com/IntelRealSense/realsense-ros +``` + +### 2.3 启动越疆nova2遥操作程序 +#### 项目名称:dobot_xtrainer(官方申请dobot_xtrainer开源版本) + +```bash +https://www.dobot.cn/service/download-center?keyword=xtrainer +``` + +### 2.4 Launch RoboXStudio / 启动 RoboXStudio + +In your RoboDriver project directory: +在你的 RoboDriver 工程目录中: + +```bash +cd /path/to/your/RoboDriver +python -m robodriver.scripts.run --robot.type=dobot-nova2-ros2 +``` +## 3. 
Notes / 注意事项 + +- dobot_xtrainer不支持ros2,需将采集数据自行发布成ros2 topic,代码位置:experiments/run_control.py。 + diff --git a/robodriver/robots/robodriver-robot-dobot-nova2-ros2/lerobot_robot_dobot_nova2_ros2/__init__.py b/robodriver/robots/robodriver-robot-dobot-nova2-ros2/lerobot_robot_dobot_nova2_ros2/__init__.py new file mode 100644 index 0000000..c46a557 --- /dev/null +++ b/robodriver/robots/robodriver-robot-dobot-nova2-ros2/lerobot_robot_dobot_nova2_ros2/__init__.py @@ -0,0 +1 @@ +from robodriver_robot_dobot_nova2_ros2 import * diff --git a/robodriver/robots/robodriver-robot-dobot-nova2-ros2/pyproject.toml b/robodriver/robots/robodriver-robot-dobot-nova2-ros2/pyproject.toml new file mode 100644 index 0000000..30fed96 --- /dev/null +++ b/robodriver/robots/robodriver-robot-dobot-nova2-ros2/pyproject.toml @@ -0,0 +1,25 @@ +[build-system] +requires = ["setuptools>=61.0", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "robodriver_robot_dobot_nova2_ros2" +version = "0.1.0" +readme = "README.md" +requires-python = ">=3.8" +license = "Apache-2.0" +authors = [ + {name = "wangqi", email = "wangqi10@inspur.com"}, +] +keywords = ["robotics", "lerobot", "galaxea"] +dependencies = [ + "dora-rs-cli", + "logging_mp", + "opencv-python", +] + +[tool.setuptools.packages.find] +include = [ + "robodriver_robot_dobot_nova2_ros2", + "lerobot_robot_dobot_nova2_ros2" +] diff --git a/robodriver/robots/robodriver-robot-dobot-nova2-ros2/robodriver_robot_dobot_nova2_ros2/__init__.py b/robodriver/robots/robodriver-robot-dobot-nova2-ros2/robodriver_robot_dobot_nova2_ros2/__init__.py new file mode 100644 index 0000000..7770f43 --- /dev/null +++ b/robodriver/robots/robodriver-robot-dobot-nova2-ros2/robodriver_robot_dobot_nova2_ros2/__init__.py @@ -0,0 +1,3 @@ +from .config import DobotNova2Ros2RobotConfig +from .robot import DobotNova2Ros2Robot +from .status import DobotNova2Ros2RobotStatus diff --git 
# ---- file: robodriver_robot_dobot_nova2_ros2/config.py ----
from typing import Dict
from dataclasses import dataclass, field

from lerobot.robots.config import RobotConfig
from lerobot.cameras import CameraConfig
from lerobot.cameras.opencv import OpenCVCameraConfig
from lerobot.motors import Motor, MotorNormMode


@RobotConfig.register_subclass("dobot-nova2-ros2")
@dataclass
class DobotNova2Ros2RobotConfig(RobotConfig):
    """Configuration for the Dobot Nova2 (ROS2) leader/follower teleoperation robot."""

    # When True joints are normalised in degrees, otherwise to the [-100, 100] range.
    use_degrees = True
    norm_mode_body = (
        MotorNormMode.DEGREES if use_degrees else MotorNormMode.RANGE_M100_100
    )

    # Leader (teleoperation) arm: 6 joints + 1 gripper.
    leader_motors: Dict[str, Motor] = field(
        default_factory=lambda norm_mode_body=norm_mode_body: {
            "leader_arms": {
                "joint1-l": Motor(1, "robot_motor", norm_mode_body),
                "joint2-l": Motor(2, "robot_motor", norm_mode_body),
                "joint3-l": Motor(3, "robot_motor", norm_mode_body),
                "joint4-l": Motor(4, "robot_motor", norm_mode_body),
                "joint5-l": Motor(5, "robot_motor", norm_mode_body),
                "joint6-l": Motor(6, "robot_motor", norm_mode_body),
                "gripper1-l": Motor(7, "robot_motor", norm_mode_body),
            }
        }
    )

    # Follower (executing) arm: 6 joints + 1 gripper.
    follower_motors: Dict[str, Motor] = field(
        default_factory=lambda norm_mode_body=norm_mode_body: {
            "follower_arms": {
                "joint1-r": Motor(1, "robot_motor", norm_mode_body),
                "joint2-r": Motor(2, "robot_motor", norm_mode_body),
                "joint3-r": Motor(3, "robot_motor", norm_mode_body),
                "joint4-r": Motor(4, "robot_motor", norm_mode_body),
                "joint5-r": Motor(5, "robot_motor", norm_mode_body),
                "joint6-r": Motor(6, "robot_motor", norm_mode_body),
                "gripper1-r": Motor(7, "robot_motor", norm_mode_body),
            }
        }
    )

    # NOTE(review): sizes here (848x480) differ from the decode sizes used in
    # node.py (1280x720 / 640x360) — harmless for JPEG decoding, but confirm.
    cameras: Dict[str, CameraConfig] = field(
        default_factory=lambda: {
            "image_top": OpenCVCameraConfig(
                index_or_path=1,
                fps=30,
                width=848,
                height=480,
            ),
            "image_wrist_left": OpenCVCameraConfig(
                index_or_path=2,
                fps=30,
                width=848,
                height=480,
            ),
            "image_wrist_right": OpenCVCameraConfig(
                index_or_path=3,
                fps=30,
                width=848,
                height=480,
            ),
        }
    )

    use_videos: bool = True

    microphones: Dict[str, int] = field(default_factory=lambda: {})


# ---- file: robodriver_robot_dobot_nova2_ros2/node.py ----
import threading
import time
from typing import Dict

import numpy as np
import cv2
import rclpy
from rclpy.node import Node as ROS2Node
from sensor_msgs.msg import JointState, CompressedImage
from message_filters import Subscriber, ApproximateTimeSynchronizer
from geometry_msgs.msg import PoseStamped
from rclpy.qos import QoSProfile, DurabilityPolicy, ReliabilityPolicy, HistoryPolicy

import logging_mp

# Number of status frames a device remains "connected" after its last message.
CONNECT_TIMEOUT_FRAME = 10
logger = logging_mp.get_logger(__name__)


class DobotNova2Ros2RobotNode(ROS2Node):
    """ROS2 node that aggregates time-synchronized joint states and camera images."""

    def __init__(self):
        super().__init__('ros2_recv_pub_driver')
        self.stop_spin = False  # set True to stop the external spin loop

        self.qos = QoSProfile(
            durability=DurabilityPolicy.VOLATILE,
            reliability=ReliabilityPolicy.RELIABLE,
            history=HistoryPolicy.KEEP_LAST,
            depth=10
        )

        self.qos_best_effort = QoSProfile(
            durability=DurabilityPolicy.VOLATILE,
            reliability=ReliabilityPolicy.BEST_EFFORT,
            history=HistoryPolicy.KEEP_LAST,
            depth=10
        )

        # Throttle callbacks to 30 Hz.
        self.last_main_send_time_ns = 0
        self.last_follow_send_time_ns = 0
        self.min_interval_ns = 1e9 / 30  # 30Hz

        self._init_message_filters()
        self._init_image_message_filters()

        # Latest data per stream, plus per-stream "connected" frame counters.
        self.recv_images: Dict[str, float] = {}
        self.recv_leader: Dict[str, float] = {}
        self.recv_follower: Dict[str, float] = {}
        self.recv_images_status: Dict[str, int] = {}
        self.recv_leader_status: Dict[str, int] = {}
        self.recv_follower_status: Dict[str, int] = {}

        # Guards all recv_* dictionaries.
        self.lock = threading.Lock()

    def _init_message_filters(self):
        """Synchronize the left/right joint-state topics."""
        arm_left = Subscriber(self, JointState, '/joint_states_robot_left')
        arm_right = Subscriber(self, JointState, '/joint_states_robot_right')

        self.sync = ApproximateTimeSynchronizer(
            [arm_left, arm_right],
            queue_size=10,
            slop=0.1
        )
        self.sync.registerCallback(self.synchronized_callback)

    def synchronized_callback(self, arm_left, arm_right):
        """Store the latest synchronized joint positions (rate-limited to 30 Hz).

        NOTE(review): the left arm is stored as follower and the right as leader —
        confirm this mapping against the teleoperation setup.
        """
        try:
            current_time_ns = time.time_ns()
            if (current_time_ns - self.last_follow_send_time_ns) < self.min_interval_ns:
                return
            self.last_follow_send_time_ns = current_time_ns

            left_pos = np.array(arm_left.position, dtype=np.float32)
            right_pos = np.array(arm_right.position, dtype=np.float32)

            left_arm_data = left_pos
            right_arm_data = right_pos

            left_merged_data = np.concatenate([left_arm_data])
            right_merged_data = np.concatenate([right_arm_data])
            with self.lock:
                self.recv_follower['follower_arms'] = left_merged_data
                self.recv_follower_status['follower_arms'] = CONNECT_TIMEOUT_FRAME
                self.recv_leader['leader_arms'] = right_merged_data
                self.recv_leader_status['leader_arms'] = CONNECT_TIMEOUT_FRAME
        except Exception as e:
            self.get_logger().error(f"Synchronized follow callback error: {e}")

    def _init_image_message_filters(self):
        """Synchronize the three compressed camera topics."""
        sub_camera_top = Subscriber(self, CompressedImage, '/camera/camera_top/color/image_rect_raw/compressed')
        sub_camera_wrist_left = Subscriber(self, CompressedImage, '/camera/camera_left/color/image_rect_raw/compressed')
        sub_camera_wrist_right = Subscriber(self, CompressedImage, '/camera/camera_right/color/image_rect_raw/compressed')

        self.image_sync = ApproximateTimeSynchronizer(
            [sub_camera_top, sub_camera_wrist_left, sub_camera_wrist_right],
            queue_size=5,
            slop=0.1
        )
        self.image_sync.registerCallback(self.image_synchronized_callback)

    def image_synchronized_callback(self, top, wrist_left, wrist_right):
        """Decode and store one synchronized frame from each camera."""
        try:
            self.images_recv(top, "image_top", 1280, 720)
            self.images_recv(wrist_left, "image_wrist_left", 640, 360)
            self.images_recv(wrist_right, "image_wrist_right", 640, 360)
        except Exception as e:
            self.get_logger().error(f"Image synchronized callback error: {e}")

    def images_recv(self, msg, event_id, width, height, encoding="jpeg"):
        """Decode one incoming image message and store it under event_id.

        `width`/`height` are only used for the raw (bgr8/rgb8/depth16) encodings;
        compressed formats get their size from the decoder.
        """
        try:
            if 'image' in event_id:
                frame = None  # fixed: was unbound for unknown encodings (masked NameError)
                img_array = np.frombuffer(msg.data, dtype=np.uint8)
                if encoding == "bgr8":
                    channels = 3
                    frame = img_array.reshape((height, width, channels)).copy()
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                elif encoding == "rgb8":
                    channels = 3
                    frame = img_array.reshape((height, width, channels))
                    # NOTE(review): RGB2BGR followed by BGR2RGB is a round trip
                    # (net no-op besides copying) — kept to preserve behavior.
                    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                elif encoding in ["jpeg", "jpg", "jpe", "bmp", "webp", "png"]:
                    channels = 3
                    frame = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                elif encoding == "depth16":
                    frame = np.frombuffer(msg.data, dtype=np.uint16).reshape(height, width, 1)

                if frame is not None:
                    with self.lock:
                        self.recv_images[event_id] = frame
                        self.recv_images_status[event_id] = CONNECT_TIMEOUT_FRAME
        except Exception as e:
            logger.error(f"recv image error: {e}")

    def destroy(self):
        """Stop the spin loop and destroy the underlying ROS2 node."""
        self.stop_spin = True
        super().destroy_node()


def ros_spin_thread(node):
    """Spin `node` until rclpy shuts down or node.stop_spin is set (for external threads)."""
    while rclpy.ok() and not getattr(node, "stop_spin", False):
        rclpy.spin_once(node, timeout_sec=0.01)
# ---- file: robodriver_robot_dobot_nova2_ros2/robot.py ----
import threading
import time
from typing import Any

import logging_mp
import numpy as np
from lerobot.cameras import make_cameras_from_configs
from lerobot.robots.robot import Robot
from lerobot.utils.errors import DeviceNotConnectedError, DeviceAlreadyConnectedError
from functools import cached_property

import rclpy

from .config import DobotNova2Ros2RobotConfig
from .status import DobotNova2Ros2RobotStatus
from .node import DobotNova2Ros2RobotNode, ros_spin_thread


logger = logging_mp.get_logger(__name__)


class DobotNova2Ros2Robot(Robot):
    """RoboDriver robot wrapper exposing the Dobot Nova2 ROS2 node to lerobot."""

    config_class = DobotNova2Ros2RobotConfig
    # fixed: was "galaxealite-aio-ros2" (copy-paste leftover) — must match the
    # registered config type "dobot-nova2-ros2".
    name = "dobot-nova2-ros2"

    def __init__(self, config: DobotNova2Ros2RobotConfig):
        super().__init__(config)
        self.config = config
        self.robot_type = self.config.type
        self.use_videos = self.config.use_videos
        self.microphones = self.config.microphones

        self.leader_motors = config.leader_motors
        self.follower_motors = config.follower_motors
        self.cameras = make_cameras_from_configs(self.config.cameras)

        # Cameras that are not required for a successful connect().
        self.connect_excluded_cameras = ["image_pika_pose"]

        self.status = DobotNova2Ros2RobotStatus()
        if not rclpy.ok():
            rclpy.init()
        self.robot_ros2_node = DobotNova2Ros2RobotNode()
        # Background thread spinning the ROS2 node.
        self.ros_spin_thread = threading.Thread(
            target=ros_spin_thread,
            args=(self.robot_ros2_node,),
            daemon=True
        )
        self.ros_spin_thread.start()

        self.connected = False
        self.logs = {}

    @property
    def _follower_motors_ft(self) -> dict[str, type]:
        """Feature types for the follower joints, keyed `follower_<joint>.pos`."""
        return {
            f"follower_{joint_name}.pos": float
            for comp_name, joints in self.follower_motors.items()
            for joint_name in joints.keys()
        }

    @property
    def _leader_motors_ft(self) -> dict[str, type]:
        """Feature types for the leader joints, keyed `leader_<joint>.pos`."""
        return {
            f"leader_{joint_name}.pos": float
            for comp_name, joints in self.leader_motors.items()
            for joint_name in joints.keys()
        }

    @property
    def _cameras_ft(self) -> dict[str, tuple]:
        """Image feature shapes (H, W, 3) per configured camera."""
        return {
            cam: (self.config.cameras[cam].height, self.config.cameras[cam].width, 3) for cam in self.cameras
        }

    @cached_property
    def observation_features(self) -> dict[str, type | tuple]:
        return {**self._follower_motors_ft, **self._cameras_ft}

    @cached_property
    def action_features(self) -> dict[str, type]:
        return self._leader_motors_ft

    @property
    def is_connected(self) -> bool:
        return self.connected

    def connect(self):
        """Wait (up to 20 s) until camera images and both arms' joint data arrive.

        Raises:
            DeviceAlreadyConnectedError: if connect() was already called.
            TimeoutError: listing the streams that never arrived.
        """
        timeout = 20  # shared deadline in seconds
        start_time = time.perf_counter()

        if self.connected:
            raise DeviceAlreadyConnectedError(f"{self} already connected")

        # Each condition: (is-satisfied, list-missing-names, timeout message).
        conditions = [
            (
                lambda: all(
                    name in self.robot_ros2_node.recv_images
                    for name in self.cameras
                    if name not in self.connect_excluded_cameras
                ),
                lambda: [name for name in self.cameras if name not in self.robot_ros2_node.recv_images],
                "等待摄像头图像超时",
            ),
            (
                lambda: all(
                    any(name in key for key in self.robot_ros2_node.recv_leader)
                    for name in self.leader_motors
                ),
                lambda: [
                    name
                    for name in self.leader_motors
                    if not any(name in key for key in self.robot_ros2_node.recv_leader)
                ],
                "等待主臂关节角度超时",
            ),
            (
                lambda: all(
                    any(name in key for key in self.robot_ros2_node.recv_follower)
                    for name in self.follower_motors
                ),
                lambda: [
                    name
                    for name in self.follower_motors
                    if not any(name in key for key in self.robot_ros2_node.recv_follower)
                ],
                "等待从臂关节角度超时",
            ),
        ]

        # Track which conditions have been satisfied so far.
        completed = [False] * len(conditions)

        while True:
            # Re-check every still-pending condition.
            for i in range(len(conditions)):
                if not completed[i]:
                    condition_func = conditions[i][0]
                    if condition_func():
                        completed[i] = True

            # All satisfied: connected.
            if all(completed):
                break

            # Deadline handling: build a per-condition failure report.
            if time.perf_counter() - start_time > timeout:
                failed_messages = []
                for i, (cond, get_missing, base_msg) in enumerate(conditions):
                    if completed[i]:
                        continue

                    missing = get_missing()
                    # A condition may have become true just now — re-check.
                    if cond() or not missing:
                        completed[i] = True
                        continue

                    if i == 0:
                        received = [
                            name
                            for name in self.cameras
                            if name not in missing
                        ]
                    elif i == 1:
                        received = [
                            name
                            for name in self.leader_motors
                            if name not in missing
                        ]
                    else:
                        received = [
                            name
                            for name in self.follower_motors
                            if name not in missing
                        ]

                    msg = (
                        f"{base_msg}: 未收到 [{', '.join(missing)}]; "
                        f"已收到 [{', '.join(received)}]"
                    )
                    failed_messages.append(msg)

                if not failed_messages:
                    break

                raise TimeoutError(
                    f"连接超时,未满足的条件: {'; '.join(failed_messages)}"
                )

            # Keep CPU usage low while polling.
            time.sleep(0.01)

        # Summarize which streams were received.
        success_messages = []

        if conditions[0][0]():
            cam_received = [
                name
                for name in self.cameras
                if name in self.robot_ros2_node.recv_images
                and name not in self.connect_excluded_cameras
            ]
            success_messages.append(f"摄像头: {', '.join(cam_received)}")

        if conditions[1][0]():
            leader_received = [
                name
                for name in self.leader_motors
                if any(name in key for key in self.robot_ros2_node.recv_leader)
            ]
            success_messages.append(f"主臂数据: {', '.join(leader_received)}")

        if conditions[2][0]():
            follower_received = [
                name
                for name in self.follower_motors
                if any(name in key for key in self.robot_ros2_node.recv_follower)
            ]
            success_messages.append(f"从臂数据: {', '.join(follower_received)}")

        log_message = "\n[连接成功] 所有设备已就绪:\n"
        log_message += "\n".join(f"  - {msg}" for msg in success_messages)
        log_message += f"\n  总耗时: {time.perf_counter() - start_time:.2f} 秒\n"
        logger.info(log_message)

        # Mark every configured device as connected in the status report.
        for i in range(self.status.specifications.camera.number):
            self.status.specifications.camera.information[i].is_connect = True
        for i in range(self.status.specifications.arm.number):
            self.status.specifications.arm.information[i].is_connect = True

        self.connected = True

    @property
    def is_calibrated(self) -> bool:
        """Whether the robot is currently calibrated or not. Should be always `True` if not applicable"""
        return True

    def calibrate(self) -> None:
        """
        Calibrate the robot if applicable. If not, this should be a no-op.

        This method should collect any necessary data (e.g., motor offsets) and update the
        :pyattr:`calibration` dictionary accordingly.
        """
        pass

    def configure(self) -> None:
        """
        Apply any one-time or runtime configuration to the robot.
        This may include setting motor parameters, control modes, or initial state.
        """
        pass

    def get_observation(self) -> dict[str, Any]:
        """Return follower joint positions and the latest camera frames.

        Raises:
            DeviceNotConnectedError: if connect() has not completed.
        """
        if not self.connected:
            raise DeviceNotConnectedError(f"{self} is not connected.")

        start = time.perf_counter()
        obs_dict: dict[str, Any] = {}
        # NOTE(review): recv_follower is read without node.lock; values are
        # replaced atomically in the callback, so this relies on the GIL — confirm.
        for comp_name, joints in self.follower_motors.items():
            for follower_name, follower in self.robot_ros2_node.recv_follower.items():
                if follower_name == comp_name:
                    for i, joint_name in enumerate(joints.keys()):
                        obs_dict[f"follower_{joint_name}.pos"] = float(follower[i])

        dt_ms = (time.perf_counter() - start) * 1e3
        logger.debug(f"{self} read follower state: {dt_ms:.1f} ms")

        # Latest camera images.
        for cam_key, _cam in self.cameras.items():
            start = time.perf_counter()
            for name, val in self.robot_ros2_node.recv_images.items():
                if cam_key == name or cam_key in name:
                    obs_dict[cam_key] = val
                    break
            dt_ms = (time.perf_counter() - start) * 1e3
            logger.debug(f"{self} read {cam_key}: {dt_ms:.1f} ms")

        return obs_dict

    def get_action(self) -> dict[str, Any]:
        """Return the leader arm joint positions as the teleoperation action."""
        if not self.connected:
            raise DeviceNotConnectedError(f"{self} is not connected.")

        start = time.perf_counter()
        act_dict: dict[str, Any] = {}

        for comp_name, joints in self.leader_motors.items():
            for leader_name, leader in self.robot_ros2_node.recv_leader.items():
                if leader_name == comp_name:
                    for i, joint_name in enumerate(joints.keys()):
                        act_dict[f"leader_{joint_name}.pos"] = float(leader[i])

        dt_ms = (time.perf_counter() - start) * 1e3
        logger.debug(f"{self} read action: {dt_ms:.1f} ms")

        return act_dict

    def send_action(self, action: dict[str, Any]):
        """没有实现下行控制"""

    def update_status(self) -> str:
        """Refresh per-device is_connect flags from the node counters; return JSON."""
        for i in range(self.status.specifications.camera.number):
            match_name = self.status.specifications.camera.information[i].name
            for name in self.robot_ros2_node.recv_images_status:
                if match_name in name:
                    self.status.specifications.camera.information[i].is_connect = (
                        self.robot_ros2_node.recv_images_status[name] > 0
                    )

        for i in range(self.status.specifications.arm.number):
            match_name = self.status.specifications.arm.information[i].name
            for name in self.robot_ros2_node.recv_leader_status:
                if match_name in name:
                    self.status.specifications.arm.information[i].is_connect = (
                        self.robot_ros2_node.recv_leader_status[name] > 0
                    )

        for i in range(self.status.specifications.arm.number):
            match_name = self.status.specifications.arm.information[i].name
            for name in self.robot_ros2_node.recv_follower_status:
                if match_name in name:
                    self.status.specifications.arm.information[i].is_connect = (
                        self.robot_ros2_node.recv_follower_status[name] > 0
                    )

        return self.status.to_json()

    def disconnect(self):
        """Destroy the ROS2 node and shut rclpy down.

        Raises:
            DeviceNotConnectedError: if the robot was never connected.
        """
        if not self.is_connected:
            raise DeviceNotConnectedError(
                "robot is not connected. You need to run `robot.connect()` before disconnecting."
            )
        # fixed: was `hasattr(self, "ros_node")` — that attribute never exists,
        # so the node was never destroyed.
        if hasattr(self, "robot_ros2_node"):
            self.robot_ros2_node.destroy()
        if rclpy.ok():
            rclpy.shutdown()

        self.connected = False

    def __del__(self):
        # Best-effort cleanup; never raise from a finalizer.
        try:
            if getattr(self, "is_connected", False):
                self.disconnect()
        except Exception:
            pass
# ---- file: robodriver_robot_dobot_nova2_ros2/status.py ----
from dataclasses import dataclass, field, asdict
from typing import List, Optional
import json
import abc
import draccus


@dataclass
class CameraInfo:
    """Static description + connection state of one camera stream."""
    name: str = ""
    chinese_name: str = ""
    type: str = ""
    width: int = 0
    height: int = 0
    is_connect: bool = False


@dataclass
class CameraStatus:
    """Camera inventory; `number` always mirrors len(information)."""
    number: int = 0
    information: List[CameraInfo] = field(default_factory=list)

    def __post_init__(self):
        # len([]) == 0, so this covers the empty case too.
        self.number = len(self.information)


@dataclass
class ArmInfo:
    """Static description + connection state of one arm."""
    name: str = ""
    type: str = ""
    start_pose: List[float] = field(default_factory=list)
    joint_p_limit: List[float] = field(default_factory=list)
    joint_n_limit: List[float] = field(default_factory=list)
    is_connect: bool = False


@dataclass
class ArmStatus:
    """Arm inventory; `number` always mirrors len(information)."""
    number: int = 0
    information: List[ArmInfo] = field(default_factory=list)

    def __post_init__(self):
        self.number = len(self.information)


@dataclass
class Specifications:
    """Top-level hardware specification block of a status report."""
    end_type: str = "Default"
    fps: int = 30
    camera: Optional[CameraStatus] = None
    arm: Optional[ArmStatus] = None


@dataclass
class RobotStatus(draccus.ChoiceRegistry, abc.ABC):
    """Base status report; subclasses register themselves via register_subclass."""
    device_name: str = "Default"
    device_body: str = "Default"
    specifications: Specifications = field(default_factory=Specifications)

    @property
    def type(self) -> str:
        return self.get_choice_name(self.__class__)

    def to_dict(self) -> dict:
        return asdict(self)

    def to_json(self) -> str:
        return json.dumps(self.to_dict(), ensure_ascii=False)


# fixed: was a bare statement `RobotStatus.register_subclass(...)` — without the
# `@` the returned decorator is discarded and the subclass is never registered
# (config.py uses the decorator form).
@RobotStatus.register_subclass("dobot-nova2-ros2")
@dataclass
class DobotNova2Ros2RobotStatus(RobotStatus):
    """Status report for the Dobot Nova2 (ROS2) robot."""
    device_name: str = "越疆-nova2"
    device_body: str = "越疆"

    def __post_init__(self):
        self.specifications.end_type = "二指夹爪"
        self.specifications.fps = 30
        self.specifications.camera = CameraStatus(
            information=[
                CameraInfo(
                    name="image_top",
                    chinese_name="头部摄像头",
                    type="纯双目视觉相机",
                    width=1280,
                    height=720,
                    is_connect=False
                ),
                CameraInfo(
                    name="image_wrist_left",
                    chinese_name="腕部左摄像头",
                    type="单目深度相机",
                    width=640,
                    height=360,
                    is_connect=False
                ),
                CameraInfo(
                    name="image_wrist_right",
                    chinese_name="腕部右摄像头",
                    type="单目深度相机",
                    width=640,
                    height=360,
                    is_connect=False
                )
            ]
        )

        # NOTE(review): "piper_left"/"piper_right" and the "Galaxea ..." type
        # strings look like copy-paste leftovers from another robot; these names
        # also never match the "leader_arms"/"follower_arms" keys used by
        # robot.update_status(), so is_connect is never refreshed — confirm
        # the intended arm naming.
        self.specifications.arm = ArmStatus(
            information=[
                ArmInfo(
                    name="piper_left",
                    type="Galaxea A1X + Galaxea G1 - 7DOF",
                    start_pose=[],
                    joint_p_limit=[165.0, 180.0, 0.0, 90.0, 90.0, 165.0],
                    joint_n_limit=[-165.0, 0.0, -190.0, -90.0, -90.0, -165.0],
                    is_connect=False
                ),
                ArmInfo(
                    name="piper_right",
                    type="Galaxea A1X + Galaxea G1 - 7DOF",
                    start_pose=[],
                    joint_p_limit=[165.0, 180.0, 0.0, 90.0, 90.0, 165.0],
                    joint_n_limit=[-165.0, 0.0, -190.0, -90.0, -90.0, -165.0],
                    is_connect=False
                ),
            ]
        )