From 62579212c18b9d8e549be87250eb5691c7061f60 Mon Sep 17 00:00:00 2001 From: Wensi Ai Date: Tue, 16 Jan 2024 23:17:16 -0800 Subject: [PATCH 01/21] Update teleop --- .../teleoperation/robot_teleoperate_demo.py | 50 ++- omnigibson/robots/behavior_robot.py | 22 +- omnigibson/robots/manipulation_robot.py | 45 ++- omnigibson/robots/robot_base.py | 8 +- omnigibson/robots/tiago.py | 10 +- omnigibson/robots/two_wheel_robot.py | 17 +- omnigibson/utils/teleop_utils.py | 299 +----------------- 7 files changed, 83 insertions(+), 368 deletions(-) diff --git a/omnigibson/examples/teleoperation/robot_teleoperate_demo.py b/omnigibson/examples/teleoperation/robot_teleoperate_demo.py index 75998ee8a..c429c0478 100644 --- a/omnigibson/examples/teleoperation/robot_teleoperate_demo.py +++ b/omnigibson/examples/teleoperation/robot_teleoperate_demo.py @@ -3,26 +3,33 @@ """ import omnigibson as og from omnigibson.utils.ui_utils import choose_from_options +from omnigibson.utils.teleop_utils import TeleopSystem +from real_tiago.utils.general_utils import AttrDict +teleop_config = AttrDict( + arm_left_controller='keyboard', + arm_right_controller='keyboard', + base_controller='keyboard', + torso_controller='keyboard', + + interface_kwargs=AttrDict( + vr={}, + human_kpt={}, + keyboard={}, + spacemouse={} + ) +) ROBOTS = { "FrankaPanda": "Franka Emika Panda (default)", "Fetch": "Mobile robot with one arm", "Tiago": "Mobile robot with two arms", } -SYSTEMS = { - "Keyboard": "Keyboard (default)", - "SteamVR": "SteamVR with HTC VIVE through OmniverseXR plugin", - "Oculus": "Oculus Reader with Quest 2", - "SpaceMouse": "Space Mouse", -} - def main(): - teleop_system = choose_from_options(options=SYSTEMS, name="system") robot_name = choose_from_options(options=ROBOTS, name="robot") # Create the config for generating the environment we want - env_cfg = {"action_timestep": 1 / 60., "physics_timestep": 1 / 300.} + env_cfg = {"action_timestep": 1 / 50., "physics_timestep": 1 / 200.} scene_cfg = {"type": "Scene"} # Add the robot we want to load robot_cfg = { @@ -36,7 +43,7 @@ def main(): for arm in arms: robot_cfg["controller_config"][f"arm_{arm}"] = { "name": "InverseKinematicsController", - "mode": "pose_absolute_ori", + "mode": "pose_delta_ori", "motor_type": "position" } robot_cfg["controller_config"][f"gripper_{arm}"] = { @@ -123,30 +130,13 @@ def main(): robot = env.robots[0] # Initialize teleoperation system - if teleop_system == "SteamVR": - from omnigibson.utils.teleop_utils import OVXRSystem as TeleopSystem - elif teleop_system == "Oculus": - from omnigibson.utils.teleop_utils import OculusReaderSystem as TeleopSystem - elif teleop_system == "SpaceMouse": - from omnigibson.utils.teleop_utils import SpaceMouseSystem as TeleopSystem - elif teleop_system == "Keyboard": - from omnigibson.utils.teleop_utils import KeyboardSystem as TeleopSystem - teleop_sys = TeleopSystem(robot=robot, disable_display_output=True, align_anchor_to_robot_base=True) + teleop_sys = TeleopSystem(config=teleop_config, robot=robot, disable_display_output=True, align_anchor_to_robot_base=True) teleop_sys.start() - # tracker variable of whether the robot is attached to the VR system - prev_robot_attached = False # main simulation loop for _ in range(10000): if og.sim.is_playing(): - teleop_sys.update() - if teleop_sys.teleop_data.robot_attached and not prev_robot_attached: - teleop_sys.reset_transform_mapping() - if robot_name == "Tiago": - teleop_sys.reset_transform_mapping("left") - else: - action = teleop_sys.teleop_data_to_action() - 
env.step(action) - prev_robot_attached = teleop_sys.teleop_data.robot_attached + action = teleop_sys.get_action() + env.step(action) # Shut down the environment cleanly at the end teleop_sys.stop() env.close() diff --git a/omnigibson/robots/behavior_robot.py b/omnigibson/robots/behavior_robot.py index e5435e536..5b3761e9e 100644 --- a/omnigibson/robots/behavior_robot.py +++ b/omnigibson/robots/behavior_robot.py @@ -5,6 +5,7 @@ import os from scipy.spatial.transform import Rotation as R from typing import List, Tuple, Iterable +from real_tiago.user_interfaces.teleop_core import TeleopAction import omnigibson as og import omnigibson.lazy as lazy @@ -14,7 +15,6 @@ from omnigibson.robots.manipulation_robot import ManipulationRobot, GraspingPoint from omnigibson.robots.active_camera_robot import ActiveCameraRobot from omnigibson.objects.usd_object import USDObject -from omnigibson.utils.teleop_utils import TeleopData m = create_module_macros(module_path=__file__) # component suffixes for the 6-DOF arm joint names @@ -415,9 +415,9 @@ def update_hand_contact_info(self): self._part_is_in_contact[hand_name] = len(self.eef_links[hand_name].contact_list()) > 0 \ or np.any([len(finger.contact_list()) > 0 for finger in self.finger_links[hand_name]]) - def teleop_data_to_action(self, teleop_data: TeleopData) -> np.ndarray: + def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: """ - Generates an action for the BehaviorRobot to perform based on vr data dict. + Generates an action for the BehaviorRobot to perform based on teleop action data dict. Action space (all non-normalized values that will be clipped if they are too large) Body: @@ -433,8 +433,8 @@ def teleop_data_to_action(self, teleop_data: TeleopData) -> np.ndarray: # Actions are stored as 1D numpy array action = np.zeros(self.action_dim) # Update body action space - if teleop_data.is_valid["head"]: - head_pos, head_orn = teleop_data.transforms["head"] + if teleop_action.is_valid["head"]: + head_pos, head_orn = teleop_action.transforms["head"] des_body_pos = head_pos - np.array([0, 0, m.BODY_HEIGHT_OFFSET]) des_body_rpy = np.array([0, 0, R.from_quat(head_orn).as_euler("XYZ")[2]]) des_body_orn = T.euler2quat(des_body_rpy) @@ -446,20 +446,20 @@ def teleop_data_to_action(self, teleop_data: TeleopData) -> np.ndarray: for part_name, eef_part in self.parts.items(): # Process local transform adjustments hand_data = 0 - if teleop_data.is_valid[part_name]: - part_pos, part_orn = teleop_data.transforms[part_name] + if teleop_action.is_valid[part_name]: + part_pos, part_orn = teleop_action.transforms[part_name] # apply eef rotation offset to part transform first des_world_part_pos = part_pos des_world_part_orn = T.quat_multiply(part_orn, eef_part.offset_to_body[1]) if eef_part.name in self.arm_names: # compute gripper action - if hasattr(teleop_data, "hand_data"): + if hasattr(teleop_action, "hand_data"): # hand tracking mode, compute joint rotations for each independent hand joint - hand_data = teleop_data.hand_data[part_name] + hand_data = teleop_action.hand_data[part_name] hand_data = hand_data[:, :2].T.reshape(-1) else: # controller mode, map trigger fraction from [0, 1] to [-1, 1] range. 
- hand_data = teleop_data.grippers[part_name] * 2 - 1 + hand_data = teleop_action.grippers[part_name] * 2 - 1 action[self.controller_action_idx[f"gripper_{eef_part.name}"]] = hand_data # update ghost hand if necessary if self._use_ghost_hands: @@ -479,7 +479,7 @@ def teleop_data_to_action(self, teleop_data: TeleopData) -> np.ndarray: controller_name = "camera" if part_name == "head" else "arm_" + eef_part.name action[self.controller_action_idx[controller_name]] = np.r_[des_local_part_pos, des_part_rpy] # If we reset, teleop the robot parts to the desired pose - if eef_part.name in self.arm_names and teleop_data.reset[part_name]: + if eef_part.name in self.arm_names and teleop_action.reset[part_name]: self.parts[part_name].set_position_orientation(des_local_part_pos, des_part_rpy) return action diff --git a/omnigibson/robots/manipulation_robot.py b/omnigibson/robots/manipulation_robot.py index 85fc1f43e..cfc3f9f4d 100644 --- a/omnigibson/robots/manipulation_robot.py +++ b/omnigibson/robots/manipulation_robot.py @@ -2,6 +2,9 @@ from collections import namedtuple import numpy as np import networkx as nx +from omni.physx import get_physx_scene_query_interface +from pxr import Gf +from real_tiago.user_interfaces.teleop_core import TeleopAction import omnigibson as og import omnigibson.lazy as lazy @@ -20,7 +23,6 @@ from omnigibson.utils.geometry_utils import generate_points_in_volume_checker_function from omnigibson.utils.constants import JointType, PrimType from omnigibson.utils.usd_utils import create_joint -from omnigibson.utils.teleop_utils import TeleopData from omnigibson.utils.sampling_utils import raytest_batch # Create settings for this module @@ -1474,38 +1476,31 @@ def teleop_rotation_offset(self): """ return {arm: np.array([0, 0, 0, 1]) for arm in self.arm_names} - def teleop_data_to_action(self, teleop_data: TeleopData) -> np.ndarray: + def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: """ - Generate action data from teleoperation data + Generate action data from teleoperation action data NOTE: This implementation only supports IK/OSC controller for arm and MultiFingerGripperController for gripper. Overwrite this function if the robot is using a different controller. Args: - teleop_data (TeleopData): teleoperation data + teleop_action (TeleopAction): teleoperation action data Returns: np.ndarray: array of action data for arm and gripper """ - action = super().teleop_data_to_action(teleop_data) + action = super().teleop_data_to_action(teleop_action) hands = ["left", "right"] if self.n_arms == 2 else ["right"] for i, hand in enumerate(hands): arm_name = self.arm_names[i] - if teleop_data.is_valid[hand]: - # arm action - assert \ - isinstance(self._controllers[f"arm_{arm_name}"], InverseKinematicsController) or \ - isinstance(self._controllers[f"arm_{arm_name}"], OperationalSpaceController), \ - f"Only IK and OSC controllers are supported for arm {arm_name}!" 
- cur_eef_pos, cur_eef_orn = self.links[self.eef_link_names[arm_name]].get_position_orientation() - if teleop_data.robot_attached: - target_pos, target_orn = teleop_data.transforms[hand] - else: - target_pos, target_orn = cur_eef_pos, cur_eef_orn - # get orientation relative to robot base - base_pos, base_orn = self.get_position_orientation() - rel_des_pos, rel_des_orn = T.relative_pose_transform(target_pos, target_orn, base_pos, base_orn) - rel_cur_pos, _ = T.relative_pose_transform(cur_eef_pos, cur_eef_orn, base_pos, base_orn) - action[self.arm_action_idx[arm_name]] = np.r_[rel_des_pos - rel_cur_pos, T.quat2axisangle(rel_des_orn)] - # gripper action - assert isinstance(self._controllers[f"gripper_{arm_name}"], MultiFingerGripperController), \ - f"Only MultiFingerGripperController is supported for gripper {arm_name}!" - action[self.gripper_action_idx[arm_name]] = teleop_data.grippers[hand] + arm_action = getattr(teleop_action, hand) + # arm action + assert \ + isinstance(self._controllers[f"arm_{arm_name}"], InverseKinematicsController) or \ + isinstance(self._controllers[f"arm_{arm_name}"], OperationalSpaceController), \ + f"Only IK and OSC controllers are supported for arm {arm_name}!" + target_pos, target_rpy = arm_action[:3], arm_action[3:6] + target_orn = T.quat2axisangle(T.euler2quat(target_rpy)) + action[self.arm_action_idx[arm_name]] = np.r_[target_pos, target_orn] + # gripper action + assert isinstance(self._controllers[f"gripper_{arm_name}"], MultiFingerGripperController), \ + f"Only MultiFingerGripperController is supported for gripper {arm_name}!" + action[self.gripper_action_idx[arm_name]] = arm_action[6] return action diff --git a/omnigibson/robots/robot_base.py b/omnigibson/robots/robot_base.py index f15a34af7..f242d77b5 100644 --- a/omnigibson/robots/robot_base.py +++ b/omnigibson/robots/robot_base.py @@ -2,6 +2,8 @@ from copy import deepcopy import numpy as np import matplotlib.pyplot as plt +from real_tiago.user_interfaces.teleop_core import TeleopAction + from omnigibson.macros import create_module_macros from omnigibson.sensors import create_sensor, SENSOR_PRIMS_TO_SENSOR_CLS, ALL_SENSOR_MODALITIES, VisionSensor, ScanSensor from omnigibson.objects.usd_object import USDObject @@ -478,11 +480,11 @@ def reset_joint_pos_aabb_extent(self): """ return self._reset_joint_pos_aabb_extent - def teleop_data_to_action(self, teleop_data): + def teleop_data_to_action(self, teleop_action: TeleopAction): """ - Generate action data from teleoperation data + Generate action data from teleoperation action data Args: - teleop_data (TeleopData): teleoperation data + teleop_action (TeleopData): teleoperation action data Returns: np.ndarray: array of action data filled with update value """ diff --git a/omnigibson/robots/tiago.py b/omnigibson/robots/tiago.py index bd8c45249..f4b35ffa8 100644 --- a/omnigibson/robots/tiago.py +++ b/omnigibson/robots/tiago.py @@ -1,6 +1,8 @@ import os import numpy as np +from pxr import Gf +from real_tiago.user_interfaces.teleop_core import TeleopAction import omnigibson as og import omnigibson.lazy as lazy @@ -11,7 +13,6 @@ from omnigibson.robots.manipulation_robot import GraspingPoint, ManipulationRobot from omnigibson.robots.locomotion_robot import LocomotionRobot from omnigibson.utils.python_utils import assert_valid_key -from omnigibson.utils.teleop_utils import TeleopData from omnigibson.utils.usd_utils import JointType # Create settings for this module @@ -764,8 +765,7 @@ def get_angular_velocity(self) -> np.ndarray: def eef_usd_path(self): return {arm: 
os.path.join(gm.ASSET_PATH, "models/tiago/tiago_dual_omnidirectional_stanford/tiago_eef.usd") for arm in self.arm_names} - def teleop_data_to_action(self, teleop_data: TeleopData) -> np.ndarray: - action = ManipulationRobot.teleop_data_to_action(self, teleop_data) - # compute base movement (x, y, yaw) - action[self.base_action_idx] = teleop_data.transforms["base"][[0, 1, 3]] + def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: + action = ManipulationRobot.teleop_data_to_action(self, teleop_action) + action[self.base_action_idx] = teleop_action.base return action diff --git a/omnigibson/robots/two_wheel_robot.py b/omnigibson/robots/two_wheel_robot.py index 8232e591e..95409627f 100644 --- a/omnigibson/robots/two_wheel_robot.py +++ b/omnigibson/robots/two_wheel_robot.py @@ -1,11 +1,11 @@ from abc import abstractmethod import gym import numpy as np +from real_tiago.user_interfaces.teleop_core import TeleopAction from omnigibson.controllers import DifferentialDriveController from omnigibson.robots.locomotion_robot import LocomotionRobot from omnigibson.utils.python_utils import classproperty -from omnigibson.utils.teleop_utils import TeleopData class TwoWheelRobot(LocomotionRobot): @@ -150,23 +150,18 @@ def _do_not_register_classes(cls): classes.add("TwoWheelRobot") return classes - def teleop_data_to_action(self, teleop_data: TeleopData) -> np.ndarray: + def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: """ - Generate action data from teleoperation data + Generate action data from teleoperation action data NOTE: This implementation only supports DifferentialDriveController. Overwrite this function if the robot is using a different base controller. Args: - teleop_data (TeleopData): teleoperation data + teleop_action (TeleopAction): teleoperation action data Returns: np.ndarray: array of action data """ - action = super().teleop_data_to_action(teleop_data) + action = super().teleop_data_to_action(teleop_action) assert isinstance(self._controllers["base"], DifferentialDriveController), "Only DifferentialDriveController is supported!" 
- if teleop_data.robot_attached: - translation_offset = teleop_data.transforms["base"][0] - rotation_offset = teleop_data.transforms["base"][3] - else: - translation_offset, rotation_offset = 0, 0 - action[self.base_action_idx] = np.array([translation_offset, rotation_offset]) + action[self.base_action_idx] = np.array([teleop_action.base[0], teleop_action.base[2]]) return action diff --git a/omnigibson/utils/teleop_utils.py b/omnigibson/utils/teleop_utils.py index c70e15df7..c1ca8173c 100644 --- a/omnigibson/utils/teleop_utils.py +++ b/omnigibson/utils/teleop_utils.py @@ -1,10 +1,6 @@ import numpy as np -import omni import time -from dataclasses import dataclass, field -from importlib import import_module -from threading import Thread -from typing import Any, Iterable, Optional, Tuple, Dict +from typing import Iterable, Optional, Tuple, Dict import omnigibson as og import omnigibson.lazy as lazy @@ -13,54 +9,26 @@ from omnigibson.objects import USDObject from omnigibson.robots.robot_base import BaseRobot +from real_tiago.user_interfaces.teleop_core import TeleopObservation +from real_tiago.user_interfaces.teleop_policy import TeleopPolicy m = create_module_macros(module_path=__file__) m.movement_speed = 0.2 # the speed of the robot base movement - -@dataclass -class TeleopData: - is_valid: Dict[str, bool] = field(default_factory=dict) - transforms: Dict[str, Any] = field(default_factory=dict) - grippers: Dict[str, float] = field(default_factory=dict) - reset: Dict[str, bool] = field(default_factory=dict) - robot_attached: bool = False - n_arms: int = 2 - - def __post_init__(self) -> None: - arms = ["left", "right"] if self.n_arms == 2 else ["right"] - self.is_valid = {arm: False for arm in arms} - self.transforms = {arm: (np.zeros(3), np.array([0, 0, 0, 1])) for arm in arms} - self.grippers = {arm: 0. for arm in arms} - self.reset = {arm: False for arm in arms} - - -class TeleopSystem: +class TeleopSystem(TeleopPolicy): """ - Base class for teleop systems + Base class for teleop policy """ - - def __init__(self, robot: BaseRobot, show_control_marker: bool = True, *args, **kwargs) -> None: + def __init__(self, config, robot: BaseRobot, show_control_marker: bool = True, *args, **kwargs) -> None: """ Initializes the VR system Args: robot (BaseRobot): the robot that will be controlled. show_control_marker (bool): whether to show a visual marker that indicates the target pose of the control. """ - # raw data directly obtained from the teleoperation device - self.raw_data = { - "transforms": {}, - "button_data": {}, - } + super().__init__(config) self.robot = robot self.robot_arms = ["left", "right"] if self.robot.n_arms == 2 else ["right"] - self.teleop_data = TeleopData(n_arms=self.robot.n_arms) - # device_origin stores the original pose of the teleop device (e.g. VR controller) upon calling reset_transform_mapping - # This is used to subtract with the current teleop device pose in each frame to get the delta pose - self.device_origin = {arm: T.mat2pose(np.eye(4)) for arm in self.robot_arms} - # robot_origin stores the relative pose of the robot eef w.r.t. 
to the robot base upon calling reset_transform_mapping - # This is used to apply the delta offset of the VR controller to get the final target robot eef pose (relative to the robot base) - self.robot_origin = {arm: T.mat2pose(np.eye(4)) for arm in self.robot_arms} # robot parameters self.robot_attached = False self.movement_speed = m.movement_speed @@ -73,53 +41,22 @@ def __init__(self, robot: BaseRobot, show_control_marker: bool = True, *args, ** visual_only=True) og.sim.import_object(self.control_markers[arm_name]) - def start(self) -> None: - """ - Starts the teleop system - """ - raise NotImplementedError - - def stop(self) -> None: - """ - Stops the teleop system - """ - raise NotImplementedError - - def update(self) -> None: - """ - Update self.teleop_data - NOTE: all tracking data should be in world frame - """ - raise NotImplementedError - - def teleop_data_to_action(self) -> np.ndarray: + def get_action(self) -> np.ndarray: """ Generate action data from VR input for robot teleoperation Returns: np.ndarray: array of action data """ + # construct robot observation + robot_obs = TeleopObservation() + # get teleop action + teleop_actions = super().get_action(robot_obs) # optionally update control marker if self.show_control_marker: - self.update_control_marker() - return self.robot.teleop_data_to_action(self.teleop_data) - - def update_control_marker(self) -> None: - """ - Update the target object's pose to the VR right controller's pose - """ - for arm_name in self.control_markers: - if arm_name in self.teleop_data.transforms: - self.control_markers[arm_name].set_position_orientation(*self.teleop_data.transforms[arm_name]) - - def reset_transform_mapping(self, arm: str = "right") -> None: - """ - Snap device to the robot end effector (ManipulationRobot only) - Args: - arm(str): name of the arm, one of "left" or "right". Default is "right". 
- """ - robot_base_pose = self.robot.get_position_orientation() - eef_pose = self.robot.eef_links[self.robot.arm_names[self.robot_arms.index(arm)]].get_position_orientation() - self.robot_origin[arm] = T.relative_pose_transform(*eef_pose, *robot_base_pose) + for arm_name in self.control_markers: + arm_pos, arm_orn = self.actions.arms[arm_name][:3], T.euler2quat(self.actions.arms[arm_name][3:6]) + self.control_markers[arm_name].set_position_orientation(arm_pos, arm_orn) + return self.robot.teleop_data_to_action(teleop_actions) class OVXRSystem(TeleopSystem): @@ -563,207 +500,3 @@ def reset_transform_mapping(self, arm: str = "right") -> None: super().reset_transform_mapping(arm) if arm in self.raw_data["transforms"]: self.device_origin[arm] = self.raw_data["transforms"][arm] - - -class SpaceMouseSystem(TeleopSystem): - def __init__(self, robot: BaseRobot, show_control_marker: bool = True, *args, **kwargs) -> None: - try: - self.pyspacemouse = import_module('pyspacemouse') - except ModuleNotFoundError: - raise ModuleNotFoundError("Please install pyspacemouse to use SpaceMouseSystem") - super().__init__(robot, show_control_marker) - - self.raw_data = None - self.data_thread = None - # robot is always attached to the space mouse system - self.teleop_data.robot_attached = True - # data is always valid for space mouse - for arm in self.robot_arms: - self.teleop_data.is_valid[arm] = True - self.delta_pose = {arm: [np.zeros(3), np.array([0, 0, 0, 1])] for arm in self.robot_arms} - # tracker of which robot part we are controlling - self.controllable_robot_parts = self.robot_arms.copy() - if "base" in self.robot.controllers: - self.controllable_robot_parts.append("base") - self.cur_control_idx = 0 - # we want to scale down the movement speed to have better control for arms - self.arm_speed_scaledown = 0.1 - - - def start(self) -> None: - """ - Start the space mouse connection and the data thread - """ - assert self.pyspacemouse.open( - button_callback=self._button_callback), "[SpaceMouseSys] Cannot connect to space mouse!" 
- for arm in self.robot_arms: - self.reset_transform_mapping(arm) - self.data_thread = Thread(target=self._update_internal_data, daemon=True) - self.data_thread.start() - - def stop(self) -> None: - """ - Stop the space mouse connection and the data thread - """ - self.data_thread.join() - self.pyspacemouse.close() - - def _update_internal_data(self) -> None: - """ - Thread that stores the the spacemouse input to self.raw_data - """ - while True: - self.raw_data = self.pyspacemouse.read() - - def _button_callback(self, _, buttons) -> None: - """ - Callback function for space mouse button press - """ - if buttons[0]: - # left button pressed, switch controlling part - self.cur_control_idx = (self.cur_control_idx + 1) % len(self.controllable_robot_parts) - print(f"Now controlling robot part {self.controllable_robot_parts[self.cur_control_idx]}") - elif buttons[1]: - # right button pressed, switch gripper open/close state if we are controlling one - if self.controllable_robot_parts[self.cur_control_idx] != "base": - arm = self.controllable_robot_parts[self.cur_control_idx] - self.teleop_data.grippers[arm] = (self.teleop_data.grippers[arm] + 1) % 2 - - def update(self) -> None: - """ - Steps the VR system and update self.teleop_data - """ - self.teleop_data.transforms["base"] = np.zeros(4) - if self.raw_data: - controlling_robot_part = self.controllable_robot_parts[self.cur_control_idx] - # update controlling part pose - if controlling_robot_part == "base": - self.teleop_data.transforms[controlling_robot_part][0] = self.raw_data.y * self.movement_speed - self.teleop_data.transforms[controlling_robot_part][1] = -self.raw_data.x * self.movement_speed - self.teleop_data.transforms[controlling_robot_part][2] = self.raw_data.z * self.movement_speed - self.teleop_data.transforms[controlling_robot_part][3] = -self.raw_data.yaw * self.movement_speed - else: - self.delta_pose[controlling_robot_part][0] += np.array( - [self.raw_data.y, -self.raw_data.x, self.raw_data.z] - ) * self.movement_speed * self.arm_speed_scaledown - self.delta_pose[controlling_robot_part][1] = T.quat_multiply( - T.euler2quat( - np.array([ - self.raw_data.roll, self.raw_data.pitch, -self.raw_data.yaw - ]) * self.movement_speed * self.arm_speed_scaledown - ), - self.delta_pose[controlling_robot_part][1] - ) - # additionally update eef pose (to ensure it's up to date relative to robot base pose) - robot_base_pose = self.robot.get_position_orientation() - for arm in self.robot_arms: - target_rel_pos = self.robot_origin[arm][0] + self.delta_pose[arm][0] - target_rel_orn = T.quat_multiply(self.delta_pose[arm][1], self.robot_origin[arm][1]) - self.teleop_data.transforms[arm] = T.pose_transform(*robot_base_pose, target_rel_pos, target_rel_orn) - - -class KeyboardSystem(TeleopSystem): - def __init__(self, robot: BaseRobot, show_control_marker: bool = True, *args, **kwargs) -> None: - super().__init__(robot, show_control_marker) - # robot is always attached to the keyboard system, gripper is open initially - self.teleop_data.robot_attached = True - # data is always valid for keyboard - for arm in self.robot_arms: - self.raw_data["transforms"][arm] = (np.zeros(3), np.zeros(3)) - self.teleop_data.is_valid[arm] = True - self.raw_data["transforms"]["base"] = np.zeros(4) - self.cur_control_idx = 0 - # we want to scale down the movement speed to have better control for arms - self.arm_speed_scaledown = 0.01 - self.delta_pose = {arm: [np.zeros(3), np.array([0, 0, 0, 1])] for arm in self.robot_arms} - - def start(self) -> None: - # start the 
keyboard subscriber - appwindow = omni.appwindow.get_default_app_window() - input_interface = lazy.carb.input.acquire_input_interface() - keyboard = appwindow.get_keyboard() - input_interface.subscribe_to_keyboard_events(keyboard, self._update_internal_data) - for arm in self.robot_arms: - self.reset_transform_mapping(arm) - - def stop(self) -> None: - pass - - def _update_internal_data(self, event) -> None: - hand = self.robot_arms[self.cur_control_idx] - if event.type == lazy.carb.input.KeyboardEventType.KEY_RELEASE: - for arm in self.robot_arms: - self.raw_data["transforms"][arm] = (np.zeros(3), np.zeros(3)) - self.raw_data["transforms"]["base"] = np.zeros(4) - if event.type == lazy.carb.input.KeyboardEventType.KEY_PRESS \ - or event.type == lazy.carb.input.KeyboardEventType.KEY_REPEAT: - key = event.input - # update gripper state - if key == lazy.carb.input.KeyboardInput.KEY_4: - self.teleop_data.grippers[hand] = (self.teleop_data.grippers[hand] + 1) % 2 - # update current finger positions - elif key == lazy.carb.input.KeyboardInput.W: - self.raw_data["transforms"][hand][0][0] = self.movement_speed * self.arm_speed_scaledown - elif key == lazy.carb.input.KeyboardInput.S: - self.raw_data["transforms"][hand][0][0] = -self.movement_speed * self.arm_speed_scaledown - elif key == lazy.carb.input.KeyboardInput.A: - self.raw_data["transforms"][hand][0][1] = self.movement_speed * self.arm_speed_scaledown - elif key == lazy.carb.input.KeyboardInput.D: - self.raw_data["transforms"][hand][0][1] = -self.movement_speed * self.arm_speed_scaledown - elif key == lazy.carb.input.KeyboardInput.E: - self.raw_data["transforms"][hand][0][2] = self.movement_speed * self.arm_speed_scaledown - elif key == lazy.carb.input.KeyboardInput.Q: - self.raw_data["transforms"][hand][0][2] = -self.movement_speed * self.arm_speed_scaledown - elif key == lazy.carb.input.KeyboardInput.C: - self.raw_data["transforms"][hand][1][0] = self.movement_speed * self.arm_speed_scaledown - elif key == lazy.carb.input.KeyboardInput.X: - self.raw_data["transforms"][hand][1][0] = -self.movement_speed * self.arm_speed_scaledown - elif key == lazy.carb.input.KeyboardInput.T: - self.raw_data["transforms"][hand][1][1] = self.movement_speed * self.arm_speed_scaledown - elif key == lazy.carb.input.KeyboardInput.B: - self.raw_data["transforms"][hand][1][1] = -self.movement_speed * self.arm_speed_scaledown - elif key == lazy.carb.input.KeyboardInput.Z: - self.raw_data["transforms"][hand][1][2] = self.movement_speed * self.arm_speed_scaledown - elif key == lazy.carb.input.KeyboardInput.V: - self.raw_data["transforms"][hand][1][2] = -self.movement_speed * self.arm_speed_scaledown - # rotate around hands to control - elif key == lazy.carb.input.KeyboardInput.KEY_1: - self.cur_control_idx = (self.cur_control_idx - 1) % len(self.robot_arms) - print(f"Now controlling robot part {self.robot_arms[self.cur_control_idx]}") - elif key == lazy.carb.input.KeyboardInput.KEY_2: - self.cur_control_idx = (self.cur_control_idx + 1) % len(self.robot_arms) - print(f"Now controlling robot part {self.robot_arms[self.cur_control_idx]}") - # update base positions - elif key == lazy.carb.input.KeyboardInput.I: - self.raw_data["transforms"]["base"][0] = self.movement_speed - elif key == lazy.carb.input.KeyboardInput.K: - self.raw_data["transforms"]["base"][0] = -self.movement_speed - elif key == lazy.carb.input.KeyboardInput.U: - self.raw_data["transforms"]["base"][1] = self.movement_speed - elif key == lazy.carb.input.KeyboardInput.O: - 
self.raw_data["transforms"]["base"][1] = -self.movement_speed - elif key == lazy.carb.input.KeyboardInput.P: - self.raw_data["transforms"]["base"][2] = self.movement_speed - elif key == lazy.carb.input.KeyboardInput.SEMICOLON: - self.raw_data["transforms"]["base"][2] = -self.movement_speed - elif key == lazy.carb.input.KeyboardInput.J: - self.raw_data["transforms"]["base"][3] = self.movement_speed - elif key == lazy.carb.input.KeyboardInput.L: - self.raw_data["transforms"]["base"][3] = -self.movement_speed - return True - - def update(self) -> None: - # update robot base pose - self.teleop_data.transforms["base"] = self.raw_data["transforms"]["base"] - # update robot arm poses - cur_arm = self.robot_arms[self.cur_control_idx] - self.delta_pose[cur_arm][0] += self.raw_data["transforms"][cur_arm][0] - self.delta_pose[cur_arm][1] = T.quat_multiply( - T.euler2quat(self.raw_data["transforms"][cur_arm][1]), - self.delta_pose[cur_arm][1] - ) - robot_base_pose = self.robot.get_position_orientation() - for arm in self.robot_arms: - target_rel_pos = self.robot_origin[arm][0] + self.delta_pose[arm][0] - target_rel_orn = T.quat_multiply(self.delta_pose[arm][1], self.robot_origin[arm][1]) - self.teleop_data.transforms[arm] = T.pose_transform(*robot_base_pose, target_rel_pos, target_rel_orn) From 6c42678ebd91e2a56c9c4bd4dc3a9661ac74c40e Mon Sep 17 00:00:00 2001 From: Wensi Ai Date: Wed, 17 Jan 2024 16:05:29 -0800 Subject: [PATCH 02/21] Update teleop system, include more methods --- .../teleoperation/robot_teleoperate_demo.py | 28 +++++++++++-------- omnigibson/robots/behavior_robot.py | 4 +-- omnigibson/robots/manipulation_robot.py | 14 +++++----- omnigibson/robots/robot_base.py | 4 +-- omnigibson/robots/tiago.py | 4 +-- omnigibson/robots/two_wheel_robot.py | 6 ++-- omnigibson/utils/teleop_utils.py | 17 ++++++++--- 7 files changed, 46 insertions(+), 31 deletions(-) diff --git a/omnigibson/examples/teleoperation/robot_teleoperate_demo.py b/omnigibson/examples/teleoperation/robot_teleoperate_demo.py index c429c0478..c973d2b2b 100644 --- a/omnigibson/examples/teleoperation/robot_teleoperate_demo.py +++ b/omnigibson/examples/teleoperation/robot_teleoperate_demo.py @@ -1,20 +1,27 @@ """ Example script for using external devices to teleoperate a robot. 
""" +try: + from mediapipe import solutions +except ModuleNotFoundError: + pass + import omnigibson as og from omnigibson.utils.ui_utils import choose_from_options from omnigibson.utils.teleop_utils import TeleopSystem + from real_tiago.utils.general_utils import AttrDict +from real_tiago.utils.camera_utils import RealSenseCamera teleop_config = AttrDict( - arm_left_controller='keyboard', - arm_right_controller='keyboard', - base_controller='keyboard', - torso_controller='keyboard', + arm_left_controller='human_kpt', + arm_right_controller='human_kpt', + base_controller='human_kpt', + torso_controller='human_kpt', interface_kwargs=AttrDict( - vr={}, - human_kpt={}, + oculus={}, + human_kpt={"camera": RealSenseCamera()}, keyboard={}, spacemouse={} ) @@ -44,13 +51,13 @@ def main(): robot_cfg["controller_config"][f"arm_{arm}"] = { "name": "InverseKinematicsController", "mode": "pose_delta_ori", - "motor_type": "position" + "motor_type": "position", + "command_input_limits": None, } robot_cfg["controller_config"][f"gripper_{arm}"] = { "name": "MultiFingerGripperController", "command_input_limits": (0.0, 1.0), "mode": "smooth", - "inverted": True } object_cfg = [ { @@ -134,9 +141,8 @@ def main(): teleop_sys.start() # main simulation loop for _ in range(10000): - if og.sim.is_playing(): - action = teleop_sys.get_action() - env.step(action) + action = teleop_sys.get_action() + env.step(action) # Shut down the environment cleanly at the end teleop_sys.stop() env.close() diff --git a/omnigibson/robots/behavior_robot.py b/omnigibson/robots/behavior_robot.py index 5b3761e9e..1d3d01180 100644 --- a/omnigibson/robots/behavior_robot.py +++ b/omnigibson/robots/behavior_robot.py @@ -5,7 +5,7 @@ import os from scipy.spatial.transform import Rotation as R from typing import List, Tuple, Iterable -from real_tiago.user_interfaces.teleop_core import TeleopAction +from real_tiago.user_interfaces.teleop_core import TeleopData import omnigibson as og import omnigibson.lazy as lazy @@ -415,7 +415,7 @@ def update_hand_contact_info(self): self._part_is_in_contact[hand_name] = len(self.eef_links[hand_name].contact_list()) > 0 \ or np.any([len(finger.contact_list()) > 0 for finger in self.finger_links[hand_name]]) - def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: + def teleop_data_to_action(self, teleop_action: TeleopData) -> np.ndarray: """ Generates an action for the BehaviorRobot to perform based on teleop action data dict. diff --git a/omnigibson/robots/manipulation_robot.py b/omnigibson/robots/manipulation_robot.py index cfc3f9f4d..70c6abcfb 100644 --- a/omnigibson/robots/manipulation_robot.py +++ b/omnigibson/robots/manipulation_robot.py @@ -4,7 +4,7 @@ import networkx as nx from omni.physx import get_physx_scene_query_interface from pxr import Gf -from real_tiago.user_interfaces.teleop_core import TeleopAction +from real_tiago.user_interfaces.teleop_core import TeleopData import omnigibson as og import omnigibson.lazy as lazy @@ -1476,13 +1476,13 @@ def teleop_rotation_offset(self): """ return {arm: np.array([0, 0, 0, 1]) for arm in self.arm_names} - def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: + def teleop_data_to_action(self, teleop_action: TeleopData) -> np.ndarray: """ Generate action data from teleoperation action data NOTE: This implementation only supports IK/OSC controller for arm and MultiFingerGripperController for gripper. Overwrite this function if the robot is using a different controller. 
Args: - teleop_action (TeleopAction): teleoperation action data + teleop_action (TeleopData): teleoperation action data Returns: np.ndarray: array of action data for arm and gripper """ @@ -1490,17 +1490,17 @@ def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: hands = ["left", "right"] if self.n_arms == 2 else ["right"] for i, hand in enumerate(hands): arm_name = self.arm_names[i] - arm_action = getattr(teleop_action, hand) + arm_action = teleop_action[hand] # arm action assert \ isinstance(self._controllers[f"arm_{arm_name}"], InverseKinematicsController) or \ isinstance(self._controllers[f"arm_{arm_name}"], OperationalSpaceController), \ f"Only IK and OSC controllers are supported for arm {arm_name}!" - target_pos, target_rpy = arm_action[:3], arm_action[3:6] - target_orn = T.quat2axisangle(T.euler2quat(target_rpy)) + target_pos, target_orn = arm_action[:3], arm_action[3:7] + target_orn = T.quat2axisangle(arm_action[3:7]) action[self.arm_action_idx[arm_name]] = np.r_[target_pos, target_orn] # gripper action assert isinstance(self._controllers[f"gripper_{arm_name}"], MultiFingerGripperController), \ f"Only MultiFingerGripperController is supported for gripper {arm_name}!" - action[self.gripper_action_idx[arm_name]] = arm_action[6] + action[self.gripper_action_idx[arm_name]] = arm_action[7] return action diff --git a/omnigibson/robots/robot_base.py b/omnigibson/robots/robot_base.py index f242d77b5..5d093f057 100644 --- a/omnigibson/robots/robot_base.py +++ b/omnigibson/robots/robot_base.py @@ -2,7 +2,7 @@ from copy import deepcopy import numpy as np import matplotlib.pyplot as plt -from real_tiago.user_interfaces.teleop_core import TeleopAction +from real_tiago.user_interfaces.teleop_core import TeleopData from omnigibson.macros import create_module_macros from omnigibson.sensors import create_sensor, SENSOR_PRIMS_TO_SENSOR_CLS, ALL_SENSOR_MODALITIES, VisionSensor, ScanSensor @@ -480,7 +480,7 @@ def reset_joint_pos_aabb_extent(self): """ return self._reset_joint_pos_aabb_extent - def teleop_data_to_action(self, teleop_action: TeleopAction): + def teleop_data_to_action(self, teleop_action: TeleopData) -> np.ndarray: """ Generate action data from teleoperation action data Args: diff --git a/omnigibson/robots/tiago.py b/omnigibson/robots/tiago.py index f4b35ffa8..45da18e99 100644 --- a/omnigibson/robots/tiago.py +++ b/omnigibson/robots/tiago.py @@ -2,7 +2,7 @@ import numpy as np from pxr import Gf -from real_tiago.user_interfaces.teleop_core import TeleopAction +from real_tiago.user_interfaces.teleop_core import TeleopData import omnigibson as og import omnigibson.lazy as lazy @@ -765,7 +765,7 @@ def get_angular_velocity(self) -> np.ndarray: def eef_usd_path(self): return {arm: os.path.join(gm.ASSET_PATH, "models/tiago/tiago_dual_omnidirectional_stanford/tiago_eef.usd") for arm in self.arm_names} - def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: + def teleop_data_to_action(self, teleop_action: TeleopData) -> np.ndarray: action = ManipulationRobot.teleop_data_to_action(self, teleop_action) action[self.base_action_idx] = teleop_action.base return action diff --git a/omnigibson/robots/two_wheel_robot.py b/omnigibson/robots/two_wheel_robot.py index 95409627f..72be71684 100644 --- a/omnigibson/robots/two_wheel_robot.py +++ b/omnigibson/robots/two_wheel_robot.py @@ -1,7 +1,7 @@ from abc import abstractmethod import gym import numpy as np -from real_tiago.user_interfaces.teleop_core import TeleopAction +from real_tiago.user_interfaces.teleop_core 
import TeleopData from omnigibson.controllers import DifferentialDriveController from omnigibson.robots.locomotion_robot import LocomotionRobot @@ -150,7 +150,7 @@ def _do_not_register_classes(cls): classes.add("TwoWheelRobot") return classes - def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: + def teleop_data_to_action(self, teleop_action: TeleopData) -> np.ndarray: """ Generate action data from teleoperation action data NOTE: This implementation only supports DifferentialDriveController. @@ -162,6 +162,6 @@ def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: """ action = super().teleop_data_to_action(teleop_action) assert isinstance(self._controllers["base"], DifferentialDriveController), "Only DifferentialDriveController is supported!" - action[self.base_action_idx] = np.array([teleop_action.base[0], teleop_action.base[2]]) + action[self.base_action_idx] = np.array([teleop_action.base[0], teleop_action.base[1]]) return action diff --git a/omnigibson/utils/teleop_utils.py b/omnigibson/utils/teleop_utils.py index c1ca8173c..8854d551b 100644 --- a/omnigibson/utils/teleop_utils.py +++ b/omnigibson/utils/teleop_utils.py @@ -9,7 +9,7 @@ from omnigibson.objects import USDObject from omnigibson.robots.robot_base import BaseRobot -from real_tiago.user_interfaces.teleop_core import TeleopObservation +from real_tiago.user_interfaces.teleop_core import TeleopData from real_tiago.user_interfaces.teleop_policy import TeleopPolicy m = create_module_macros(module_path=__file__) @@ -48,14 +48,23 @@ def get_action(self) -> np.ndarray: np.ndarray: array of action data """ # construct robot observation - robot_obs = TeleopObservation() + robot_obs = TeleopData() + base_pos, base_orn = self.robot.get_position_orientation() + robot_obs.base_delta_pose = np.r_[base_pos, base_orn] + for arm in self.robot_arms: + abs_cur_pos, abs_cur_orn = self.robot.eef_links[self.robot.arm_names[self.robot_arms.index(arm)]].get_position_orientation() + rel_cur_pos, rel_cur_orn = T.relative_pose_transform(abs_cur_pos, abs_cur_orn, base_pos, base_orn) + robot_obs[arm] = np.r_[rel_cur_pos, rel_cur_orn, [1]] # get teleop action teleop_actions = super().get_action(robot_obs) # optionally update control marker if self.show_control_marker: for arm_name in self.control_markers: - arm_pos, arm_orn = self.actions.arms[arm_name][:3], T.euler2quat(self.actions.arms[arm_name][3:6]) - self.control_markers[arm_name].set_position_orientation(arm_pos, arm_orn) + delta_pos, delta_orn = teleop_actions[arm_name][:3], teleop_actions[arm_name][3:7] + rel_target_pos = robot_obs[arm_name][:3] + delta_pos + rel_target_orn = T.quat_multiply(delta_orn, robot_obs[arm_name][3:7]) + target_pos, target_orn = T.pose_transform(base_pos, base_orn, rel_target_pos, rel_target_orn) + self.control_markers[arm_name].set_position_orientation(target_pos, target_orn) return self.robot.teleop_data_to_action(teleop_actions) From 284c303819575b974e484e5b06e3701d3ee8211f Mon Sep 17 00:00:00 2001 From: Wensi Ai Date: Wed, 17 Jan 2024 22:02:07 -0800 Subject: [PATCH 03/21] Fix human kpt base teleop --- .../teleoperation/robot_teleoperate_demo.py | 4 +- omnigibson/robots/two_wheel_robot.py | 2 +- omnigibson/utils/teleop_utils.py | 110 +----------------- 3 files changed, 4 insertions(+), 112 deletions(-) diff --git a/omnigibson/examples/teleoperation/robot_teleoperate_demo.py b/omnigibson/examples/teleoperation/robot_teleoperate_demo.py index c973d2b2b..1a79b1814 100644 --- 
a/omnigibson/examples/teleoperation/robot_teleoperate_demo.py +++ b/omnigibson/examples/teleoperation/robot_teleoperate_demo.py @@ -36,7 +36,7 @@ def main(): robot_name = choose_from_options(options=ROBOTS, name="robot") # Create the config for generating the environment we want - env_cfg = {"action_timestep": 1 / 50., "physics_timestep": 1 / 200.} + env_cfg = {"action_timestep": 1 / 50., "physics_timestep": 1 / 300.} scene_cfg = {"type": "Scene"} # Add the robot we want to load robot_cfg = { @@ -51,7 +51,7 @@ def main(): robot_cfg["controller_config"][f"arm_{arm}"] = { "name": "InverseKinematicsController", "mode": "pose_delta_ori", - "motor_type": "position", + "kv": 5.0, "command_input_limits": None, } robot_cfg["controller_config"][f"gripper_{arm}"] = { diff --git a/omnigibson/robots/two_wheel_robot.py b/omnigibson/robots/two_wheel_robot.py index 72be71684..099f1235a 100644 --- a/omnigibson/robots/two_wheel_robot.py +++ b/omnigibson/robots/two_wheel_robot.py @@ -162,6 +162,6 @@ def teleop_data_to_action(self, teleop_action: TeleopData) -> np.ndarray: """ action = super().teleop_data_to_action(teleop_action) assert isinstance(self._controllers["base"], DifferentialDriveController), "Only DifferentialDriveController is supported!" - action[self.base_action_idx] = np.array([teleop_action.base[0], teleop_action.base[1]]) + action[self.base_action_idx] = np.array([teleop_action.base[0], teleop_action.base[2]]) return action diff --git a/omnigibson/utils/teleop_utils.py b/omnigibson/utils/teleop_utils.py index 8854d551b..b22988c20 100644 --- a/omnigibson/utils/teleop_utils.py +++ b/omnigibson/utils/teleop_utils.py @@ -50,7 +50,7 @@ def get_action(self) -> np.ndarray: # construct robot observation robot_obs = TeleopData() base_pos, base_orn = self.robot.get_position_orientation() - robot_obs.base_delta_pose = np.r_[base_pos, base_orn] + robot_obs.base = np.r_[base_pos[:2], [T.quat2euler(base_orn)[2]]] for arm in self.robot_arms: abs_cur_pos, abs_cur_orn = self.robot.eef_links[self.robot.arm_names[self.robot_arms.index(arm)]].get_position_orientation() rel_cur_pos, rel_cur_orn = T.relative_pose_transform(abs_cur_pos, abs_cur_orn, base_pos, base_orn) @@ -401,111 +401,3 @@ def _update_hand_tracking_data(self, e) -> None: self.teleop_data.hand_data[hand] = joint_angles else: self.teleop_data.is_valid[hand] = False - - -class OculusReaderSystem(TeleopSystem): - """ - NOTE: The origin of the oculus system is the headset position. 
For orientation, x is right, y is up, z is back - """ - - def __init__(self, robot: BaseRobot, show_control_marker: bool = True, *args, **kwargs) -> None: - try: - import oculus_reader - except ModuleNotFoundError: - raise ModuleNotFoundError( - "[OculusReaderSys] Please install oculus_reader (https://github.com/rail-berkeley/oculus_reader) to use OculusReaderSystem" - ) - super().__init__(robot, show_control_marker) - # initialize oculus reader - self.oculus_reader = oculus_reader.OculusReader(run=False) - self.reset_button_pressed = False - - def oc2og(self, transform: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: - """ - Apply the orientation offset from the OculusReader coordinate system to the OmniGibson coordinate system - Args: - transform (np.ndarray): the transform matrix in the OculusReader coordinate system - Returns: - tuple(np.ndarray, np.ndarray): the position and orientation in the OmniGibson coordinate system - """ - return T.mat2pose( - T.pose2mat(([0, 0, 0], T.euler2quat([np.pi / 2, 0, np.pi / 2]))) @ transform @ T.pose2mat( - ([0, 0, 0], T.euler2quat([-np.pi / 2, np.pi / 2, 0]))) - ) - - def start(self) -> None: - """ - Start the oculus reader and the data thread - """ - self.oculus_reader.run() - self.data_thread = Thread(target=self._update_internal_data, daemon=True) - self.data_thread.start() - - def stop(self) -> None: - """ - Stop the oculus reader and the data thread - """ - self.data_thread.join() - self.oculus_reader.stop() - - def _update_internal_data(self, hz: float = 50.) -> None: - """ - Thread that updates the raw data at a given frenquency - Args: - hz (float): the frequency to update the raw data, default is 50. - """ - while True: - time.sleep(1 / hz) - transform, self.raw_data["button_data"] = self.oculus_reader.get_transformations_and_buttons() - for hand in ["left", "right"]: - if hand[0] in transform: - self.raw_data["transforms"][hand] = self.oc2og(transform[hand[0]]) - - def update(self) -> None: - """ - Steps the VR system and update self.teleop_data - """ - # generate teleop data - self.teleop_data.transforms["base"] = np.zeros(4) - robot_based_pose = self.robot.get_position_orientation() - # update transform data - for hand in self.robot_arms: - if hand in self.raw_data["transforms"]: - self.teleop_data.is_valid[hand] = True - delta_pos, delta_orn = T.relative_pose_transform(*self.raw_data["transforms"][hand], - *self.device_origin[hand]) - target_rel_pos = self.robot_origin[hand][0] + delta_pos - target_rel_orn = T.quat_multiply(delta_orn, self.robot_origin[hand][1]) - self.teleop_data.transforms[hand] = T.pose_transform(*robot_based_pose, target_rel_pos, - target_rel_orn) - if f"{hand}Trig" in self.raw_data["button_data"]: - self.teleop_data.grippers[hand] = self.raw_data["button_data"][f"{hand}Trig"][0] - else: - self.teleop_data.is_valid[hand] = False - # update button data - if "rightJS" in self.raw_data["button_data"]: - rightJS_data = self.raw_data["button_data"]["rightJS"] - self.teleop_data.transforms["base"][0] = rightJS_data[1] * self.movement_speed - self.teleop_data.transforms["base"][3] = -rightJS_data[0] * self.movement_speed - if "leftJS" in self.raw_data["button_data"]: - leftJS_data = self.raw_data["button_data"]["leftJS"] - self.teleop_data.transforms["base"][1] = -leftJS_data[0] * self.movement_speed - self.teleop_data.transforms["base"][2] = leftJS_data[1] * self.movement_speed - # update robot attachment info - if "rightGrip" in self.raw_data["button_data"] and self.raw_data["button_data"]["rightGrip"][0] >= 0.5: - if not 
self.reset_button_pressed: - self.reset_button_pressed = True - self.robot_attached = not self.robot_attached - else: - self.reset_button_pressed = False - self.teleop_data.robot_attached = self.robot_attached - - def reset_transform_mapping(self, arm: str = "right") -> None: - """ - Snap device to the robot end effector (ManipulationRobot only) - Args: - arm(str): name of the arm, one of "left" or "right". Default is "right". - """ - super().reset_transform_mapping(arm) - if arm in self.raw_data["transforms"]: - self.device_origin[arm] = self.raw_data["transforms"][arm] From 065dfcc4f9f2c1bf016e221c60a5c19f3a861ae0 Mon Sep 17 00:00:00 2001 From: Wensi Ai Date: Thu, 18 Jan 2024 19:52:16 -0800 Subject: [PATCH 04/21] Update robot teleoperation --- .../examples/teleoperation/robot_teleoperate_demo.py | 10 +++++----- omnigibson/robots/behavior_robot.py | 4 ++-- omnigibson/robots/manipulation_robot.py | 11 +++++------ omnigibson/robots/robot_base.py | 6 +++--- omnigibson/robots/tiago.py | 4 ++-- omnigibson/robots/two_wheel_robot.py | 4 ++-- omnigibson/utils/teleop_utils.py | 6 +++--- 7 files changed, 22 insertions(+), 23 deletions(-) diff --git a/omnigibson/examples/teleoperation/robot_teleoperate_demo.py b/omnigibson/examples/teleoperation/robot_teleoperate_demo.py index 1a79b1814..8af3e969c 100644 --- a/omnigibson/examples/teleoperation/robot_teleoperate_demo.py +++ b/omnigibson/examples/teleoperation/robot_teleoperate_demo.py @@ -14,14 +14,14 @@ from real_tiago.utils.camera_utils import RealSenseCamera teleop_config = AttrDict( - arm_left_controller='human_kpt', - arm_right_controller='human_kpt', - base_controller='human_kpt', - torso_controller='human_kpt', + arm_left_controller='oculus', + arm_right_controller='oculus', + base_controller='oculus', + torso_controller='oculus', interface_kwargs=AttrDict( oculus={}, - human_kpt={"camera": RealSenseCamera()}, + # human_kpt={"camera": RealSenseCamera()}, keyboard={}, spacemouse={} ) diff --git a/omnigibson/robots/behavior_robot.py b/omnigibson/robots/behavior_robot.py index 1d3d01180..5b3761e9e 100644 --- a/omnigibson/robots/behavior_robot.py +++ b/omnigibson/robots/behavior_robot.py @@ -5,7 +5,7 @@ import os from scipy.spatial.transform import Rotation as R from typing import List, Tuple, Iterable -from real_tiago.user_interfaces.teleop_core import TeleopData +from real_tiago.user_interfaces.teleop_core import TeleopAction import omnigibson as og import omnigibson.lazy as lazy @@ -415,7 +415,7 @@ def update_hand_contact_info(self): self._part_is_in_contact[hand_name] = len(self.eef_links[hand_name].contact_list()) > 0 \ or np.any([len(finger.contact_list()) > 0 for finger in self.finger_links[hand_name]]) - def teleop_data_to_action(self, teleop_action: TeleopData) -> np.ndarray: + def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: """ Generates an action for the BehaviorRobot to perform based on teleop action data dict. 
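Note on the TeleopAction layout that the teleop_data_to_action overrides in this patch assume. The class itself lives in real_tiago.user_interfaces.teleop_core, so its exact fields are not shown here; the layout below is only inferred from how ManipulationRobot, Tiago, and TwoWheelRobot index it (each arm entry as [dx, dy, dz, droll, dpitch, dyaw, gripper], base as a planar [x, y, yaw] command). A minimal illustrative stand-in, not the real class:

    import numpy as np

    # Hypothetical stand-in for real_tiago's TeleopAction, for illustration only.
    teleop_action = {
        # per-arm command: position delta, roll-pitch-yaw delta, gripper fraction
        "left": np.zeros(7),
        "right": np.r_[[0.05, 0.0, 0.0],   # 5 cm forward delta
                       [0.0, 0.0, 0.0],    # no orientation delta
                       [1.0]],             # gripper fraction
        # base command: planar translation and yaw
        "base": np.zeros(3),
    }

    # ManipulationRobot.teleop_data_to_action splits each arm command as:
    arm_action = teleop_action["right"]
    delta_pos, delta_rpy, grip = arm_action[:3], arm_action[3:6], arm_action[6]
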
diff --git a/omnigibson/robots/manipulation_robot.py b/omnigibson/robots/manipulation_robot.py index 70c6abcfb..f45ff388c 100644 --- a/omnigibson/robots/manipulation_robot.py +++ b/omnigibson/robots/manipulation_robot.py @@ -4,7 +4,7 @@ import networkx as nx from omni.physx import get_physx_scene_query_interface from pxr import Gf -from real_tiago.user_interfaces.teleop_core import TeleopData +from real_tiago.user_interfaces.teleop_core import TeleopAction, TeleopObservation import omnigibson as og import omnigibson.lazy as lazy @@ -1476,13 +1476,13 @@ def teleop_rotation_offset(self): """ return {arm: np.array([0, 0, 0, 1]) for arm in self.arm_names} - def teleop_data_to_action(self, teleop_action: TeleopData) -> np.ndarray: + def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: """ Generate action data from teleoperation action data NOTE: This implementation only supports IK/OSC controller for arm and MultiFingerGripperController for gripper. Overwrite this function if the robot is using a different controller. Args: - teleop_action (TeleopData): teleoperation action data + teleop_action (TeleopAction): teleoperation action data Returns: np.ndarray: array of action data for arm and gripper """ @@ -1496,11 +1496,10 @@ def teleop_data_to_action(self, teleop_action: TeleopData) -> np.ndarray: isinstance(self._controllers[f"arm_{arm_name}"], InverseKinematicsController) or \ isinstance(self._controllers[f"arm_{arm_name}"], OperationalSpaceController), \ f"Only IK and OSC controllers are supported for arm {arm_name}!" - target_pos, target_orn = arm_action[:3], arm_action[3:7] - target_orn = T.quat2axisangle(arm_action[3:7]) + target_pos, target_orn = arm_action[:3], T.quat2axisangle(T.euler2quat(arm_action[3:6])) action[self.arm_action_idx[arm_name]] = np.r_[target_pos, target_orn] # gripper action assert isinstance(self._controllers[f"gripper_{arm_name}"], MultiFingerGripperController), \ f"Only MultiFingerGripperController is supported for gripper {arm_name}!" 
- action[self.gripper_action_idx[arm_name]] = arm_action[7] + action[self.gripper_action_idx[arm_name]] = arm_action[6] return action diff --git a/omnigibson/robots/robot_base.py b/omnigibson/robots/robot_base.py index 5d093f057..c5cdd3008 100644 --- a/omnigibson/robots/robot_base.py +++ b/omnigibson/robots/robot_base.py @@ -2,7 +2,7 @@ from copy import deepcopy import numpy as np import matplotlib.pyplot as plt -from real_tiago.user_interfaces.teleop_core import TeleopData +from real_tiago.user_interfaces.teleop_core import TeleopAction from omnigibson.macros import create_module_macros from omnigibson.sensors import create_sensor, SENSOR_PRIMS_TO_SENSOR_CLS, ALL_SENSOR_MODALITIES, VisionSensor, ScanSensor @@ -480,11 +480,11 @@ def reset_joint_pos_aabb_extent(self): """ return self._reset_joint_pos_aabb_extent - def teleop_data_to_action(self, teleop_action: TeleopData) -> np.ndarray: + def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: """ Generate action data from teleoperation action data Args: - teleop_action (TeleopData): teleoperation action data + teleop_action (TeleopAction): teleoperation action data Returns: np.ndarray: array of action data filled with update value """ diff --git a/omnigibson/robots/tiago.py b/omnigibson/robots/tiago.py index 45da18e99..f4b35ffa8 100644 --- a/omnigibson/robots/tiago.py +++ b/omnigibson/robots/tiago.py @@ -2,7 +2,7 @@ import numpy as np from pxr import Gf -from real_tiago.user_interfaces.teleop_core import TeleopData +from real_tiago.user_interfaces.teleop_core import TeleopAction import omnigibson as og import omnigibson.lazy as lazy @@ -765,7 +765,7 @@ def get_angular_velocity(self) -> np.ndarray: def eef_usd_path(self): return {arm: os.path.join(gm.ASSET_PATH, "models/tiago/tiago_dual_omnidirectional_stanford/tiago_eef.usd") for arm in self.arm_names} - def teleop_data_to_action(self, teleop_action: TeleopData) -> np.ndarray: + def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: action = ManipulationRobot.teleop_data_to_action(self, teleop_action) action[self.base_action_idx] = teleop_action.base return action diff --git a/omnigibson/robots/two_wheel_robot.py b/omnigibson/robots/two_wheel_robot.py index 099f1235a..931e7dcb7 100644 --- a/omnigibson/robots/two_wheel_robot.py +++ b/omnigibson/robots/two_wheel_robot.py @@ -1,7 +1,7 @@ from abc import abstractmethod import gym import numpy as np -from real_tiago.user_interfaces.teleop_core import TeleopData +from real_tiago.user_interfaces.teleop_core import TeleopAction from omnigibson.controllers import DifferentialDriveController from omnigibson.robots.locomotion_robot import LocomotionRobot @@ -150,7 +150,7 @@ def _do_not_register_classes(cls): classes.add("TwoWheelRobot") return classes - def teleop_data_to_action(self, teleop_action: TeleopData) -> np.ndarray: + def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: """ Generate action data from teleoperation action data NOTE: This implementation only supports DifferentialDriveController. 
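The arm command carries its rotation delta as roll-pitch-yaw, while the InverseKinematicsController / OperationalSpaceController consume an axis-angle vector, hence the T.euler2quat / T.quat2axisangle round trip above. A standalone sketch of the same conversion, written with scipy instead of omnigibson.utils.transform_utils and assuming an extrinsic-xyz Euler convention:

    import numpy as np
    from scipy.spatial.transform import Rotation as R

    delta_pos = np.array([0.05, 0.0, 0.0])        # position delta in the base frame
    delta_rpy = np.array([0.0, 0.0, np.pi / 2])   # 90 degree yaw delta
    rot = R.from_euler("xyz", delta_rpy)          # Euler convention is an assumption here
    axis_angle = rot.as_rotvec()                  # axis * angle, the form IK/OSC expect
    ik_command = np.r_[delta_pos, axis_angle]     # 6-DOF arm command for pose_delta_ori
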
diff --git a/omnigibson/utils/teleop_utils.py b/omnigibson/utils/teleop_utils.py index b22988c20..25edefbfa 100644 --- a/omnigibson/utils/teleop_utils.py +++ b/omnigibson/utils/teleop_utils.py @@ -9,7 +9,7 @@ from omnigibson.objects import USDObject from omnigibson.robots.robot_base import BaseRobot -from real_tiago.user_interfaces.teleop_core import TeleopData +from real_tiago.user_interfaces.teleop_core import TeleopObservation from real_tiago.user_interfaces.teleop_policy import TeleopPolicy m = create_module_macros(module_path=__file__) @@ -48,7 +48,7 @@ def get_action(self) -> np.ndarray: np.ndarray: array of action data """ # construct robot observation - robot_obs = TeleopData() + robot_obs = TeleopObservation() base_pos, base_orn = self.robot.get_position_orientation() robot_obs.base = np.r_[base_pos[:2], [T.quat2euler(base_orn)[2]]] for arm in self.robot_arms: @@ -60,7 +60,7 @@ def get_action(self) -> np.ndarray: # optionally update control marker if self.show_control_marker: for arm_name in self.control_markers: - delta_pos, delta_orn = teleop_actions[arm_name][:3], teleop_actions[arm_name][3:7] + delta_pos, delta_orn = teleop_actions[arm_name][:3], T.euler2quat(teleop_actions[arm_name][3:6]) rel_target_pos = robot_obs[arm_name][:3] + delta_pos rel_target_orn = T.quat_multiply(delta_orn, robot_obs[arm_name][3:7]) target_pos, target_orn = T.pose_transform(base_pos, base_orn, rel_target_pos, rel_target_orn) From 873637759247702d80a0a64dbcedfe6fdae69b65 Mon Sep 17 00:00:00 2001 From: Wensi Ai Date: Thu, 18 Jan 2024 20:47:33 -0800 Subject: [PATCH 05/21] Update base teleop speed --- .../examples/teleoperation/robot_teleoperate_demo.py | 8 ++++---- omnigibson/robots/tiago.py | 2 +- omnigibson/robots/two_wheel_robot.py | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/omnigibson/examples/teleoperation/robot_teleoperate_demo.py b/omnigibson/examples/teleoperation/robot_teleoperate_demo.py index 8af3e969c..6c8434dbd 100644 --- a/omnigibson/examples/teleoperation/robot_teleoperate_demo.py +++ b/omnigibson/examples/teleoperation/robot_teleoperate_demo.py @@ -14,10 +14,10 @@ from real_tiago.utils.camera_utils import RealSenseCamera teleop_config = AttrDict( - arm_left_controller='oculus', - arm_right_controller='oculus', - base_controller='oculus', - torso_controller='oculus', + arm_left_controller='keyboard', + arm_right_controller='keyboard', + base_controller='keyboard', + torso_controller='keyboard', interface_kwargs=AttrDict( oculus={}, diff --git a/omnigibson/robots/tiago.py b/omnigibson/robots/tiago.py index f4b35ffa8..c36d03b59 100644 --- a/omnigibson/robots/tiago.py +++ b/omnigibson/robots/tiago.py @@ -767,5 +767,5 @@ def eef_usd_path(self): def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: action = ManipulationRobot.teleop_data_to_action(self, teleop_action) - action[self.base_action_idx] = teleop_action.base + action[self.base_action_idx] = teleop_action.base * 0.2 return action diff --git a/omnigibson/robots/two_wheel_robot.py b/omnigibson/robots/two_wheel_robot.py index 931e7dcb7..57fa379db 100644 --- a/omnigibson/robots/two_wheel_robot.py +++ b/omnigibson/robots/two_wheel_robot.py @@ -162,6 +162,6 @@ def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: """ action = super().teleop_data_to_action(teleop_action) assert isinstance(self._controllers["base"], DifferentialDriveController), "Only DifferentialDriveController is supported!" 
- action[self.base_action_idx] = np.array([teleop_action.base[0], teleop_action.base[2]]) + action[self.base_action_idx] = np.array([teleop_action.base[0], teleop_action.base[2]]) * 0.2 return action From 5ab634c5c5f2c82307a908e5e63c396128ca46a1 Mon Sep 17 00:00:00 2001 From: Wensi Ai Date: Fri, 19 Jan 2024 11:11:15 -0800 Subject: [PATCH 06/21] Fix import issues --- omnigibson/robots/manipulation_robot.py | 2 -- omnigibson/robots/tiago.py | 2 -- omnigibson/utils/teleop_utils.py | 9 +++++---- 3 files changed, 5 insertions(+), 8 deletions(-) diff --git a/omnigibson/robots/manipulation_robot.py b/omnigibson/robots/manipulation_robot.py index f45ff388c..a8f000ba2 100644 --- a/omnigibson/robots/manipulation_robot.py +++ b/omnigibson/robots/manipulation_robot.py @@ -2,8 +2,6 @@ from collections import namedtuple import numpy as np import networkx as nx -from omni.physx import get_physx_scene_query_interface -from pxr import Gf from real_tiago.user_interfaces.teleop_core import TeleopAction, TeleopObservation import omnigibson as og diff --git a/omnigibson/robots/tiago.py b/omnigibson/robots/tiago.py index c36d03b59..74090a2cf 100644 --- a/omnigibson/robots/tiago.py +++ b/omnigibson/robots/tiago.py @@ -1,7 +1,5 @@ import os - import numpy as np -from pxr import Gf from real_tiago.user_interfaces.teleop_core import TeleopAction import omnigibson as og diff --git a/omnigibson/utils/teleop_utils.py b/omnigibson/utils/teleop_utils.py index 25edefbfa..deeafa1bf 100644 --- a/omnigibson/utils/teleop_utils.py +++ b/omnigibson/utils/teleop_utils.py @@ -9,7 +9,7 @@ from omnigibson.objects import USDObject from omnigibson.robots.robot_base import BaseRobot -from real_tiago.user_interfaces.teleop_core import TeleopObservation +from real_tiago.user_interfaces.teleop_core import TeleopAction, TeleopObservation from real_tiago.user_interfaces.teleop_policy import TeleopPolicy m = create_module_macros(module_path=__file__) @@ -27,6 +27,7 @@ def __init__(self, config, robot: BaseRobot, show_control_marker: bool = True, * show_control_marker (bool): whether to show a visual marker that indicates the target pose of the control. 
""" super().__init__(config) + self.teleop_action: TeleopAction = TeleopAction() self.robot = robot self.robot_arms = ["left", "right"] if self.robot.n_arms == 2 else ["right"] # robot parameters @@ -56,16 +57,16 @@ def get_action(self) -> np.ndarray: rel_cur_pos, rel_cur_orn = T.relative_pose_transform(abs_cur_pos, abs_cur_orn, base_pos, base_orn) robot_obs[arm] = np.r_[rel_cur_pos, rel_cur_orn, [1]] # get teleop action - teleop_actions = super().get_action(robot_obs) + self.teleop_action = super().get_action(robot_obs) # optionally update control marker if self.show_control_marker: for arm_name in self.control_markers: - delta_pos, delta_orn = teleop_actions[arm_name][:3], T.euler2quat(teleop_actions[arm_name][3:6]) + delta_pos, delta_orn = self.teleop_action[arm_name][:3], T.euler2quat(self.teleop_action[arm_name][3:6]) rel_target_pos = robot_obs[arm_name][:3] + delta_pos rel_target_orn = T.quat_multiply(delta_orn, robot_obs[arm_name][3:7]) target_pos, target_orn = T.pose_transform(base_pos, base_orn, rel_target_pos, rel_target_orn) self.control_markers[arm_name].set_position_orientation(target_pos, target_orn) - return self.robot.teleop_data_to_action(teleop_actions) + return self.robot.teleop_data_to_action(self.teleop_action) class OVXRSystem(TeleopSystem): From 4fa3376a4f171acec4fb8eabfe161fd680962dac Mon Sep 17 00:00:00 2001 From: Wensi Ai Date: Tue, 23 Jan 2024 20:52:09 -0800 Subject: [PATCH 07/21] Expose robot observation in teleop sys --- omnigibson/utils/teleop_utils.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/omnigibson/utils/teleop_utils.py b/omnigibson/utils/teleop_utils.py index deeafa1bf..d06c2b449 100644 --- a/omnigibson/utils/teleop_utils.py +++ b/omnigibson/utils/teleop_utils.py @@ -28,6 +28,7 @@ def __init__(self, config, robot: BaseRobot, show_control_marker: bool = True, * """ super().__init__(config) self.teleop_action: TeleopAction = TeleopAction() + self.robot_obs: TeleopObservation = TeleopObservation() self.robot = robot self.robot_arms = ["left", "right"] if self.robot.n_arms == 2 else ["right"] # robot parameters @@ -49,21 +50,21 @@ def get_action(self) -> np.ndarray: np.ndarray: array of action data """ # construct robot observation - robot_obs = TeleopObservation() + self.robot_obs = TeleopObservation() base_pos, base_orn = self.robot.get_position_orientation() - robot_obs.base = np.r_[base_pos[:2], [T.quat2euler(base_orn)[2]]] + self.robot_obs.base = np.r_[base_pos[:2], [T.quat2euler(base_orn)[2]]] for arm in self.robot_arms: abs_cur_pos, abs_cur_orn = self.robot.eef_links[self.robot.arm_names[self.robot_arms.index(arm)]].get_position_orientation() rel_cur_pos, rel_cur_orn = T.relative_pose_transform(abs_cur_pos, abs_cur_orn, base_pos, base_orn) - robot_obs[arm] = np.r_[rel_cur_pos, rel_cur_orn, [1]] + self.robot_obs[arm] = np.r_[rel_cur_pos, rel_cur_orn, [1]] # get teleop action - self.teleop_action = super().get_action(robot_obs) + self.teleop_action = super().get_action(self.robot_obs) # optionally update control marker if self.show_control_marker: for arm_name in self.control_markers: delta_pos, delta_orn = self.teleop_action[arm_name][:3], T.euler2quat(self.teleop_action[arm_name][3:6]) - rel_target_pos = robot_obs[arm_name][:3] + delta_pos - rel_target_orn = T.quat_multiply(delta_orn, robot_obs[arm_name][3:7]) + rel_target_pos = self.robot_obs[arm_name][:3] + delta_pos + rel_target_orn = T.quat_multiply(delta_orn, self.robot_obs[arm_name][3:7]) target_pos, target_orn = T.pose_transform(base_pos, base_orn, 
rel_target_pos, rel_target_orn) self.control_markers[arm_name].set_position_orientation(target_pos, target_orn) return self.robot.teleop_data_to_action(self.teleop_action) From 75e9120f0c9eb94c90edb4098e79f72f38741c51 Mon Sep 17 00:00:00 2001 From: "Wensi (Vince) Ai" Date: Wed, 24 Jan 2024 23:15:45 -0800 Subject: [PATCH 08/21] add gripper obs for teleop --- omnigibson/utils/teleop_utils.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/omnigibson/utils/teleop_utils.py b/omnigibson/utils/teleop_utils.py index d06c2b449..42a7c0061 100644 --- a/omnigibson/utils/teleop_utils.py +++ b/omnigibson/utils/teleop_utils.py @@ -56,7 +56,11 @@ def get_action(self) -> np.ndarray: for arm in self.robot_arms: abs_cur_pos, abs_cur_orn = self.robot.eef_links[self.robot.arm_names[self.robot_arms.index(arm)]].get_position_orientation() rel_cur_pos, rel_cur_orn = T.relative_pose_transform(abs_cur_pos, abs_cur_orn, base_pos, base_orn) - self.robot_obs[arm] = np.r_[rel_cur_pos, rel_cur_orn, [1]] + self.robot_obs[arm] = np.r_[ + rel_cur_pos, + rel_cur_orn, + np.mean(self.robot.get_joint_positions(normalized=True)[self.robot.gripper_control_idx[self.robot.arm_names[self.robot_arms.index(arm)]]]) + ] # get teleop action self.teleop_action = super().get_action(self.robot_obs) # optionally update control marker From 59fdf1bec86c2009150ad2c3a19e7dd2162ccf98 Mon Sep 17 00:00:00 2001 From: "Wensi (Vince) Ai" Date: Fri, 26 Jan 2024 10:12:44 -0800 Subject: [PATCH 09/21] Update teleoperation --- .../teleoperation/robot_teleoperate_demo.py | 13 ++++++------- omnigibson/robots/tiago.py | 2 +- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/omnigibson/examples/teleoperation/robot_teleoperate_demo.py b/omnigibson/examples/teleoperation/robot_teleoperate_demo.py index 6c8434dbd..1f26435b9 100644 --- a/omnigibson/examples/teleoperation/robot_teleoperate_demo.py +++ b/omnigibson/examples/teleoperation/robot_teleoperate_demo.py @@ -11,13 +11,14 @@ from omnigibson.utils.teleop_utils import TeleopSystem from real_tiago.utils.general_utils import AttrDict -from real_tiago.utils.camera_utils import RealSenseCamera +# from real_tiago.utils.camera_utils import RealSenseCamera +TELEOP_METHOD = "oculus" teleop_config = AttrDict( - arm_left_controller='keyboard', - arm_right_controller='keyboard', - base_controller='keyboard', - torso_controller='keyboard', + arm_left_controller=TELEOP_METHOD, + arm_right_controller=TELEOP_METHOD, + base_controller=TELEOP_METHOD, + torso_controller=TELEOP_METHOD, interface_kwargs=AttrDict( oculus={}, @@ -50,8 +51,6 @@ def main(): for arm in arms: robot_cfg["controller_config"][f"arm_{arm}"] = { "name": "InverseKinematicsController", - "mode": "pose_delta_ori", - "kv": 5.0, "command_input_limits": None, } robot_cfg["controller_config"][f"gripper_{arm}"] = { diff --git a/omnigibson/robots/tiago.py b/omnigibson/robots/tiago.py index 74090a2cf..84f9bc760 100644 --- a/omnigibson/robots/tiago.py +++ b/omnigibson/robots/tiago.py @@ -765,5 +765,5 @@ def eef_usd_path(self): def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: action = ManipulationRobot.teleop_data_to_action(self, teleop_action) - action[self.base_action_idx] = teleop_action.base * 0.2 + action[self.base_action_idx] = teleop_action.base * 0.1 return action From a084129d6aa0680146fcc88a152bd59ded09ff34 Mon Sep 17 00:00:00 2001 From: "Wensi (Vince) Ai" Date: Sat, 27 Jan 2024 22:29:50 -0800 Subject: [PATCH 10/21] Add reset, refactor out robot observation --- 
.../teleoperation/robot_teleoperate_demo.py | 2 +- omnigibson/utils/teleop_utils.py | 36 +++++++++++++------ 2 files changed, 27 insertions(+), 11 deletions(-) diff --git a/omnigibson/examples/teleoperation/robot_teleoperate_demo.py b/omnigibson/examples/teleoperation/robot_teleoperate_demo.py index 1f26435b9..41e32c6b9 100644 --- a/omnigibson/examples/teleoperation/robot_teleoperate_demo.py +++ b/omnigibson/examples/teleoperation/robot_teleoperate_demo.py @@ -140,7 +140,7 @@ def main(): teleop_sys.start() # main simulation loop for _ in range(10000): - action = teleop_sys.get_action() + action = teleop_sys.get_action(teleop_sys.get_obs()) env.step(action) # Shut down the environment cleanly at the end teleop_sys.stop() diff --git a/omnigibson/utils/teleop_utils.py b/omnigibson/utils/teleop_utils.py index 42a7c0061..ae5f339b4 100644 --- a/omnigibson/utils/teleop_utils.py +++ b/omnigibson/utils/teleop_utils.py @@ -43,36 +43,52 @@ def __init__(self, config, robot: BaseRobot, show_control_marker: bool = True, * visual_only=True) og.sim.import_object(self.control_markers[arm_name]) - def get_action(self) -> np.ndarray: + def get_obs(self) -> TeleopObservation: """ - Generate action data from VR input for robot teleoperation + Retrieve observation data from robot Returns: - np.ndarray: array of action data + TeleopObservation: dataclass containing robot observations """ - # construct robot observation - self.robot_obs = TeleopObservation() + robot_obs = TeleopObservation() base_pos, base_orn = self.robot.get_position_orientation() - self.robot_obs.base = np.r_[base_pos[:2], [T.quat2euler(base_orn)[2]]] + robot_obs.base = np.r_[base_pos[:2], [T.quat2euler(base_orn)[2]]] for arm in self.robot_arms: abs_cur_pos, abs_cur_orn = self.robot.eef_links[self.robot.arm_names[self.robot_arms.index(arm)]].get_position_orientation() rel_cur_pos, rel_cur_orn = T.relative_pose_transform(abs_cur_pos, abs_cur_orn, base_pos, base_orn) - self.robot_obs[arm] = np.r_[ + robot_obs[arm] = np.r_[ rel_cur_pos, rel_cur_orn, np.mean(self.robot.get_joint_positions(normalized=True)[self.robot.gripper_control_idx[self.robot.arm_names[self.robot_arms.index(arm)]]]) ] + return robot_obs + + def get_action(self, robot_obs: TeleopObservation) -> np.ndarray: + """ + Generate action data from VR input for robot teleoperation + Args: + robot_obs (TeleopObservation): dataclass containing robot observations + Returns: + np.ndarray: array of action data + """ # get teleop action - self.teleop_action = super().get_action(self.robot_obs) + self.teleop_action = super().get_action(robot_obs) # optionally update control marker if self.show_control_marker: for arm_name in self.control_markers: delta_pos, delta_orn = self.teleop_action[arm_name][:3], T.euler2quat(self.teleop_action[arm_name][3:6]) - rel_target_pos = self.robot_obs[arm_name][:3] + delta_pos - rel_target_orn = T.quat_multiply(delta_orn, self.robot_obs[arm_name][3:7]) + rel_target_pos = robot_obs[arm_name][:3] + delta_pos + rel_target_orn = T.quat_multiply(delta_orn, robot_obs[arm_name][3:7]) target_pos, target_orn = T.pose_transform(base_pos, base_orn, rel_target_pos, rel_target_orn) self.control_markers[arm_name].set_position_orientation(target_pos, target_orn) return self.robot.teleop_data_to_action(self.teleop_action) + def reset(self) -> None: + """ + Reset the teleop policy + """ + self.teleop_action = TeleopAction() + self.robot_obs = TeleopObservation() + super().reset_state() class OVXRSystem(TeleopSystem): """ From b3a93cdc139938a90cc34342e1c78ba5443be119 Mon Sep 17 
00:00:00 2001 From: "Wensi (Vince) Ai" Date: Thu, 22 Feb 2024 14:16:51 -0800 Subject: [PATCH 11/21] Update telemoma dependencies --- omnigibson/configs/fetch_behavior.yaml | 3 +- .../teleoperation/robot_teleoperate_demo.py | 67 ++++++++----------- omnigibson/robots/behavior_robot.py | 2 +- omnigibson/robots/manipulation_robot.py | 2 +- omnigibson/robots/robot_base.py | 2 +- omnigibson/robots/tiago.py | 2 +- omnigibson/robots/two_wheel_robot.py | 4 +- omnigibson/utils/teleop_utils.py | 46 +++++++++---- 8 files changed, 67 insertions(+), 61 deletions(-) diff --git a/omnigibson/configs/fetch_behavior.yaml b/omnigibson/configs/fetch_behavior.yaml index 63e2ab13d..517ad4325 100644 --- a/omnigibson/configs/fetch_behavior.yaml +++ b/omnigibson/configs/fetch_behavior.yaml @@ -47,7 +47,7 @@ robots: rigid_trunk: false default_trunk_offset: 0.365 default_arm_pose: diagonal30 - reset_joint_pos: tuck + default_reset_mode: tuck sensor_config: VisionSensor: sensor_kwargs: @@ -62,7 +62,6 @@ robots: name: DifferentialDriveController arm_0: name: InverseKinematicsController - kv: 2.0 gripper_0: name: MultiFingerGripperController mode: binary diff --git a/omnigibson/examples/teleoperation/robot_teleoperate_demo.py b/omnigibson/examples/teleoperation/robot_teleoperate_demo.py index 41e32c6b9..1b75afade 100644 --- a/omnigibson/examples/teleoperation/robot_teleoperate_demo.py +++ b/omnigibson/examples/teleoperation/robot_teleoperate_demo.py @@ -10,32 +10,36 @@ from omnigibson.utils.ui_utils import choose_from_options from omnigibson.utils.teleop_utils import TeleopSystem -from real_tiago.utils.general_utils import AttrDict -# from real_tiago.utils.camera_utils import RealSenseCamera +from telemoma.utils.general_utils import AttrDict +from telemoma.utils.camera_utils import RealSenseCamera +from telemoma.configs.base_config import teleop_config -TELEOP_METHOD = "oculus" -teleop_config = AttrDict( - arm_left_controller=TELEOP_METHOD, - arm_right_controller=TELEOP_METHOD, - base_controller=TELEOP_METHOD, - torso_controller=TELEOP_METHOD, - - interface_kwargs=AttrDict( - oculus={}, - # human_kpt={"camera": RealSenseCamera()}, - keyboard={}, - spacemouse={} - ) -) ROBOTS = { "FrankaPanda": "Franka Emika Panda (default)", "Fetch": "Mobile robot with one arm", "Tiago": "Mobile robot with two arms", } - +TELEOP_METHOD = { + "keyboard": "Keyboard (default)", + "spacemouse": "SpaceMouse", + "oculus": "Oculus Quest", + "human_kpt": "Human Keypoints with Camera", +} def main(): robot_name = choose_from_options(options=ROBOTS, name="robot") + arm_teleop_method = choose_from_options(options=TELEOP_METHOD, name="robot arm teleop method") + if robot_name != "FrankaPanda": + base_teleop_method = choose_from_options(options=TELEOP_METHOD, name="robot base teleop method") + else: + base_teleop_method = "keyboard" # Dummy value since FrankaPanda does not have a base + # Generate teleop config + teleop_config.arm_left_controller = arm_teleop_method + teleop_config.arm_right_controller = arm_teleop_method + teleop_config.interface_kwargs[arm_teleop_method] = {} if arm_teleop_method != "human_kpt" else {"camera": RealSenseCamera()} + teleop_config.base_controller = base_teleop_method + teleop_config.interface_kwargs[base_teleop_method] = {} if base_teleop_method != "human_kpt" else {"camera": RealSenseCamera()} + # Create the config for generating the environment we want env_cfg = {"action_timestep": 1 / 50., "physics_timestep": 1 / 300.} scene_cfg = {"type": "Scene"} @@ -76,7 +80,7 @@ def main(): "category": "frail", "model": 
"zmjovr", "scale": [2, 2, 2], - "position": [0.6, -0.3, 0.5], + "position": [0.6, -0.35, 0.5], }, { "type": "DatasetObject", @@ -94,7 +98,7 @@ def main(): "category": "toy_figure", "model": "nncqfn", "scale": [0.75, 0.75, 0.75], - "position": [0.6, 0.1, 0.5], + "position": [0.6, 0.15, 0.5], }, { "type": "DatasetObject", @@ -103,26 +107,8 @@ def main(): "category": "toy_figure", "model": "eulekw", "scale": [0.25, 0.25, 0.25], - "position": [0.6, 0.2, 0.5], - }, - { - "type": "DatasetObject", - "prim_path": "/World/toy_figure4", - "name": "toy_figure4", - "category": "toy_figure", - "model": "yxiksm", - "scale": [0.25, 0.25, 0.25], "position": [0.6, 0.3, 0.5], - }, - { - "type": "DatasetObject", - "prim_path": "/World/toy_figure5", - "name": "toy_figure5", - "category": "toy_figure", - "model": "wvpqbf", - "scale": [0.75, 0.75, 0.75], - "position": [0.6, 0.4, 0.5], - }, + } ] cfg = dict(env=env_cfg, scene=scene_cfg, robots=[robot_cfg], objects=object_cfg) @@ -131,17 +117,18 @@ def main(): env.reset() # update viewer camera pose og.sim.viewer_camera.set_position_orientation([-0.22, 0.99, 1.09], [-0.14, 0.47, 0.84, -0.23]) - # Start teleoperation system robot = env.robots[0] # Initialize teleoperation system - teleop_sys = TeleopSystem(config=teleop_config, robot=robot, disable_display_output=True, align_anchor_to_robot_base=True) + teleop_sys = TeleopSystem(config=teleop_config, robot=robot, show_control_marker=True) teleop_sys.start() + # main simulation loop for _ in range(10000): action = teleop_sys.get_action(teleop_sys.get_obs()) env.step(action) + # Shut down the environment cleanly at the end teleop_sys.stop() env.close() diff --git a/omnigibson/robots/behavior_robot.py b/omnigibson/robots/behavior_robot.py index 5b3761e9e..b5b6c4ddd 100644 --- a/omnigibson/robots/behavior_robot.py +++ b/omnigibson/robots/behavior_robot.py @@ -5,7 +5,7 @@ import os from scipy.spatial.transform import Rotation as R from typing import List, Tuple, Iterable -from real_tiago.user_interfaces.teleop_core import TeleopAction +from telemoma.input_interface.teleop_core import TeleopAction import omnigibson as og import omnigibson.lazy as lazy diff --git a/omnigibson/robots/manipulation_robot.py b/omnigibson/robots/manipulation_robot.py index a8f000ba2..582675ed0 100644 --- a/omnigibson/robots/manipulation_robot.py +++ b/omnigibson/robots/manipulation_robot.py @@ -2,7 +2,7 @@ from collections import namedtuple import numpy as np import networkx as nx -from real_tiago.user_interfaces.teleop_core import TeleopAction, TeleopObservation +from telemoma.input_interface.teleop_core import TeleopAction import omnigibson as og import omnigibson.lazy as lazy diff --git a/omnigibson/robots/robot_base.py b/omnigibson/robots/robot_base.py index c5cdd3008..cb93395cf 100644 --- a/omnigibson/robots/robot_base.py +++ b/omnigibson/robots/robot_base.py @@ -2,7 +2,7 @@ from copy import deepcopy import numpy as np import matplotlib.pyplot as plt -from real_tiago.user_interfaces.teleop_core import TeleopAction +from telemoma.input_interface.teleop_core import TeleopAction from omnigibson.macros import create_module_macros from omnigibson.sensors import create_sensor, SENSOR_PRIMS_TO_SENSOR_CLS, ALL_SENSOR_MODALITIES, VisionSensor, ScanSensor diff --git a/omnigibson/robots/tiago.py b/omnigibson/robots/tiago.py index 84f9bc760..fbf9de5cc 100644 --- a/omnigibson/robots/tiago.py +++ b/omnigibson/robots/tiago.py @@ -1,6 +1,6 @@ import os import numpy as np -from real_tiago.user_interfaces.teleop_core import TeleopAction +from 
telemoma.input_interface.teleop_core import TeleopAction
 
 import omnigibson as og
 import omnigibson.lazy as lazy
diff --git a/omnigibson/robots/two_wheel_robot.py b/omnigibson/robots/two_wheel_robot.py
index 57fa379db..493a1ef75 100644
--- a/omnigibson/robots/two_wheel_robot.py
+++ b/omnigibson/robots/two_wheel_robot.py
@@ -1,7 +1,7 @@
 from abc import abstractmethod
 import gym
 import numpy as np
-from real_tiago.user_interfaces.teleop_core import TeleopAction
+from telemoma.input_interface.teleop_core import TeleopAction
 
 from omnigibson.controllers import DifferentialDriveController
 from omnigibson.robots.locomotion_robot import LocomotionRobot
@@ -162,6 +162,6 @@ def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray:
         """
         action = super().teleop_data_to_action(teleop_action)
         assert isinstance(self._controllers["base"], DifferentialDriveController), "Only DifferentialDriveController is supported!"
-        action[self.base_action_idx] = np.array([teleop_action.base[0], teleop_action.base[2]]) * 0.2
+        action[self.base_action_idx] = np.array([teleop_action.base[0], teleop_action.base[2]]) * 0.3
         return action
 
diff --git a/omnigibson/utils/teleop_utils.py b/omnigibson/utils/teleop_utils.py
index ae5f339b4..33efadb68 100644
--- a/omnigibson/utils/teleop_utils.py
+++ b/omnigibson/utils/teleop_utils.py
@@ -1,6 +1,6 @@
 import numpy as np
 import time
-from typing import Iterable, Optional, Tuple, Dict
+from typing import Iterable, Optional, Tuple
 
 import omnigibson as og
 import omnigibson.lazy as lazy
@@ -9,8 +9,10 @@
 from omnigibson.objects import USDObject
 from omnigibson.robots.robot_base import BaseRobot
 
-from real_tiago.user_interfaces.teleop_core import TeleopAction, TeleopObservation
-from real_tiago.user_interfaces.teleop_policy import TeleopPolicy
+from telemoma.input_interface.teleop_core import TeleopAction, TeleopObservation
+from telemoma.input_interface.teleop_policy import TeleopPolicy
+from telemoma.utils.general_utils import AttrDict
+from telemoma.configs.base_config import teleop_config
 
 m = create_module_macros(module_path=__file__)
 m.movement_speed = 0.2  # the speed of the robot base movement
@@ -19,10 +21,11 @@ class TeleopSystem(TeleopPolicy):
     """
     Base class for teleop policy
     """
-    def __init__(self, config, robot: BaseRobot, show_control_marker: bool = True, *args, **kwargs) -> None:
+    def __init__(self, config: AttrDict, robot: BaseRobot, show_control_marker: bool = False) -> None:
         """
-        Initializes the VR system
+        Initializes the Teleoperation System
         Args:
+            config (AttrDict): configuration dictionary
             robot (BaseRobot): the robot that will be controlled.
             show_control_marker (bool): whether to show a visual marker that indicates the target pose of the control.
""" @@ -52,13 +55,19 @@ def get_obs(self) -> TeleopObservation: robot_obs = TeleopObservation() base_pos, base_orn = self.robot.get_position_orientation() robot_obs.base = np.r_[base_pos[:2], [T.quat2euler(base_orn)[2]]] - for arm in self.robot_arms: + for i, arm in enumerate(self.robot_arms): abs_cur_pos, abs_cur_orn = self.robot.eef_links[self.robot.arm_names[self.robot_arms.index(arm)]].get_position_orientation() rel_cur_pos, rel_cur_orn = T.relative_pose_transform(abs_cur_pos, abs_cur_orn, base_pos, base_orn) + gripper_pos = np.mean( + self.robot.get_joint_positions(normalized=True)[self.robot.gripper_control_idx[self.robot.arm_names[i]]] + ) + # if we are grasping, we manually set the gripper position to be at most 0.5 + if self.robot.controllers[f"gripper_{self.robot.arm_names[i]}"].is_grasping(): + gripper_pos = min(gripper_pos, 0.5) robot_obs[arm] = np.r_[ rel_cur_pos, rel_cur_orn, - np.mean(self.robot.get_joint_positions(normalized=True)[self.robot.gripper_control_idx[self.robot.arm_names[self.robot_arms.index(arm)]]]) + gripper_pos ] return robot_obs @@ -78,6 +87,7 @@ def get_action(self, robot_obs: TeleopObservation) -> np.ndarray: delta_pos, delta_orn = self.teleop_action[arm_name][:3], T.euler2quat(self.teleop_action[arm_name][3:6]) rel_target_pos = robot_obs[arm_name][:3] + delta_pos rel_target_orn = T.quat_multiply(delta_orn, robot_obs[arm_name][3:7]) + base_pos, base_orn = self.robot.get_position_orientation() target_pos, target_orn = T.pose_transform(base_pos, base_orn, rel_target_pos, rel_target_orn) self.control_markers[arm_name].set_position_orientation(target_pos, target_orn) return self.robot.teleop_data_to_action(self.teleop_action) @@ -90,22 +100,21 @@ def reset(self) -> None: self.robot_obs = TeleopObservation() super().reset_state() + class OVXRSystem(TeleopSystem): """ - VR Teleoperation System build on top of Omniverse XR extension + VR Teleoperation System build on top of Omniverse XR extension and TeleMoMa's TeleopSystem """ def __init__( self, robot: BaseRobot, - show_control_marker: bool = True, + show_control_marker: bool = False, system: str = "SteamVR", disable_display_output: bool = False, enable_touchpad_movement: bool = False, align_anchor_to_robot_base: bool = False, use_hand_tracking: bool = False, - *args, - **kwargs ) -> None: """ Initializes the VR system @@ -126,7 +135,7 @@ def __init__( lazy.omni.isaac.core.utils.extensions.enable_extension("omni.kit.xr.profile.vr") self.xr_device_class = lazy.omni.kit.xr.core.XRDeviceClass # run super method - super().__init__(robot, show_control_marker) + super().__init__(teleop_config, robot, show_control_marker) # we want to further slow down the movement speed if we are using touchpad movement if enable_touchpad_movement: self.movement_speed *= 0.1 @@ -143,7 +152,7 @@ def __init__( self.vr_profile.set_avatar(lazy.omni.kit.xr.ui.stage.common.XRAvatarManager.get_singleton().create_avatar("basic_avatar", {})) else: self.vr_profile.set_avatar(lazy.omni.kit.xr.ui.stage.common.XRAvatarManager.get_singleton().create_avatar("empty_avatar", {})) - # # set anchor mode to be custom anchor + # set anchor mode to be custom anchor lazy.carb.settings.get_settings().set(self.vr_profile.get_scene_persistent_path() + "anchorMode", "scene origin") # set vr system lazy.carb.settings.get_settings().set(self.vr_profile.get_persistent_path() + "system/display", system) @@ -282,6 +291,17 @@ def update(self) -> None: robot_base_pos, robot_base_orn = self.robot.get_position_orientation() 
self.vr_profile.set_virtual_world_anchor_transform(self.og2xr(robot_base_pos, robot_base_orn[[0, 2, 1, 3]])) + def teleop_data_to_action(self) -> np.ndarray: + """ + Generate action data from VR input for robot teleoperation + Returns: + np.ndarray: array of action data + """ + # optionally update control marker + if self.show_control_marker: + self.update_control_marker() + return self.robot.teleop_data_to_action(self.teleop_data) + def reset_transform_mapping(self, arm: str = "right") -> None: """ Snap device to the robot end effector (ManipulationRobot only) From 52374a9dd1a7cf2730c3ce6acdc9160c82cc701e Mon Sep 17 00:00:00 2001 From: Wensi Ai Date: Mon, 4 Mar 2024 15:15:53 -0800 Subject: [PATCH 12/21] Update to telemoma --- omnigibson/examples/teleoperation/hand_tracking_demo.py | 3 +-- omnigibson/examples/teleoperation/robot_teleoperate_demo.py | 4 +--- omnigibson/examples/teleoperation/vr_simple_demo.py | 5 ++--- omnigibson/robots/behavior_robot.py | 2 +- omnigibson/robots/manipulation_robot.py | 2 +- omnigibson/robots/robot_base.py | 2 +- omnigibson/robots/tiago.py | 2 +- omnigibson/robots/two_wheel_robot.py | 2 +- omnigibson/utils/teleop_utils.py | 4 ++-- 9 files changed, 11 insertions(+), 15 deletions(-) diff --git a/omnigibson/examples/teleoperation/hand_tracking_demo.py b/omnigibson/examples/teleoperation/hand_tracking_demo.py index 8965bd607..6f02d7a05 100644 --- a/omnigibson/examples/teleoperation/hand_tracking_demo.py +++ b/omnigibson/examples/teleoperation/hand_tracking_demo.py @@ -9,7 +9,6 @@ def main(): # Create the config for generating the environment we want - env_cfg = {"action_timestep": 1 / 60., "physics_timestep": 1 / 120.} scene_cfg = {"type": "Scene"} robot0_cfg = { "type": "BehaviorRobot", @@ -57,7 +56,7 @@ def main(): "rgba": [0.0, 1.0, 0.0, 1.0], } for i in range(52)]) - cfg = dict(env=env_cfg, scene=scene_cfg, robots=[robot0_cfg], objects=object_cfg) + cfg = dict(scene=scene_cfg, robots=[robot0_cfg], objects=object_cfg) # Create the environment env = og.Environment(configs=cfg) env.reset() diff --git a/omnigibson/examples/teleoperation/robot_teleoperate_demo.py b/omnigibson/examples/teleoperation/robot_teleoperate_demo.py index 1b75afade..1d210f60c 100644 --- a/omnigibson/examples/teleoperation/robot_teleoperate_demo.py +++ b/omnigibson/examples/teleoperation/robot_teleoperate_demo.py @@ -10,7 +10,6 @@ from omnigibson.utils.ui_utils import choose_from_options from omnigibson.utils.teleop_utils import TeleopSystem -from telemoma.utils.general_utils import AttrDict from telemoma.utils.camera_utils import RealSenseCamera from telemoma.configs.base_config import teleop_config @@ -41,7 +40,6 @@ def main(): teleop_config.interface_kwargs[base_teleop_method] = {} if base_teleop_method != "human_kpt" else {"camera": RealSenseCamera()} # Create the config for generating the environment we want - env_cfg = {"action_timestep": 1 / 50., "physics_timestep": 1 / 300.} scene_cfg = {"type": "Scene"} # Add the robot we want to load robot_cfg = { @@ -110,7 +108,7 @@ def main(): "position": [0.6, 0.3, 0.5], } ] - cfg = dict(env=env_cfg, scene=scene_cfg, robots=[robot_cfg], objects=object_cfg) + cfg = dict(scene=scene_cfg, robots=[robot_cfg], objects=object_cfg) # Create the environment env = og.Environment(configs=cfg) diff --git a/omnigibson/examples/teleoperation/vr_simple_demo.py b/omnigibson/examples/teleoperation/vr_simple_demo.py index 3f084db37..7abf37fcd 100644 --- a/omnigibson/examples/teleoperation/vr_simple_demo.py +++ 
b/omnigibson/examples/teleoperation/vr_simple_demo.py @@ -6,8 +6,7 @@ def main(): # Create the config for generating the environment we want - env_cfg = {"action_timestep": 1 / 60., "physics_timestep": 1 / 120.} - scene_cfg = {"type": "InteractiveTraversableScene", "scene_model": "Rs_int"} + scene_cfg = {"type": "Scene"}#, "scene_model": "Rs_int"} robot0_cfg = { "type": "BehaviorRobot", "controller_config": { @@ -15,7 +14,7 @@ def main(): "gripper_1": {"command_input_limits": "default"}, } } - cfg = dict(env=env_cfg, scene=scene_cfg, robots=[robot0_cfg]) + cfg = dict(scene=scene_cfg, robots=[robot0_cfg]) # Create the environment env = og.Environment(configs=cfg) diff --git a/omnigibson/robots/behavior_robot.py b/omnigibson/robots/behavior_robot.py index b5b6c4ddd..16bb0e4c0 100644 --- a/omnigibson/robots/behavior_robot.py +++ b/omnigibson/robots/behavior_robot.py @@ -5,7 +5,7 @@ import os from scipy.spatial.transform import Rotation as R from typing import List, Tuple, Iterable -from telemoma.input_interface.teleop_core import TeleopAction +from telemoma.human_interface.teleop_core import TeleopAction import omnigibson as og import omnigibson.lazy as lazy diff --git a/omnigibson/robots/manipulation_robot.py b/omnigibson/robots/manipulation_robot.py index 582675ed0..f32476183 100644 --- a/omnigibson/robots/manipulation_robot.py +++ b/omnigibson/robots/manipulation_robot.py @@ -2,7 +2,7 @@ from collections import namedtuple import numpy as np import networkx as nx -from telemoma.input_interface.teleop_core import TeleopAction +from telemoma.human_interface.teleop_core import TeleopAction import omnigibson as og import omnigibson.lazy as lazy diff --git a/omnigibson/robots/robot_base.py b/omnigibson/robots/robot_base.py index cb93395cf..f092a0ca0 100644 --- a/omnigibson/robots/robot_base.py +++ b/omnigibson/robots/robot_base.py @@ -2,7 +2,7 @@ from copy import deepcopy import numpy as np import matplotlib.pyplot as plt -from telemoma.input_interface.teleop_core import TeleopAction +from telemoma.human_interface.teleop_core import TeleopAction from omnigibson.macros import create_module_macros from omnigibson.sensors import create_sensor, SENSOR_PRIMS_TO_SENSOR_CLS, ALL_SENSOR_MODALITIES, VisionSensor, ScanSensor diff --git a/omnigibson/robots/tiago.py b/omnigibson/robots/tiago.py index fbf9de5cc..7e6e144b8 100644 --- a/omnigibson/robots/tiago.py +++ b/omnigibson/robots/tiago.py @@ -1,6 +1,6 @@ import os import numpy as np -from telemoma.input_interface.teleop_core import TeleopAction +from telemoma.human_interface.teleop_core import TeleopAction import omnigibson as og import omnigibson.lazy as lazy diff --git a/omnigibson/robots/two_wheel_robot.py b/omnigibson/robots/two_wheel_robot.py index 493a1ef75..0d05fb08c 100644 --- a/omnigibson/robots/two_wheel_robot.py +++ b/omnigibson/robots/two_wheel_robot.py @@ -1,7 +1,7 @@ from abc import abstractmethod import gym import numpy as np -from telemoma.input_interface.teleop_core import TeleopAction +from telemoma.human_interface.teleop_core import TeleopAction from omnigibson.controllers import DifferentialDriveController from omnigibson.robots.locomotion_robot import LocomotionRobot diff --git a/omnigibson/utils/teleop_utils.py b/omnigibson/utils/teleop_utils.py index 33efadb68..9205f19b8 100644 --- a/omnigibson/utils/teleop_utils.py +++ b/omnigibson/utils/teleop_utils.py @@ -9,8 +9,8 @@ from omnigibson.objects import USDObject from omnigibson.robots.robot_base import BaseRobot -from telemoma.input_interface.teleop_core import TeleopAction, 
TeleopObservation -from telemoma.input_interface.teleop_policy import TeleopPolicy +from telemoma.human_interface.teleop_core import TeleopAction, TeleopObservation +from telemoma.human_interface.teleop_policy import TeleopPolicy from telemoma.utils.general_utils import AttrDict from telemoma.configs.base_config import teleop_config From ea37bedd624c856ca18dfbe7946ecda66b9f1a52 Mon Sep 17 00:00:00 2001 From: Wensi Ai Date: Wed, 6 Mar 2024 17:52:15 -0800 Subject: [PATCH 13/21] Update VR teleoperation --- .../examples/robots/robot_control_example.py | 2 +- omnigibson/robots/behavior_robot.py | 76 ++++++++++------- omnigibson/utils/teleop_utils.py | 85 ++++++++++--------- 3 files changed, 89 insertions(+), 74 deletions(-) diff --git a/omnigibson/examples/robots/robot_control_example.py b/omnigibson/examples/robots/robot_control_example.py index e08667944..013b29769 100644 --- a/omnigibson/examples/robots/robot_control_example.py +++ b/omnigibson/examples/robots/robot_control_example.py @@ -24,7 +24,7 @@ # Don't use GPU dynamics and use flatcache for performance boost gm.USE_GPU_DYNAMICS = False -gm.ENABLE_FLATCACHE = True +gm.ENABLE_FLATCACHE = False def choose_controllers(robot, random_selection=False): diff --git a/omnigibson/robots/behavior_robot.py b/omnigibson/robots/behavior_robot.py index 16bb0e4c0..435ffe86e 100644 --- a/omnigibson/robots/behavior_robot.py +++ b/omnigibson/robots/behavior_robot.py @@ -21,9 +21,11 @@ m.COMPONENT_SUFFIXES = ['x', 'y', 'z', 'rx', 'ry', 'rz'] # Offset between the body and parts -m.HEAD_TO_BODY_OFFSET = ([0, 0, -0.4], [0, 0, 0, 1]) -m.RH_TO_BODY_OFFSET = ([0, 0.15, -0.4], T.euler2quat([0, 0, 0])) -m.LH_TO_BODY_OFFSET = ([0, -0.15, -0.4], T.euler2quat([0, 0, 0])) +m.HEAD_TO_BODY_OFFSET = [0, 0, -0.4] +m.HAND_TO_BODY_OFFSET = { + "left": [0, -0.15, -0.4], + "right": [0, 0.15, -0.4] +} m.BODY_HEIGHT_OFFSET = 0.45 # Hand parameters m.HAND_GHOST_HAND_APPEAR_THRESHOLD = 0.15 @@ -118,22 +120,16 @@ def __init__( # setup eef parts self.parts = OrderedDict() - self.parts["left"] = BRPart( - name="lh", parent=self, prim_path="lh_palm", eef_type="hand", - offset_to_body=m.LH_TO_BODY_OFFSET, **kwargs - ) - - self.parts["right"] = BRPart( - name="rh", parent=self, prim_path="rh_palm", eef_type="hand", - offset_to_body=m.RH_TO_BODY_OFFSET, **kwargs - ) - + for arm_name in self.arm_names: + self.parts[arm_name] = BRPart( + name=arm_name, parent=self, prim_path=f"{arm_name}_palm", eef_type="hand", + offset_to_body=m.HAND_TO_BODY_OFFSET[arm_name], **kwargs + ) self.parts["head"] = BRPart( name="head", parent=self, prim_path="eye", eef_type="head", offset_to_body=m.HEAD_TO_BODY_OFFSET, **kwargs ) - # private fields # whether to use ghost hands (visual markers to help visualize current vr hand pose) self._use_ghost_hands = use_ghost_hands # prim for the world_base_fixed_joint, used to reset the robot pose @@ -155,7 +151,7 @@ def n_arms(self): @property def arm_names(self): - return ["lh", "rh"] + return ["left", "right"] @property def eef_link_names(self): @@ -199,9 +195,8 @@ def finger_joint_names(self): @property def base_control_idx(self): - # TODO: might need to refactor out joints joints = list(self.joints.keys()) - return tuple(joints.index(joint) for joint in self.base_joint_names) + return [joints.index(joint) for joint in self.base_joint_names] @property def arm_control_idx(self): @@ -296,6 +291,26 @@ def _default_camera_joint_controller_config(self): "use_delta_commands": False, } + @property + def _default_gripper_joint_controller_configs(self): + """ + Returns: + 
dict: Dictionary mapping arm appendage name to default gripper joint controller config + to control this robot's gripper + """ + dic = {} + for arm in self.arm_names: + dic[arm] = { + "name": "JointController", + "control_freq": self._control_freq, + "motor_type": "position", + "control_limits": self.control_limits, + "dof_idx": self.gripper_control_idx[arm], + "command_input_limits": None, + "use_delta_commands": False, + } + return dic + @property def _default_controller_config(self): controllers = { @@ -349,7 +364,7 @@ def update_controller_mode(self): for joint_name in self.finger_joint_names[arm]: self.joints[joint_name].stiffness = m.FINGER_JOINT_STIFFNESS self.joints[joint_name].max_effort = m.FINGER_JOINT_MAX_EFFORT - + @property def base_footprint_link_name(self): """ @@ -379,7 +394,7 @@ def set_position_orientation(self, position=None, orientation=None): @property def assisted_grasp_start_points(self): - side_coefficients = {"lh": np.array([1, -1, 1]), "rh": np.array([1, 1, 1])} + side_coefficients = {"left": np.array([1, -1, 1]), "right": np.array([1, 1, 1])} return { arm: [ GraspingPoint(link_name=f"{arm}_{m.PALM_LINK_NAME}", position=m.PALM_BASE_POS), @@ -396,7 +411,7 @@ def assisted_grasp_start_points(self): @property def assisted_grasp_end_points(self): - side_coefficients = {"lh": np.array([1, -1, 1]), "rh": np.array([1, 1, 1])} + side_coefficients = {"left": np.array([1, -1, 1]), "right": np.array([1, 1, 1])} return { arm: [ GraspingPoint(link_name=f"{arm}_{finger}", position=m.FINGER_TIP_POS * side_coefficients[arm]) @@ -434,7 +449,7 @@ def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: action = np.zeros(self.action_dim) # Update body action space if teleop_action.is_valid["head"]: - head_pos, head_orn = teleop_action.transforms["head"] + head_pos, head_orn = teleop_action.head[:3], T.euler2quat(teleop_action.head[3:6]) des_body_pos = head_pos - np.array([0, 0, m.BODY_HEIGHT_OFFSET]) des_body_rpy = np.array([0, 0, R.from_quat(head_orn).as_euler("XYZ")[2]]) des_body_orn = T.euler2quat(des_body_rpy) @@ -447,11 +462,8 @@ def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: # Process local transform adjustments hand_data = 0 if teleop_action.is_valid[part_name]: - part_pos, part_orn = teleop_action.transforms[part_name] - # apply eef rotation offset to part transform first - des_world_part_pos = part_pos - des_world_part_orn = T.quat_multiply(part_orn, eef_part.offset_to_body[1]) - if eef_part.name in self.arm_names: + des_world_part_pos, des_world_part_orn = teleop_action[part_name][:3], T.euler2quat(teleop_action[part_name][3:6]) + if part_name in self.arm_names: # compute gripper action if hasattr(teleop_action, "hand_data"): # hand tracking mode, compute joint rotations for each independent hand joint @@ -459,8 +471,8 @@ def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: hand_data = hand_data[:, :2].T.reshape(-1) else: # controller mode, map trigger fraction from [0, 1] to [-1, 1] range. 
- hand_data = teleop_action.grippers[part_name] * 2 - 1 - action[self.controller_action_idx[f"gripper_{eef_part.name}"]] = hand_data + hand_data = teleop_action[part_name][6] * 2 - 1 + action[self.controller_action_idx[f"gripper_{part_name}"]] = hand_data # update ghost hand if necessary if self._use_ghost_hands: self.parts[part_name].update_ghost_hands(des_world_part_pos, des_world_part_orn) @@ -473,13 +485,13 @@ def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: ) # apply shoulder position offset to the part transform to get final destination pose des_local_part_pos, des_local_part_orn = T.pose_transform( - eef_part.offset_to_body[0], [0, 0, 0, 1], des_local_part_pos, des_local_part_orn + eef_part.offset_to_body, [0, 0, 0, 1], des_local_part_pos, des_local_part_orn ) des_part_rpy = R.from_quat(des_local_part_orn).as_euler("XYZ") - controller_name = "camera" if part_name == "head" else "arm_" + eef_part.name + controller_name = "camera" if part_name == "head" else "arm_" + part_name action[self.controller_action_idx[controller_name]] = np.r_[des_local_part_pos, des_part_rpy] # If we reset, teleop the robot parts to the desired pose - if eef_part.name in self.arm_names and teleop_action.reset[part_name]: + if part_name in self.arm_names and teleop_action.reset[part_name]: self.parts[part_name].set_position_orientation(des_local_part_pos, des_part_rpy) return action @@ -496,7 +508,7 @@ def __init__(self, name: str, parent: BehaviorRobot, prim_path: str, eef_type: s parent (BehaviorRobot): the parent BR object prim_path (str): prim path to the root link of the eef eef_type (str): type of eef. One of hand, head - offset_to_body (List[float]): relative rotation offset between the shoulder_rz link and the eef link. + offset_to_body (List[float]): relative POSITION offset between the rz link and the eef link. """ self.name = name self.parent = parent diff --git a/omnigibson/utils/teleop_utils.py b/omnigibson/utils/teleop_utils.py index 9205f19b8..e2cb19ad9 100644 --- a/omnigibson/utils/teleop_utils.py +++ b/omnigibson/utils/teleop_utils.py @@ -35,7 +35,6 @@ def __init__(self, config: AttrDict, robot: BaseRobot, show_control_marker: bool self.robot = robot self.robot_arms = ["left", "right"] if self.robot.n_arms == 2 else ["right"] # robot parameters - self.robot_attached = False self.movement_speed = m.movement_speed self.show_control_marker = show_control_marker self.control_markers = {} @@ -98,7 +97,8 @@ def reset(self) -> None: """ self.teleop_action = TeleopAction() self.robot_obs = TeleopObservation() - super().reset_state() + for interface in self.interfaces.values(): + interface.reset_state() class OVXRSystem(TeleopSystem): @@ -131,6 +131,7 @@ def __init__( NOTE: enable_touchpad_movement and align_anchor_to_robot_base cannot be enabled at the same time. The former is to enable free movement of the VR system (i.e. the user), while the latter is constraining the VR system to the robot pose. 
""" + self.raw_data = {} # enable xr extension lazy.omni.isaac.core.utils.extensions.enable_extension("omni.kit.xr.profile.vr") self.xr_device_class = lazy.omni.kit.xr.core.XRDeviceClass @@ -165,14 +166,14 @@ def __init__( self.hmd = None self.controllers = {} self.trackers = {} - self.reset_button_pressed = False self.xr2og_orn_offset = np.array([0.5, -0.5, -0.5, -0.5]) self.og2xr_orn_offset = np.array([-0.5, 0.5, 0.5, -0.5]) # setup event subscriptions + self.reset() self.use_hand_tracking = use_hand_tracking if use_hand_tracking: self.raw_data["hand_data"] = {} - self.teleop_data.hand_data = {} + self.teleop_action.hand_data = {} self._hand_tracking_subscription = self.xr_core.get_event_stream().create_subscription_to_pop_by_type( lazy.omni.kit.xr.core.XRCoreEventType.hand_joints, self._update_hand_tracking_data, name="hand tracking" ) @@ -203,6 +204,16 @@ def og2xr(self, pos: np.ndarray, orn: np.ndarray) -> np.ndarray: orn = T.quat_multiply(self.og2xr_orn_offset, orn) return T.pose2mat((pos, orn)).T.astype(np.float64) + def reset(self) -> None: + """ + Reset the teleop policy + """ + super().reset() + self.raw_data = {} + self.teleop_action.is_valid = {"left": False, "right": False, "head": False} + self.teleop_action.reset = {"left": False, "right": False} + self.teleop_action.head = np.zeros(6) + @property def is_enabled(self) -> bool: """ @@ -238,7 +249,7 @@ def stop(self) -> None: def update(self) -> None: """ - Steps the VR system and update self.teleop_data + Steps the VR system and update self.teleop_action """ # update raw data self._update_devices() @@ -246,47 +257,40 @@ def update(self) -> None: self._update_button_data() # Update teleop data based on controller input if not using hand tracking if not self.use_hand_tracking: - self.teleop_data.transforms["base"] = np.zeros(4) + self.teleop_action.base = np.zeros(3) + self.teleop_action.torso = 0.0 # update right hand related info - for arm in self.robot_arms: + for arm_name, arm in zip(["left", "right"], self.robot_arms): if arm in self.controllers: - self.teleop_data.transforms[arm] = ( + self.teleop_action[arm_name] = np.r_[ self.raw_data["transforms"]["controllers"][arm][0], - T.quat_multiply( + T.quat2euler(T.quat_multiply( self.raw_data["transforms"]["controllers"][arm][1], self.robot.teleop_rotation_offset[self.robot.arm_names[self.robot_arms.index(arm)]] - ) - ) - self.teleop_data.grippers[arm] = self.raw_data["button_data"][arm]["axis"]["trigger"] - self.teleop_data.is_valid[arm] = self._is_valid_transform(self.raw_data["transforms"]["controllers"][arm]) + )), + [self.raw_data["button_data"][arm]["axis"]["trigger"]] + ] + self.teleop_action.is_valid[arm_name] = self._is_valid_transform(self.raw_data["transforms"]["controllers"][arm]) else: - self.teleop_data.is_valid[arm] = False + self.teleop_action.is_valid[arm_name] = False # update base and reset info if "right" in self.controllers: - self.teleop_data.reset["right"] = self.raw_data["button_data"]["right"]["press"]["grip"] + self.teleop_action.reset["right"] = self.raw_data["button_data"]["right"]["press"]["grip"] right_axis = self.raw_data["button_data"]["right"]["axis"] - self.teleop_data.transforms["base"][0] = right_axis["touchpad_y"] * self.movement_speed - self.teleop_data.transforms["base"][3] = -right_axis["touchpad_x"] * self.movement_speed + self.teleop_action.base[0] = right_axis["touchpad_y"] * self.movement_speed + self.teleop_action.torso = -right_axis["touchpad_x"] * self.movement_speed if "left" in self.controllers: - self.teleop_data.reset["left"] 
= self.raw_data["button_data"]["left"]["press"]["grip"] + self.teleop_action.reset["left"] = self.raw_data["button_data"]["left"]["press"]["grip"] left_axis = self.raw_data["button_data"]["left"]["axis"] - self.teleop_data.transforms["base"][1] = -left_axis["touchpad_x"] * self.movement_speed - self.teleop_data.transforms["base"][2] = left_axis["touchpad_y"] * self.movement_speed + self.teleop_action.base[1] = -left_axis["touchpad_x"] * self.movement_speed + self.teleop_action.base[2] = left_axis["touchpad_y"] * self.movement_speed # update head related info - self.teleop_data.transforms["head"] = self.raw_data["transforms"]["head"] - self.teleop_data.is_valid["head"] = self._is_valid_transform(self.teleop_data.transforms["head"]) - # update robot reset and attachment info - if "right" in self.controllers and self.raw_data["button_data"]["right"]["press"]["grip"]: - if not self.reset_button_pressed: - self.reset_button_pressed = True - self.robot_attached = not self.robot_attached - else: - self.reset_button_pressed = False - self.teleop_data.robot_attached = self.robot_attached + self.teleop_action.head = np.r_[self.raw_data["transforms"]["head"][0], T.quat2euler(self.raw_data["transforms"]["head"][1])] + self.teleop_action.is_valid["head"] = self._is_valid_transform(self.raw_data["transforms"]["head"]) # Optionally move anchor if self.enable_touchpad_movement: # we use x, y from right controller for 2d movement and y from left controller for z movement - self._move_anchor(pos_offset=self.teleop_data.transforms["base"][[3, 0, 2]]) + self._move_anchor(pos_offset=np.r_[[self.teleop_action.torso], self.teleop_action.base[[0, 2]]]) if self.align_anchor_to_robot_base: robot_base_pos, robot_base_orn = self.robot.get_position_orientation() self.vr_profile.set_virtual_world_anchor_transform(self.og2xr(robot_base_pos, robot_base_orn[[0, 2, 1, 3]])) @@ -300,7 +304,7 @@ def teleop_data_to_action(self) -> np.ndarray: # optionally update control marker if self.show_control_marker: self.update_control_marker() - return self.robot.teleop_data_to_action(self.teleop_data) + return self.robot.teleop_data_to_action(self.teleop_action) def reset_transform_mapping(self, arm: str = "right") -> None: """ @@ -404,9 +408,9 @@ def _update_hand_tracking_data(self, e) -> None: """ e.consume() data_dict = e.payload - for hand in self.robot_arms: + for hand_name, hand in zip(["left, right"], self.robot_arms): if data_dict[f"joint_count_{hand}"] != 0: - self.teleop_data.is_valid[hand] = True + self.teleop_action.is_valid[hand_name] = True self.raw_data["hand_data"][hand] = {"pos": [], "orn": []} # hand_joint_matrices is an array of flattened 4x4 transform matrices for the 26 hand markers hand_joint_matrices = data_dict[f"joint_matrices_{hand}"] @@ -415,13 +419,14 @@ def _update_hand_tracking_data(self, e) -> None: pos, orn = self.xr2og(np.reshape(hand_joint_matrices[16 * i: 16 * (i + 1)], (4, 4))) self.raw_data["hand_data"][hand]["pos"].append(pos) self.raw_data["hand_data"][hand]["orn"].append(orn) - self.teleop_data.transforms[hand] = ( + self.teleop_action[hand_name] = np.r_[ self.raw_data["hand_data"][hand]["pos"][0], - T.quat_multiply( + T.quat2euler(T.quat_multiply( self.raw_data["hand_data"][hand]["orn"][0], self.robot.teleop_rotation_offset[self.robot.arm_names[self.robot_arms.index(hand)]] - ) - ) + )), + [0] + ] # Get each finger joint's rotation angle from hand tracking data # joint_angles is a 5 x 3 array of joint rotations (from thumb to pinky, from base to tip) joint_angles = np.zeros((5, 3)) @@ -440,6 
+445,4 @@ def _update_hand_tracking_data(self, e) -> None: v1 /= np.linalg.norm(v1) v2 /= np.linalg.norm(v2) joint_angles[i, j] = np.arccos(v1 @ v2) - self.teleop_data.hand_data[hand] = joint_angles - else: - self.teleop_data.is_valid[hand] = False + self.teleop_action.hand_data[hand_name] = joint_angles From 0aac2b78f059ac75ca6651470b1b453dee2c8646 Mon Sep 17 00:00:00 2001 From: Wensi Ai Date: Thu, 7 Mar 2024 16:30:31 -0800 Subject: [PATCH 14/21] Fix behaviorrobot finger bug --- omnigibson/examples/teleoperation/vr_simple_demo.py | 6 +++--- omnigibson/robots/behavior_robot.py | 2 ++ 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/omnigibson/examples/teleoperation/vr_simple_demo.py b/omnigibson/examples/teleoperation/vr_simple_demo.py index 7abf37fcd..5a79c7a45 100644 --- a/omnigibson/examples/teleoperation/vr_simple_demo.py +++ b/omnigibson/examples/teleoperation/vr_simple_demo.py @@ -6,12 +6,12 @@ def main(): # Create the config for generating the environment we want - scene_cfg = {"type": "Scene"}#, "scene_model": "Rs_int"} + scene_cfg = {"type": "InteractiveIndoorScene", "scene_model": "Rs_int"} robot0_cfg = { "type": "BehaviorRobot", "controller_config": { - "gripper_0": {"command_input_limits": "default"}, - "gripper_1": {"command_input_limits": "default"}, + "gripper_left": {"command_input_limits": "default"}, + "gripper_right": {"command_input_limits": "default"}, } } cfg = dict(scene=scene_cfg, robots=[robot0_cfg]) diff --git a/omnigibson/robots/behavior_robot.py b/omnigibson/robots/behavior_robot.py index 435ffe86e..8e2a6ae6a 100644 --- a/omnigibson/robots/behavior_robot.py +++ b/omnigibson/robots/behavior_robot.py @@ -48,6 +48,7 @@ m.ARM_JOINT_MAX_EFFORT = 300 m.FINGER_JOINT_STIFFNESS = 1e3 m.FINGER_JOINT_MAX_EFFORT = 50 +m.FINGER_JOINT_MAX_VELOCITY = np.pi * 4 class BehaviorRobot(ManipulationRobot, LocomotionRobot, ActiveCameraRobot): @@ -364,6 +365,7 @@ def update_controller_mode(self): for joint_name in self.finger_joint_names[arm]: self.joints[joint_name].stiffness = m.FINGER_JOINT_STIFFNESS self.joints[joint_name].max_effort = m.FINGER_JOINT_MAX_EFFORT + self.joints[joint_name].max_velocity = m.FINGER_JOINT_MAX_VELOCITY @property def base_footprint_link_name(self): From e6540b90a31db3362b25ca25eb71f3ff54787da4 Mon Sep 17 00:00:00 2001 From: Wensi Ai Date: Fri, 8 Mar 2024 16:24:32 -0800 Subject: [PATCH 15/21] Update code --- omnigibson/examples/teleoperation/hand_tracking_demo.py | 4 ++-- omnigibson/examples/teleoperation/vr_simple_demo.py | 8 +++----- omnigibson/utils/teleop_utils.py | 2 +- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/omnigibson/examples/teleoperation/hand_tracking_demo.py b/omnigibson/examples/teleoperation/hand_tracking_demo.py index 6f02d7a05..df0207e4f 100644 --- a/omnigibson/examples/teleoperation/hand_tracking_demo.py +++ b/omnigibson/examples/teleoperation/hand_tracking_demo.py @@ -76,12 +76,12 @@ def main(): vrsys.update() if DEBUG_MODE: # update the 26 markers' position and orientation for each hand - if vrsys.teleop_data.is_valid["left"]: + if vrsys.teleop_action.is_valid["left"]: for i in range(26): pos = vrsys.raw_data["hand_data"]["left"]["pos"][i] orn = vrsys.raw_data["hand_data"]["left"]["orn"][i] markers[i].set_position_orientation(pos, orn) - if vrsys.teleop_data.is_valid["right"]: + if vrsys.teleop_action.is_valid["right"]: for i in range(26): pos = vrsys.raw_data["hand_data"]["right"]["pos"][i] orn = vrsys.raw_data["hand_data"]["right"]["orn"][i] diff --git 
a/omnigibson/examples/teleoperation/vr_simple_demo.py b/omnigibson/examples/teleoperation/vr_simple_demo.py index 5a79c7a45..30696de1a 100644 --- a/omnigibson/examples/teleoperation/vr_simple_demo.py +++ b/omnigibson/examples/teleoperation/vr_simple_demo.py @@ -6,9 +6,9 @@ def main(): # Create the config for generating the environment we want - scene_cfg = {"type": "InteractiveIndoorScene", "scene_model": "Rs_int"} + scene_cfg = {"type": "Scene"} #"InteractiveTraversableScene", "scene_model": "Rs_int"} robot0_cfg = { - "type": "BehaviorRobot", + "type": "Tiago", "controller_config": { "gripper_left": {"command_input_limits": "default"}, "gripper_right": {"command_input_limits": "default"}, @@ -20,9 +20,7 @@ def main(): env = og.Environment(configs=cfg) env.reset() # start vrsys - vrsys = OVXRSystem(robot=env.robots[0], show_control_marker=False, system="SteamVR", enable_touchpad_movement=True) - # We want a lower movement speed for controlling with VR headset - vrsys.base_movement_speed = 0.03 + vrsys = OVXRSystem(robot=env.robots[0], show_control_marker=False, system="SteamVR", align_anchor_to_robot_base=True) vrsys.start() # set headset position to be 1m above ground and facing +x vrsys.set_initial_transform(pos=[0, 0, 1], orn=[0, 0, 0, 1]) diff --git a/omnigibson/utils/teleop_utils.py b/omnigibson/utils/teleop_utils.py index e2cb19ad9..83b87ea65 100644 --- a/omnigibson/utils/teleop_utils.py +++ b/omnigibson/utils/teleop_utils.py @@ -139,7 +139,7 @@ def __init__( super().__init__(teleop_config, robot, show_control_marker) # we want to further slow down the movement speed if we are using touchpad movement if enable_touchpad_movement: - self.movement_speed *= 0.1 + self.movement_speed *= 0.3 # get xr core and profile self.xr_core = lazy.omni.kit.xr.core.XRCore.get_singleton() self.vr_profile = self.xr_core.get_profile("vr") From fb83e437af76b1ed3531297f5d9ad53a2a419d9a Mon Sep 17 00:00:00 2001 From: "Wensi (Vince) Ai" Date: Fri, 15 Mar 2024 13:25:08 -0700 Subject: [PATCH 16/21] Update robot control example --- omnigibson/examples/robots/robot_control_example.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/omnigibson/examples/robots/robot_control_example.py b/omnigibson/examples/robots/robot_control_example.py index 013b29769..e08667944 100644 --- a/omnigibson/examples/robots/robot_control_example.py +++ b/omnigibson/examples/robots/robot_control_example.py @@ -24,7 +24,7 @@ # Don't use GPU dynamics and use flatcache for performance boost gm.USE_GPU_DYNAMICS = False -gm.ENABLE_FLATCACHE = False +gm.ENABLE_FLATCACHE = True def choose_controllers(robot, random_selection=False): From 01bfa09b01d8fa2ed3b78a0d9e0d74f7f4255122 Mon Sep 17 00:00:00 2001 From: "Wensi (Vince) Ai" Date: Fri, 15 Mar 2024 20:28:23 -0700 Subject: [PATCH 17/21] Add telemoma dependency --- .../examples/teleoperation/robot_teleoperate_demo.py | 10 ++++++---- setup.py | 3 ++- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/omnigibson/examples/teleoperation/robot_teleoperate_demo.py b/omnigibson/examples/teleoperation/robot_teleoperate_demo.py index 1d210f60c..4b1333187 100644 --- a/omnigibson/examples/teleoperation/robot_teleoperate_demo.py +++ b/omnigibson/examples/teleoperation/robot_teleoperate_demo.py @@ -22,7 +22,7 @@ "keyboard": "Keyboard (default)", "spacemouse": "SpaceMouse", "oculus": "Oculus Quest", - "human_kpt": "Human Keypoints with Camera", + "vision": "Human Keypoints with Camera", } def main(): @@ -35,10 +35,12 @@ def main(): # Generate teleop config 
teleop_config.arm_left_controller = arm_teleop_method teleop_config.arm_right_controller = arm_teleop_method - teleop_config.interface_kwargs[arm_teleop_method] = {} if arm_teleop_method != "human_kpt" else {"camera": RealSenseCamera()} teleop_config.base_controller = base_teleop_method - teleop_config.interface_kwargs[base_teleop_method] = {} if base_teleop_method != "human_kpt" else {"camera": RealSenseCamera()} - + teleop_config.interface_kwargs["keyboard"] = {"arm_speed_scaledown": 0.04} + teleop_config.interface_kwargs["spacemouse"] = {"arm_speed_scaledown": 0.04} + if arm_teleop_method == "vision" or base_teleop_method == "vision": + teleop_config.interface_kwargs["vision"] = {"camera": RealSenseCamera()} + # Create the config for generating the environment we want scene_cfg = {"type": "Scene"} # Add the robot we want to load diff --git a/setup.py b/setup.py index 51ccc23c4..9ef751d9b 100644 --- a/setup.py +++ b/setup.py @@ -44,7 +44,8 @@ "pymeshlab~=2022.2", "click~=8.1.3", "aenum~=3.1.15", - "rtree~=1.2.0" + "rtree~=1.2.0", + "telemoma~=0.1.1" ], tests_require=[], python_requires=">=3", From 29a9046f7d9b4546121b89b674aa7da2924473f2 Mon Sep 17 00:00:00 2001 From: Wensi Ai Date: Sat, 16 Mar 2024 03:34:18 -0700 Subject: [PATCH 18/21] Fix viewer camera bug --- omnigibson/envs/env_base.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/omnigibson/envs/env_base.py b/omnigibson/envs/env_base.py index 61db0dc0f..bd5d33581 100644 --- a/omnigibson/envs/env_base.py +++ b/omnigibson/envs/env_base.py @@ -15,6 +15,7 @@ from omnigibson.utils.ui_utils import create_module_logger from omnigibson.utils.python_utils import assert_valid_key, merge_nested_dicts, create_class_from_registry_and_config,\ Recreatable +from omnigibson.macros import gm # Create module logger @@ -202,8 +203,9 @@ def _load_scene(self): og.sim.import_scene(scene) # Set the rendering settings - og.sim.viewer_width = self.render_config["viewer_width"] - og.sim.viewer_height = self.render_config["viewer_height"] + if gm.RENDER_VIEWER_CAMERA: + og.sim.viewer_width = self.render_config["viewer_width"] + og.sim.viewer_height = self.render_config["viewer_height"] og.sim.device = self.device assert og.sim.is_stopped(), "Simulator must be stopped after loading scene!" 
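Taken together, the demo and dependency changes above boil down to the usage pattern sketched here. This is a condensed illustration rather than patch content: it assumes `env` is the OmniGibson environment with a two-arm robot created earlier in robot_teleoperate_demo.py, and it only uses names that already appear in the diffs (teleop_config, TeleopSystem, get_obs, get_action, arm_speed_scaledown):

    # Sketch: keyboard teleop for both arms and the base, with the telemoma base config.
    from telemoma.configs.base_config import teleop_config
    from omnigibson.utils.teleop_utils import TeleopSystem

    teleop_config.arm_left_controller = "keyboard"
    teleop_config.arm_right_controller = "keyboard"
    teleop_config.base_controller = "keyboard"
    teleop_config.interface_kwargs["keyboard"] = {"arm_speed_scaledown": 0.04}

    teleop_sys = TeleopSystem(config=teleop_config, robot=env.robots[0], show_control_marker=True)
    teleop_sys.start()
    for _ in range(10000):
        env.step(teleop_sys.get_action(teleop_sys.get_obs()))
    teleop_sys.stop()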
From 9c6c79bd6624da22f8f292e84d6713170733bb88 Mon Sep 17 00:00:00 2001 From: "Wensi (Vince) Ai" Date: Fri, 15 Mar 2024 21:17:02 -0700 Subject: [PATCH 19/21] Update telemoma version --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 9ef751d9b..a0897d80e 100644 --- a/setup.py +++ b/setup.py @@ -45,7 +45,7 @@ "click~=8.1.3", "aenum~=3.1.15", "rtree~=1.2.0", - "telemoma~=0.1.1" + "telemoma~=0.1.2" ], tests_require=[], python_requires=">=3", From 5a8aae703f83d473906f90f4aa285950ee538543 Mon Sep 17 00:00:00 2001 From: "Wensi (Vince) Ai" Date: Fri, 15 Mar 2024 23:22:30 -0700 Subject: [PATCH 20/21] Address comments --- .../teleoperation/robot_teleoperate_demo.py | 13 ++---- omnigibson/robots/behavior_robot.py | 3 +- omnigibson/robots/manipulation_robot.py | 3 +- omnigibson/robots/robot_base.py | 3 +- omnigibson/robots/tiago.py | 4 +- omnigibson/robots/two_wheel_robot.py | 3 +- omnigibson/utils/teleop_utils.py | 17 ++++--- requirements-dev.txt | 3 +- setup.py | 1 - tests/test_robot_teleoperation.py | 44 +++++++------------ 10 files changed, 36 insertions(+), 58 deletions(-) diff --git a/omnigibson/examples/teleoperation/robot_teleoperate_demo.py b/omnigibson/examples/teleoperation/robot_teleoperate_demo.py index 4b1333187..d96cbd06e 100644 --- a/omnigibson/examples/teleoperation/robot_teleoperate_demo.py +++ b/omnigibson/examples/teleoperation/robot_teleoperate_demo.py @@ -1,17 +1,8 @@ """ Example script for using external devices to teleoperate a robot. """ -try: - from mediapipe import solutions -except ModuleNotFoundError: - pass - import omnigibson as og from omnigibson.utils.ui_utils import choose_from_options -from omnigibson.utils.teleop_utils import TeleopSystem - -from telemoma.utils.camera_utils import RealSenseCamera -from telemoma.configs.base_config import teleop_config ROBOTS = { "FrankaPanda": "Franka Emika Panda (default)", @@ -26,6 +17,10 @@ } def main(): + from omnigibson.utils.teleop_utils import TeleopSystem + from telemoma.utils.camera_utils import RealSenseCamera + from telemoma.configs.base_config import teleop_config + robot_name = choose_from_options(options=ROBOTS, name="robot") arm_teleop_method = choose_from_options(options=TELEOP_METHOD, name="robot arm teleop method") if robot_name != "FrankaPanda": diff --git a/omnigibson/robots/behavior_robot.py b/omnigibson/robots/behavior_robot.py index 8e2a6ae6a..652cf70d4 100644 --- a/omnigibson/robots/behavior_robot.py +++ b/omnigibson/robots/behavior_robot.py @@ -5,7 +5,6 @@ import os from scipy.spatial.transform import Rotation as R from typing import List, Tuple, Iterable -from telemoma.human_interface.teleop_core import TeleopAction import omnigibson as og import omnigibson.lazy as lazy @@ -432,7 +431,7 @@ def update_hand_contact_info(self): self._part_is_in_contact[hand_name] = len(self.eef_links[hand_name].contact_list()) > 0 \ or np.any([len(finger.contact_list()) > 0 for finger in self.finger_links[hand_name]]) - def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: + def teleop_data_to_action(self, teleop_action) -> np.ndarray: """ Generates an action for the BehaviorRobot to perform based on teleop action data dict. 
diff --git a/omnigibson/robots/manipulation_robot.py b/omnigibson/robots/manipulation_robot.py index f32476183..bbed13460 100644 --- a/omnigibson/robots/manipulation_robot.py +++ b/omnigibson/robots/manipulation_robot.py @@ -2,7 +2,6 @@ from collections import namedtuple import numpy as np import networkx as nx -from telemoma.human_interface.teleop_core import TeleopAction import omnigibson as og import omnigibson.lazy as lazy @@ -1474,7 +1473,7 @@ def teleop_rotation_offset(self): """ return {arm: np.array([0, 0, 0, 1]) for arm in self.arm_names} - def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: + def teleop_data_to_action(self, teleop_action) -> np.ndarray: """ Generate action data from teleoperation action data NOTE: This implementation only supports IK/OSC controller for arm and MultiFingerGripperController for gripper. diff --git a/omnigibson/robots/robot_base.py b/omnigibson/robots/robot_base.py index f092a0ca0..5e188f44a 100644 --- a/omnigibson/robots/robot_base.py +++ b/omnigibson/robots/robot_base.py @@ -2,7 +2,6 @@ from copy import deepcopy import numpy as np import matplotlib.pyplot as plt -from telemoma.human_interface.teleop_core import TeleopAction from omnigibson.macros import create_module_macros from omnigibson.sensors import create_sensor, SENSOR_PRIMS_TO_SENSOR_CLS, ALL_SENSOR_MODALITIES, VisionSensor, ScanSensor @@ -480,7 +479,7 @@ def reset_joint_pos_aabb_extent(self): """ return self._reset_joint_pos_aabb_extent - def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: + def teleop_data_to_action(self, teleop_action) -> np.ndarray: """ Generate action data from teleoperation action data Args: diff --git a/omnigibson/robots/tiago.py b/omnigibson/robots/tiago.py index 7e6e144b8..4cd1b03cb 100644 --- a/omnigibson/robots/tiago.py +++ b/omnigibson/robots/tiago.py @@ -1,7 +1,5 @@ import os import numpy as np -from telemoma.human_interface.teleop_core import TeleopAction - import omnigibson as og import omnigibson.lazy as lazy from omnigibson.macros import gm @@ -763,7 +761,7 @@ def get_angular_velocity(self) -> np.ndarray: def eef_usd_path(self): return {arm: os.path.join(gm.ASSET_PATH, "models/tiago/tiago_dual_omnidirectional_stanford/tiago_eef.usd") for arm in self.arm_names} - def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: + def teleop_data_to_action(self, teleop_action) -> np.ndarray: action = ManipulationRobot.teleop_data_to_action(self, teleop_action) action[self.base_action_idx] = teleop_action.base * 0.1 return action diff --git a/omnigibson/robots/two_wheel_robot.py b/omnigibson/robots/two_wheel_robot.py index 0d05fb08c..73bb7cb26 100644 --- a/omnigibson/robots/two_wheel_robot.py +++ b/omnigibson/robots/two_wheel_robot.py @@ -1,7 +1,6 @@ from abc import abstractmethod import gym import numpy as np -from telemoma.human_interface.teleop_core import TeleopAction from omnigibson.controllers import DifferentialDriveController from omnigibson.robots.locomotion_robot import LocomotionRobot @@ -150,7 +149,7 @@ def _do_not_register_classes(cls): classes.add("TwoWheelRobot") return classes - def teleop_data_to_action(self, teleop_action: TeleopAction) -> np.ndarray: + def teleop_data_to_action(self, teleop_action) -> np.ndarray: """ Generate action data from teleoperation action data NOTE: This implementation only supports DifferentialDriveController. 
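The NOTE lines in the docstrings above spell out the controller assumptions behind teleop_data_to_action: IK/OSC for the arm, MultiFingerGripperController for the gripper, and DifferentialDriveController for a two-wheel base. A hedged sketch of a controller_config that satisfies those assumptions follows; the arm entry mirrors the updated teleoperation test, while the gripper and base entries use standard OmniGibson controller names rather than values taken from this patch:

robot_cfg = {
    "type": "Fetch",  # a TwoWheelRobot with one arm; chosen purely for illustration
    "obs_modalities": "rgb",
    "action_normalize": False,
    "controller_config": {
        # arm: IK control, as assumed by ManipulationRobot.teleop_data_to_action
        "arm_0": {"name": "InverseKinematicsController", "command_input_limits": None},
        # gripper: MultiFingerGripperController with binary open/close commands
        "gripper_0": {"name": "MultiFingerGripperController", "mode": "binary"},
        # base: DifferentialDriveController, as assumed by TwoWheelRobot.teleop_data_to_action
        "base": {"name": "DifferentialDriveController"},
    },
}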
diff --git a/omnigibson/utils/teleop_utils.py b/omnigibson/utils/teleop_utils.py index 83b87ea65..ffc4ecd15 100644 --- a/omnigibson/utils/teleop_utils.py +++ b/omnigibson/utils/teleop_utils.py @@ -9,10 +9,13 @@ from omnigibson.objects import USDObject from omnigibson.robots.robot_base import BaseRobot -from telemoma.human_interface.teleop_core import TeleopAction, TeleopObservation -from telemoma.human_interface.teleop_policy import TeleopPolicy -from telemoma.utils.general_utils import AttrDict -from telemoma.configs.base_config import teleop_config +try: + from telemoma.human_interface.teleop_core import TeleopAction, TeleopObservation + from telemoma.human_interface.teleop_policy import TeleopPolicy + from telemoma.utils.general_utils import AttrDict + from telemoma.configs.base_config import teleop_config +except ImportError as e: + raise e from ValueError("For teleoperation, install telemoma by running 'pip install telemoma'") m = create_module_macros(module_path=__file__) m.movement_speed = 0.2 # the speed of the robot base movement @@ -23,7 +26,7 @@ class TeleopSystem(TeleopPolicy): """ def __init__(self, config: AttrDict, robot: BaseRobot, show_control_marker: bool = False) -> None: """ - Initializes the Teleoperatin System + Initializes the Teleoperation System Args: config (AttrDict): configuration dictionary robot (BaseRobot): the robot that will be controlled. @@ -262,14 +265,14 @@ def update(self) -> None: # update right hand related info for arm_name, arm in zip(["left", "right"], self.robot_arms): if arm in self.controllers: - self.teleop_action[arm_name] = np.r_[ + self.teleop_action[arm_name] = np.concatenate(( self.raw_data["transforms"]["controllers"][arm][0], T.quat2euler(T.quat_multiply( self.raw_data["transforms"]["controllers"][arm][1], self.robot.teleop_rotation_offset[self.robot.arm_names[self.robot_arms.index(arm)]] )), [self.raw_data["button_data"][arm]["axis"]["trigger"]] - ] + )) self.teleop_action.is_valid[arm_name] = self._is_valid_transform(self.raw_data["transforms"]["controllers"][arm]) else: self.teleop_action.is_valid[arm_name] = False diff --git a/requirements-dev.txt b/requirements-dev.txt index fc18b391e..12c08c7ab 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -8,4 +8,5 @@ mkdocs-material-extensions mkdocstrings[python] mkdocs-section-index mkdocs-literate-nav -pynvml \ No newline at end of file +pynvml +telemoma~=0.1.2 diff --git a/setup.py b/setup.py index a0897d80e..2ddb032b1 100644 --- a/setup.py +++ b/setup.py @@ -45,7 +45,6 @@ "click~=8.1.3", "aenum~=3.1.15", "rtree~=1.2.0", - "telemoma~=0.1.2" ], tests_require=[], python_requires=">=3", diff --git a/tests/test_robot_teleoperation.py b/tests/test_robot_teleoperation.py index f7028dde2..5345ef071 100644 --- a/tests/test_robot_teleoperation.py +++ b/tests/test_robot_teleoperation.py @@ -1,11 +1,9 @@ -import pytest import omnigibson as og import numpy as np from omnigibson.macros import gm -from omnigibson.utils.teleop_utils import TeleopSystem -from omnigibson.utils.transform_utils import quat2euler, euler2quat +from telemoma.human_interface.teleop_core import TeleopAction +from omnigibson.utils.transform_utils import quat2euler -@pytest.mark.skip(reason="test is broken") def test_teleop(): cfg = { "env": {"action_timestep": 1 / 60., "physics_timestep": 1 / 120.}, @@ -17,8 +15,7 @@ def test_teleop(): "controller_config": { "arm_0": { "name": "InverseKinematicsController", - "mode": "pose_absolute_ori", - "motor_type": "position" + "command_input_limits": None, }, } } @@ -26,49 
+23,38 @@ def test_teleop(): } # Make sure sim is stopped - og.sim.stop() + if og.sim is not None: + og.sim.stop() # Make sure GPU dynamics are enabled (GPU dynamics needed for cloth) and no flatcache - gm.ENABLE_OBJECT_STATES = True - gm.USE_GPU_DYNAMICS = True + gm.USE_GPU_DYNAMICS = False gm.ENABLE_FLATCACHE = False # Create the environment env = og.Environment(configs=cfg) robot = env.robots[0] env.reset() - teleop_system = TeleopSystem(robot=robot, show_control_marker=False) + teleop_action = TeleopAction() start_base_pose = robot.get_position_orientation() start_eef_pose = robot.links[robot.eef_link_names[robot.default_arm]].get_position_orientation() # test moving robot arm - teleop_system.teleop_data.robot_attached = True - teleop_system.reset_transform_mapping() - target_eef_pos = start_eef_pose[0] + np.array([0.05, 0.05, 0]) - target_eef_orn = euler2quat(quat2euler(start_eef_pose[1]) + np.array([0.5, 0.5, 0])) - teleop_system.teleop_data.is_valid["right"] = True - teleop_system.teleop_data.transforms["base"] = np.zeros(4) - teleop_system.teleop_data.transforms["right"] = (target_eef_pos, target_eef_orn) + teleop_action.right = np.concatenate(([0.01], np.zeros(6))) for _ in range(50): - action = teleop_system.teleop_data_to_action() + action = robot.teleop_data_to_action(teleop_action) env.step(action) cur_eef_pose = robot.links[robot.eef_link_names[robot.default_arm]].get_position_orientation() - cur_base_pose = robot.get_position_orientation() - assert np.allclose(cur_base_pose[0], start_base_pose[0], atol=1e-2), "base pos not in starting place" - assert np.allclose(cur_base_pose[1], start_base_pose[1], atol=1e-2), "base orn not in starting place" - assert np.allclose(cur_eef_pose[0], target_eef_pos, atol=1e-2), "eef pos not in target place" - assert np.allclose(cur_eef_pose[1], target_eef_orn, atol=1e-2) or np.allclose(cur_eef_pose[1], -target_eef_orn, atol=1e-2), \ - "eef orn not in target place" + assert cur_eef_pose[0][0] - start_eef_pose[0][0] > 0.02, "Robot arm not moving forward" # test moving robot base - teleop_system.teleop_data.transforms["right"] = cur_eef_pose - teleop_system.teleop_data.transforms["base"] = np.array([0.02, 0, 0, 0.02]) + teleop_action.right = np.zeros(7) + teleop_action.base = np.array([0.1, 0, 0.1]) for _ in range(50): - action = teleop_system.teleop_data_to_action() + action = robot.teleop_data_to_action(teleop_action) env.step(action) cur_base_pose = robot.get_position_orientation() - assert cur_base_pose[0][0] > start_base_pose[0][0], "robot base not moving forward" - assert quat2euler(cur_base_pose[1])[2] > quat2euler(start_base_pose[1])[2], "robot base not rotating counter-clockwise" + assert cur_base_pose[0][0] - start_base_pose[0][0] > 0.02, "robot base not moving forward" + assert quat2euler(cur_base_pose[1])[2] - quat2euler(start_base_pose[1])[2] > 0.02, "robot base not rotating counter-clockwise" # Clear the sim og.sim.clear() From 086f74f4a4e8de30377dd9913fc92d4a3663fc6c Mon Sep 17 00:00:00 2001 From: "Wensi (Vince) Ai" Date: Sun, 17 Mar 2024 00:25:30 -0700 Subject: [PATCH 21/21] fix --- .../teleoperation/hand_tracking_demo.py | 97 ------------------- 1 file changed, 97 deletions(-) delete mode 100644 omnigibson/examples/teleoperation/hand_tracking_demo.py diff --git a/omnigibson/examples/teleoperation/hand_tracking_demo.py b/omnigibson/examples/teleoperation/hand_tracking_demo.py deleted file mode 100644 index df0207e4f..000000000 --- a/omnigibson/examples/teleoperation/hand_tracking_demo.py +++ /dev/null @@ -1,97 +0,0 @@ -""" -Example 
script for using hand tracking (OpenXR only) with dexterous hand. -You can set DEBUG_MODE to True to visualize the landmarks of the hands! -""" -import omnigibson as og -from omnigibson.utils.teleop_utils import OVXRSystem - -DEBUG_MODE = True # set to True to visualize the landmarks of the hands - -def main(): - # Create the config for generating the environment we want - scene_cfg = {"type": "Scene"} - robot0_cfg = { - "type": "BehaviorRobot", - "obs_modalities": ["rgb", "depth", "normal", "scan", "occupancy_grid"], - "action_normalize": False, - "grasping_mode": "assisted" - } - object_cfg = [ - { - "type": "DatasetObject", - "prim_path": "/World/breakfast_table", - "name": "breakfast_table", - "category": "breakfast_table", - "model": "kwmfdg", - "bounding_box": [2, 1, 0.4], - "position": [0.8, 0, 0.3], - "orientation": [0, 0, 0.707, 0.707], - }, - { - "type": "DatasetObject", - "prim_path": "/World/apple", - "name": "apple", - "category": "apple", - "model": "omzprq", - "position": [0.6, 0.1, 0.5], - }, - { - "type": "DatasetObject", - "prim_path": "/World/banana", - "name": "banana", - "category": "banana", - "model": "znakxm", - "position": [0.6, -0.1, 0.5], - }, - ] - if DEBUG_MODE: - # Add the marker to visualize hand tracking landmarks - object_cfg.extend([{ - "type": "PrimitiveObject", - "prim_path": f"/World/marker_{i}", - "name": f"marker_{i}", - "primitive_type": "Cube", - "size": 0.01, - "visual_only": True, - "rgba": [0.0, 1.0, 0.0, 1.0], - } for i in range(52)]) - - cfg = dict(scene=scene_cfg, robots=[robot0_cfg], objects=object_cfg) - # Create the environment - env = og.Environment(configs=cfg) - env.reset() - - if DEBUG_MODE: - markers = [env.scene.object_registry("name", f"marker_{i}") for i in range(52)] - - # Start vrsys - vrsys = OVXRSystem(robot=env.robots[0], show_control_marker=False, system="OpenXR", use_hand_tracking=True) - vrsys.start() - # set headset position to be 1m above ground and facing +x direction - vrsys.set_initial_transform(pos=[0, 0, 1], orn=[0, 0, 0, 1]) - - # main simulation loop - for _ in range(10000): - # update vr system - vrsys.update() - if DEBUG_MODE: - # update the 26 markers' position and orientation for each hand - if vrsys.teleop_action.is_valid["left"]: - for i in range(26): - pos = vrsys.raw_data["hand_data"]["left"]["pos"][i] - orn = vrsys.raw_data["hand_data"]["left"]["orn"][i] - markers[i].set_position_orientation(pos, orn) - if vrsys.teleop_action.is_valid["right"]: - for i in range(26): - pos = vrsys.raw_data["hand_data"]["right"]["pos"][i] - orn = vrsys.raw_data["hand_data"]["right"]["orn"][i] - markers[i + 26].set_position_orientation(pos, orn) - action = vrsys.teleop_data_to_action() - env.step(action) - - # Shut down the environment cleanly at the end - vrsys.stop() - env.close() - -if __name__ == "__main__": - main() \ No newline at end of file
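End-to-end, this series leaves robot teleoperation configured through telemoma's teleop_config and the rewritten TeleopSystem. A minimal sketch of keyboard-only teleoperation wiring, assuming the telemoma~=0.1.2 pin added above and using only the TeleopSystem constructor arguments shown in this series; the per-step action query inside the simulation loop is left to robot_teleoperate_demo.py:

import omnigibson as og
from omnigibson.utils.teleop_utils import TeleopSystem
from telemoma.configs.base_config import teleop_config

# keyboard drives both arms and the base; arm speed scaled down as in the demo script
teleop_config.arm_left_controller = "keyboard"
teleop_config.arm_right_controller = "keyboard"
teleop_config.base_controller = "keyboard"
teleop_config.interface_kwargs["keyboard"] = {"arm_speed_scaledown": 0.04}

cfg = {
    "scene": {"type": "Scene"},
    "robots": [{"type": "Tiago", "obs_modalities": "rgb", "action_normalize": False}],
}
env = og.Environment(configs=cfg)
robot = env.robots[0]

teleop_sys = TeleopSystem(config=teleop_config, robot=robot, show_control_marker=False)
teleop_sys.start()
# the per-step loop queries teleop_sys for an action and calls env.step(); see robot_teleoperate_demo.py
teleop_sys.stop()
env.close()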