Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 5 additions & 5 deletions OmniGibson/omnigibson/learning/eval.py
Original file line number Diff line number Diff line change
Expand Up @@ -392,11 +392,11 @@ def _sigint_handler(self, signal_received, frame):
for episode in episodes:
if episode["episode_index"] // 1e4 == task_idx:
instances_to_run.append(str(int((episode["episode_index"] // 10) % 1e3)))
if config.eval_instance_ids:
assert set(config.eval_instance_ids).issubset(
set(range(m.NUM_TRAIN_INSTANCES))
), f"eval instance ids must be in range({m.NUM_TRAIN_INSTANCES})"
instances_to_run = [instances_to_run[i] for i in config.eval_instance_ids]
if config.eval_instance_ids:
assert set(config.eval_instance_ids).issubset(
set(range(m.NUM_TRAIN_INSTANCES))
), f"eval instance ids must be in range({m.NUM_TRAIN_INSTANCES})"
instances_to_run = [instances_to_run[i] for i in config.eval_instance_ids]
else:
instances_to_run = (
config.eval_instance_ids if config.eval_instance_ids is not None else set(range(m.NUM_EVAL_INSTANCES))
Expand Down
3 changes: 2 additions & 1 deletion OmniGibson/omnigibson/learning/wrappers/__init__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
from .default_wrapper import DefaultWrapper
from .heavy_robot_wrapper import HeavyRobotWrapper
from .rgb_low_res_wrapper import RGBLowResWrapper
from .rich_obs_wrapper import RichObservationWrapper

__all__ = ["DefaultWrapper", "RGBLowResWrapper", "RichObservationWrapper"]
__all__ = ["DefaultWrapper", "HeavyRobotWrapper", "RGBLowResWrapper", "RichObservationWrapper"]
34 changes: 34 additions & 0 deletions OmniGibson/omnigibson/learning/wrappers/heavy_robot_wrapper.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
import omnigibson as og
from omnigibson.envs import EnvironmentWrapper, Environment
from omnigibson.utils.ui_utils import create_module_logger
from omnigibson.learning.utils.eval_utils import ROBOT_CAMERA_NAMES

# Create module logger
logger = create_module_logger("HeavyRobotWrapper")


class HeavyRobotWrapper(EnvironmentWrapper):
    """Environment wrapper matching the data-collection robot configuration.

    Reconfigures the wrapped environment's (single) robot to:
      - use a heavy base (250 kg base_footprint mass, as used during data collection),
      - render all robot cameras at low resolution (224 x 224),
      - use a 40.0 horizontal aperture on the head camera (data-collection value).

    Note: eval.py already configures the robot sensors with rgb + depth +
    seg_instance_id modalities; this wrapper only changes resolution/aperture
    and mass, it does not filter modalities. For the full list of available
    modalities, see VisionSensor.ALL_MODALITIES.

    Args:
        env (og.Environment): The environment to wrap. Its first robot's type
            must have an entry in ROBOT_CAMERA_NAMES.

    Raises:
        ValueError: If the robot's type is not present in ROBOT_CAMERA_NAMES.
    """

    def __init__(self, env: Environment):
        super().__init__(env=env)
        robot = env.robots[0]
        # Resolve the camera-name table from the robot's class name instead of
        # hardcoding "R1Pro", and fail fast with a clear message on a mismatch.
        robot_type = type(robot).__name__
        if robot_type not in ROBOT_CAMERA_NAMES:
            raise ValueError(
                f"Robot type '{robot_type}' not found in ROBOT_CAMERA_NAMES. "
                f"Available types: {list(ROBOT_CAMERA_NAMES.keys())}"
            )
        # Sensor/mass changes require the simulator to be stopped.
        with og.sim.stopped():
            # Match the data-collection configuration: heavy (250 kg) base.
            robot.base_footprint_link.mass = 250.0
            # Update robot sensors: low-res rendering, data-collection head aperture.
            for camera_id, camera_name in ROBOT_CAMERA_NAMES[robot_type].items():
                # camera_name is "<robot>::<sensor>"; sensors are keyed by the sensor part.
                sensor_name = camera_name.split("::")[1]
                if camera_id == "head":
                    robot.sensors[sensor_name].horizontal_aperture = 40.0  # data-collection value
                robot.sensors[sensor_name].image_height = 224
                robot.sensors[sensor_name].image_width = 224
        # Sensor shapes changed, so the cached observation space must be rebuilt.
        env.load_observation_space()
        logger.info("Reloaded observation space!")
3 changes: 2 additions & 1 deletion docs/challenge/evaluation.md
Original file line number Diff line number Diff line change
Expand Up @@ -43,8 +43,9 @@ Here is a brief explanation of the arguments:
```
which is a barebone wrapper that does not provide anything beyond low resolution rgb and proprioception info. There are three example wrappers under `omnigibson.learning.wrappers`:

- `RGBLowResWrapper`: only use rgb as visual observation and camera resolutions of 224 * 224. Only using low-res RGB can help speed up the simulator and thus reduce evaluation time compared to the two other example wrappers. This wrapper is ok to use in standard track.
- `RGBLowResWrapper`: only use rgb as visual observation and camera resolutions of 224 * 224. Only using low-res RGB can help speed up the simulator and thus reduce evaluation time compared to the two other example wrappers. This wrapper is ok to use in both standard track and privileged track.
- `DefaultWrapper`: wrapper with the default observation config used during data collection (rgb + depth + segmentation, 720p for head camera and 480p for wrist camera). This wrapper is ok to use in standard track, but evaluation will be considerably slower compared to `RGBLowResWrapper`.
- `HeavyRobotWrapper`: wrapper with the RGB low resolution observation config, and heavy robot base mass (250kg) used during data collection. This wrapper is ok to use in both standard track and privileged track.
- `RichObservationWrapper`: this will load additional observation modalities, such as normal and flow, as well as privileged task information. This wrapper can only be used in privileged information track.

There are some more optional arguments, see [base_config.yaml](https://github.com/StanfordVL/BEHAVIOR-1K/blob/main/OmniGibson/omnigibson/learning/configs/base_config.yaml).
Expand Down
Loading