Merged

19 commits
d682199
Add Fan Wang's BPP Agent
kaikwan Jul 13, 2025
eb93b0c
Update README.md to include heuristic-scoring based packing agent det…
kaikwan Jul 13, 2025
3bf98d2
Optimize object properties calculation by caching mesh properties to …
kaikwan Jul 14, 2025
edcabf7
Closed-loop implementation of BPP
kaikwan Jul 15, 2025
8e5133d
Enhance BPP agent with unpackable object handling and tote ejection l…
kaikwan Jul 15, 2025
279c279
Enhance pack environment configuration by dynamically loading USD pat…
kaikwan Jul 15, 2025
345761e
Fix wrong indexing for removing objects from unpackable list during e…
kaikwan Jul 15, 2025
b97a297
Add stability checker for checking place pose candidates + Fixed unpa…
kaikwan Jul 17, 2025
4a814ec
Refactor bpp utils for multiprocessing and enhance statistics logging
kaikwan Jul 21, 2025
dd11a68
Heightmap multiprocessing
kaikwan Jul 21, 2025
735b66f
Add seed argument to command line parser and configure environment seed
kaikwan Jul 21, 2025
1aabbf7
Fix container not getting updated
kaikwan Jul 21, 2025
29520e1
Add results parsing scripts and fixed object volume and GCU calculati…
kaikwan Jul 22, 2025
9161ce5
Refactor imports and clean up whitespace in various scripts
kaikwan Jul 22, 2025
f4ea308
Fix summary and cumulative stats to same dir
kaikwan Jul 22, 2025
0dd38cf
Add comment to clarify asset position adjustment in bounding box calc…
kaikwan Jul 22, 2025
7984f1c
Update README.md
kaikwan Jul 22, 2025
cfd0ca2
Refactor tote manager and statistics handling: remove unnecessary clo…
kaikwan Jul 22, 2025
95f38cc
Merge branch 'henri/baselines' of https://github.com/personalrobotics…
kaikwan Jul 22, 2025
2 changes: 1 addition & 1 deletion .gitignore
Expand Up @@ -69,4 +69,4 @@ datasets
tests/

# Statistics
stats/
stats/
14 changes: 13 additions & 1 deletion README.md
Expand Up @@ -10,8 +10,20 @@

### Key Features

#### Heuristic-scoring-based search packing agent
Packing agent based on *"Stable bin packing of non-convex 3D objects with a robot manipulator"* by Fan Wang and Kris Hauser.
[arXiv:1812.04093](https://arxiv.org/abs/1812.04093)

Notes: Uses multiprocessing for placement calculation; `--num_envs 100` is recommended.
Defaults to the DBLF (deepest-bottom-left-first) heuristic.

**Command:**
```bash
python scripts/fanwang_bpp/fanwang_bpp_agent.py --task=Isaac-Pack-NoArm-v0 --num_envs 100
```
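For intuition, a minimal sketch of a DBLF-style placement search over a tote heightmap is shown below. This is illustrative only, with assumed function names and heightmap representation; the actual scoring, rotation search, and stability checks live in `scripts/fanwang_bpp/bpp_utils.py`.

```python
# Illustrative DBLF (deepest-bottom-left-first) placement search on a 2D heightmap.
# Names and the heightmap layout are assumptions for this sketch, not the bpp_utils.py interface.
import numpy as np


def best_dblf_placement(heightmap: np.ndarray, footprint: tuple[int, int]):
    """Return (x, y, z) for the deepest, lowest, left-most placement of a rectangular footprint."""
    fx, fy = footprint
    best_score, best_pos = None, None
    for y in range(heightmap.shape[1] - fy + 1):  # depth (into the tote)
        for x in range(heightmap.shape[0] - fx + 1):  # left-right
            # The object rests on the highest point under its footprint.
            z = float(heightmap[x:x + fx, y:y + fy].max())
            score = (y, z, x)  # prefer deepest, then lowest, then left-most
            if best_score is None or score < best_score:
                best_score, best_pos = score, (x, y, z)
    return best_pos


print(best_dblf_placement(np.zeros((10, 10)), (3, 2)))  # -> (0, 0, 0.0) in an empty tote
```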

#### Pack Task
Packing gym environment for the Amazon task.
Demo for Amazon Packing Task

**Command:**
```bash
Expand Down
2 changes: 1 addition & 1 deletion gcu_objects
373 changes: 373 additions & 0 deletions scripts/fanwang_bpp/bpp_utils.py

Large diffs are not rendered by default.
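The 373-line `bpp_utils.py` diff is collapsed here. Judging from the call sites in `fanwang_bpp_agent.py` below, its `BPP` class exposes roughly the following interface; this is an inferred sketch of the methods actually invoked, not the implementation itself.

```python
# Inferred from usage in fanwang_bpp_agent.py; bodies and exact signatures are not shown in this diff.
class BPP:
    def __init__(self, tote_manager, num_envs, obj_indices):
        """Track packable objects and per-environment container (tote) heightmaps."""

    def get_packable_object_indices(self, num_obj_per_env, tote_manager, env_indices, tote_ids):
        """Return, per environment, the indices of objects that can still be packed."""

    def update_container_heightmap(self, env, env_indices, tote_ids):
        """Refresh each destination tote's heightmap from the current simulation state."""

    def get_action(self, env, packable_objects, tote_ids, env_indices):
        """Pick an object and a placement pose per environment; returns (transforms, object indices)."""
```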

174 changes: 174 additions & 0 deletions scripts/fanwang_bpp/fanwang_bpp_agent.py
@@ -0,0 +1,174 @@
# Copyright (c) 2022-2025, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Script to run an environment with a 3D Bin Packing agent based on the paper:
Stable bin packing of non-convex 3D objects with a robot manipulator
Fan Wang, Kris Hauser
https://arxiv.org/abs/1812.04093
"""

"""Launch Isaac Sim Simulator first."""

import argparse

from isaaclab.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Zero agent for Isaac Lab environments.")
parser.add_argument(
"--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--exp_name", type=str, default="test_placement", help="Name of the experiment.")
parser.add_argument("--seed", type=int, default=0, help="Seed used for the environment")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import os
from datetime import datetime

import bpp_utils
import gymnasium as gym
import isaaclab.utils.math as math_utils
import isaaclab_tasks # noqa: F401
import torch
import tote_consolidation.tasks # noqa: F401
from isaaclab_tasks.utils import parse_env_cfg

# PLACEHOLDER: Extension template (do not remove this comment)


def convert_transform_to_action_tensor(transforms, obj_indicies, device):
    """Convert a transform object to an action tensor format.

    Args:
        transforms: Transform object with position and attitude (orientation)
        obj_indicies: Index of the object to place
        device: The device to create tensors on

    Returns:
        A tensor representing the object index and transform in the format
        expected by the action space [obj_idx, pos_x, pos_y, pos_z, quat_w, quat_x, quat_y, quat_z]
    """
    # Get batch size from the transforms
    batch_size = len(transforms)
    action_tensor = torch.zeros((batch_size, 8), device=device)

    # Extract position and attitude values in batch
    positions = torch.tensor([[t.position.x, t.position.y, t.position.z] for t in transforms], device=device) / 100.0

    # Convert Euler angles to radians (vectorized)
    roll_rad = torch.tensor([t.attitude.roll for t in transforms], device=device) * torch.pi / 180.0
    pitch_rad = torch.tensor([t.attitude.pitch for t in transforms], device=device) * torch.pi / 180.0
    yaw_rad = torch.tensor([t.attitude.yaw for t in transforms], device=device) * torch.pi / 180.0

    # Convert Euler angles to quaternions (vectorized)
    quats = math_utils.quat_from_euler_xyz(roll_rad.unsqueeze(1), pitch_rad.unsqueeze(1), yaw_rad.unsqueeze(1)).squeeze(
        1
    )

    # Build action tensor
    action_tensor[:, 0] = obj_indicies
    action_tensor[:, 1:4] = positions
    action_tensor[:, 4:8] = quats

    return action_tensor


def main():
    """Run the bin packing agent with the Isaac Lab environment."""
    # parse configuration
    env_cfg = parse_env_cfg(
        args_cli.task, device=args_cli.device, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
    )
    env_cfg.seed = args_cli.seed
    # create environment
    env = gym.make(args_cli.task, cfg=env_cfg)

    # print info (this is vectorized environment)
    print(f"[INFO]: Gym observation space: {env.observation_space}")
    print(f"[INFO]: Gym action space: {env.action_space}")
    # reset environment
    env.reset()

    obj_idx = torch.zeros(
        args_cli.num_envs, device=env.unwrapped.device, dtype=torch.int32
    )  # Track object indices per environment
    tote_manager = env.unwrapped.tote_manager
    num_obj_per_env = tote_manager.num_objects
    num_totes = len([key for key in env.unwrapped.scene.keys() if key.startswith("tote")])

    env_indices = torch.arange(args_cli.num_envs, device=env.unwrapped.device)  # Indices of all environments

    exp_log_interval = 1  # Interval (in steps) for logging stats

    step_count = 0

    bpp = bpp_utils.BPP(tote_manager, args_cli.num_envs, torch.arange(num_obj_per_env, device=env.unwrapped.device))

    while simulation_app.is_running():
        # run everything in inference mode
        with torch.inference_mode():
            # initialize the action tensor with zeros (filled in below)
            actions = torch.zeros(env.action_space.shape, device=env.unwrapped.device)
            stats = tote_manager.get_stats_summary()
            ejection_summary = tote_manager.stats.get_ejection_summary()
            print("GCU ", tote_manager.get_gcu(env_indices))
            print("\n===== Ejection Summary =====")
            print(f"Total steps: {stats['total_steps']}")
            if ejection_summary != {}:
                for i in range(len(ejection_summary.keys())):
                    env_id = list(ejection_summary.keys())[i]
                    print(ejection_summary[env_id])
            print("==========================\n")

            # [0] is destination tote idx (ascending values for batch size)
            # [1] currently is the object idx (0-indexed. -1 for no packable objects)
            # [2-9] is the desired object position and orientation
            # [10] is the action to indicate if an object is being placed
            actions[:, 0] = torch.arange(args_cli.num_envs, device=env.unwrapped.device) % num_totes

            tote_manager.eject_totes(actions[:, 0].to(torch.int32), env_indices)  # Eject destination totes

            # Destination tote IDs for each environment
            tote_ids = actions[:, 0].to(torch.int32)

            # Get the objects that can be packed
            packable_objects = bpp.get_packable_object_indices(num_obj_per_env, tote_manager, env_indices, tote_ids)[0]

            bpp.update_container_heightmap(env, env_indices, tote_ids)
            transforms, obj_indicies = bpp.get_action(env, packable_objects, tote_ids, env_indices)
            actions[:, 1:9] = convert_transform_to_action_tensor(transforms, obj_indicies, env.unwrapped.device)
            # apply actions
            env.step(actions)

            # Check that all environments have no packable objects
            tote_ids = actions[:, 0].to(torch.int32)  # Destination tote IDs for each environment

            if step_count % exp_log_interval == 0:
                print(f"\nStep {step_count}:")
                tote_manager.stats.save_to_file()
                print("Saved stats to file.")

            step_count += 1

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
3 changes: 1 addition & 2 deletions scripts/list_envs.py
Expand Up @@ -30,9 +30,8 @@
"""Rest everything follows."""

import gymnasium as gym
from prettytable import PrettyTable

import tote_consolidation.tasks # noqa: F401
from prettytable import PrettyTable


def main():
Expand Down
67 changes: 67 additions & 0 deletions scripts/parse_results/gcu_stats.py
@@ -0,0 +1,67 @@
import argparse
import json
import os

import numpy as np


def flatten(values):
    """Flatten nested lists of values."""
    return [v for sublist in values for v in (sublist if isinstance(sublist, list) else [sublist])]


def compute_stats(values):
    arr = np.array(values)
    return [
        f"{np.mean(arr):.4f}",
        f"{np.std(arr):.4f}",
        f"{np.percentile(arr, 25):.4f}",
        f"{np.percentile(arr, 50):.4f}",
        f"{np.percentile(arr, 75):.4f}",
    ]


def main(data_path, max_rows):
    json_file = "test_placement.json"
    json_path = os.path.join(data_path, json_file)

    with open(json_path) as f:
        data = json.load(f)

    gcus = []
    transfers = []
    ejections = []

    for k, v in data.items():
        if not k.isdigit():
            continue  # Skip mean_* keys
        gcus.extend(flatten(v["gcus"]))
        transfers.extend(flatten(v["obj_transfers"]))
        ejections.extend(flatten(v["source_ejections"]))

    print(f"Total records found: {len(gcus)}")

    if max_rows is not None:
        gcus = gcus[:max_rows]
        transfers = transfers[:max_rows]
        ejections = ejections[:max_rows]

    gcus_stats = compute_stats(gcus)
    transfers_stats = compute_stats(transfers)
    ejections_stats = compute_stats(ejections)

    # Print header for Google Sheets
    print("Header\tGCUs\t\t\t\t\tObjects transferred\t\t\t\t\tSource tote ejections\t\t\t\t\t")
    print("\tMean\tStdev\tp25\tp50\tp75\tMean\tStdev\tp25\tp50\tp75\tMean\tStdev\tp25\tp50\tp75")

    # Print values, with a label at the front
    print("Base " + " ".join(gcus_stats + transfers_stats + ejections_stats))


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Summarize metrics for Google Sheets.")
    parser.add_argument("--data_path", type=str, required=True, help="Root path to the saved container data.")
    parser.add_argument("--max_rows", type=int, default=None, help="Number of rows to read (default: all).")
    args = parser.parse_args()

    main(args.data_path, args.max_rows)
96 changes: 96 additions & 0 deletions scripts/parse_results/view_container_cross_section.py
@@ -0,0 +1,96 @@
import argparse
import os
import pickle

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import to_rgba
from matplotlib.widgets import Slider
from packing3d import Display


def plot_cross_section(container_path):
    # Load container
    with open(container_path, "rb") as f:
        container = pickle.load(f)

    cube = np.array(container.geometry.cube)  # (z, x, y)

    # Compute fill stats
    filled_voxels = np.count_nonzero(cube)
    total_voxels = np.prod(cube.shape)
    fill_ratio = filled_voxels / total_voxels

    print(f"Total filled volume: {filled_voxels} unit³")
    print(f"Total container volume: {total_voxels} unit³")
    print(f"Fill ratio: {fill_ratio:.3%}")

    # Color mapping
    colors = [
        "lightcoral",
        "lightsalmon",
        "gold",
        "olive",
        "mediumaquamarine",
        "deepskyblue",
        "blueviolet",
        "pink",
        "brown",
        "darkorange",
        "yellow",
        "lawngreen",
        "turquoise",
        "dodgerblue",
        "darkorchid",
        "hotpink",
        "deeppink",
        "peru",
        "orange",
        "darkolivegreen",
        "cyan",
        "purple",
        "crimson",
    ]
    color_rgba = np.asarray([to_rgba(c) for c in colors])

    fig, ax = plt.subplots()
    plt.subplots_adjust(bottom=0.2)

    z_max = cube.shape[0] - 1
    slice_ax = ax.imshow(np.zeros(cube.shape[1:]), origin="lower")
    title = ax.set_title("Cross-section Z=0 | Filled Area = 0 unit²")

    ax_z = plt.axes([0.2, 0.05, 0.65, 0.03])
    slider_z = Slider(ax_z, "Z Height", 0, z_max, valinit=0, valstep=1)

    def update(z):
        z = int(z)
        slice_data = cube[z]
        filled_area = np.count_nonzero(slice_data)

        # Map ints to RGBA
        flat = slice_data.astype(int).flatten()
        rgba = np.zeros((flat.size, 4))
        mask = flat > 0
        rgba[mask] = color_rgba[flat[mask] % len(color_rgba)]
        rgba = rgba.reshape((*slice_data.shape, 4))

        slice_ax.set_data(rgba)
        title.set_text(f"Cross-section Z={z} | Filled Area = {filled_area} unit²")
        fig.canvas.draw_idle()

    slider_z.on_changed(update)
    update(0)

    plt.show(block=True)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Visualize 3D container cross-sections with a slider.")
    parser.add_argument("--data_path", type=str, required=True, help="Root path to the saved container data.")
    parser.add_argument("--env_id", type=int, required=True, help="Environment index (e.g., 0, 1, ...).")
    parser.add_argument("--step_id", type=int, required=True, help="Step index for the container (e.g., 0, 23, ...).")
    args = parser.parse_args()

    container_path = os.path.join(args.data_path, "containers", f"env_{args.env_id}", f"{args.step_id}.pkl")
    plot_cross_section(container_path)