Upload modified_lerobot_dataset.py
scripts/modified_lerobot_dataset.py
ADDED
@@ -0,0 +1,327 @@
from pathlib import Path
from typing import Callable

from tqdm import tqdm
import h5py
import torch
import einops
import shutil
import logging
import numpy as np
from math import ceil
from copy import deepcopy

from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
from lerobot.common.datasets.utils import (
    STATS_PATH,
    check_timestamps_sync,
    get_episode_data_index,
    serialize_dict,
    write_json,
)

def get_stats_einops_patterns(dataset, num_workers=0):
    """Return the einops patterns used to aggregate batches and compute statistics.

    Note: we assume the images are in channel-first format.
    """

    dataloader = torch.utils.data.DataLoader(
        dataset,
        num_workers=num_workers,
        batch_size=2,
        shuffle=False,
    )
    batch = next(iter(dataloader))

    stats_patterns = {}

    for key in dataset.features:
        # sanity check that tensors are not float64
        assert batch[key].dtype != torch.float64

        # if isinstance(feats_type, (VideoFrame, Image)):
        if key in dataset.meta.camera_keys:
            # sanity check that images are channel first
            _, c, h, w = batch[key].shape
            assert (
                c < h and c < w
            ), f"expect channel first images, but instead {batch[key].shape}"
            assert (
                batch[key].dtype == torch.float32
            ), f"expect torch.float32, but instead {batch[key].dtype=}"
            # assert batch[key].max() <= 1, f"expect pixels lower than 1, but instead {batch[key].max()=}"
            # assert batch[key].min() >= 0, f"expect pixels greater than 0, but instead {batch[key].min()=}"
            stats_patterns[key] = "b c h w -> c 1 1"
        elif batch[key].ndim == 2:
            stats_patterns[key] = "b c -> c"
        elif batch[key].ndim == 1:
            stats_patterns[key] = "b -> 1"
        else:
            raise ValueError(f"{key}, {batch[key].shape}")

    return stats_patterns

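# Example (hypothetical feature names): for a dataset with a float32, channel-first
# camera key "observation.images.top" and a multi-dimensional "observation.state",
# the returned mapping would look like
#   {"observation.images.top": "b c h w -> c 1 1",
#    "observation.state": "b c -> c",
#    "index": "b -> 1"}
# so that einops.reduce(batch[key], pattern, "mean") yields one statistic per image
# channel and one per state dimension.
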

def compute_stats(dataset, batch_size=1, num_workers=4, max_num_samples=None):
    """Compute mean/std and min/max statistics over all data keys in a LeRobotDataset."""
    if max_num_samples is None:
        max_num_samples = len(dataset)
    else:
        max_num_samples = min(max_num_samples, len(dataset))

    # for more info on why we need to set the same number of workers, see `load_from_videos`
    stats_patterns = get_stats_einops_patterns(dataset, num_workers)

    # mean and std will be computed incrementally while max and min will track the running value.
    mean, std, _max, _min = {}, {}, {}, {}
    for key in stats_patterns:
        mean[key] = torch.tensor(0.0).float()
        std[key] = torch.tensor(0.0).float()
        _max[key] = torch.tensor(-float("inf")).float()
        _min[key] = torch.tensor(float("inf")).float()

    def create_seeded_dataloader(dataset, batch_size, seed):
        generator = torch.Generator()
        generator.manual_seed(seed)
        dataloader = torch.utils.data.DataLoader(
            dataset,
            num_workers=num_workers,
            batch_size=batch_size,
            shuffle=True,
            drop_last=False,
            generator=generator,
        )
        return dataloader

    # Note: Due to be refactored soon. The point of storing `first_batch` is to make sure we
    # don't get surprises when rerunning the sampler.
    first_batch = None
    running_item_count = 0  # for online mean computation
    dataloader = create_seeded_dataloader(dataset, batch_size, seed=1337)
    for i, batch in enumerate(
        tqdm(
            dataloader,
            total=ceil(max_num_samples / batch_size),
            desc="Compute mean, min, max",
        )
    ):
        this_batch_size = len(batch["index"])
        running_item_count += this_batch_size
        if first_batch is None:
            first_batch = deepcopy(batch)
        for key, pattern in stats_patterns.items():
            batch[key] = batch[key].float()
            # Numerically stable update step for mean computation.
            batch_mean = einops.reduce(batch[key], pattern, "mean")
            # Hint: to update the mean we need x̄ₙ = (Nₙ₋₁x̄ₙ₋₁ + Bₙxₙ) / Nₙ, where the subscript
            # represents the update step, N is the running item count, B is this batch size,
            # x̄ is the running mean, and x is the current batch mean. Some rearrangement is
            # then required to avoid risking numerical overflow. Another hint: Nₙ₋₁ = Nₙ - Bₙ.
            # Rearrangement yields
            # x̄ₙ = x̄ₙ₋₁ + Bₙ * (xₙ - x̄ₙ₋₁) / Nₙ
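            # Worked check with hypothetical numbers: after N₁ = 2 items with running mean
            # x̄₁ = 1.0, a batch of B₂ = 2 items with batch mean x₂ = 3.0 gives N₂ = 4 and
            # x̄₂ = 1.0 + 2 * (3.0 - 1.0) / 4 = 2.0, the exact mean of all four items.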
            mean[key] = (
                mean[key]
                + this_batch_size * (batch_mean - mean[key]) / running_item_count
            )
            _max[key] = torch.maximum(
                _max[key], einops.reduce(batch[key], pattern, "max")
            )
            _min[key] = torch.minimum(
                _min[key], einops.reduce(batch[key], pattern, "min")
            )

        if i == ceil(max_num_samples / batch_size) - 1:
            break

    first_batch_ = None
    running_item_count = 0  # for online std computation
    dataloader = create_seeded_dataloader(dataset, batch_size, seed=1337)
    for i, batch in enumerate(
        tqdm(dataloader, total=ceil(max_num_samples / batch_size), desc="Compute std")
    ):
        this_batch_size = len(batch["index"])
        running_item_count += this_batch_size
        # Sanity check to make sure the batches are still in the same order as before.
        if first_batch_ is None:
            first_batch_ = deepcopy(batch)
            for key in stats_patterns:
                assert torch.equal(first_batch_[key], first_batch[key])
        for key, pattern in stats_patterns.items():
            batch[key] = batch[key].float()
            # Numerically stable update step for mean computation (where the mean is over
            # squared residuals). See notes in the mean computation loop above.
            batch_std = einops.reduce((batch[key] - mean[key]) ** 2, pattern, "mean")
            std[key] = (
                std[key] + this_batch_size * (batch_std - std[key]) / running_item_count
            )

        if i == ceil(max_num_samples / batch_size) - 1:
            break

    for key in stats_patterns:
        std[key] = torch.sqrt(std[key])

    stats = {}
    for key in stats_patterns:
        stats[key] = {
            "mean": mean[key],
            "std": std[key],
            "max": _max[key],
            "min": _min[key],
        }
    return stats
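
# Sketch of intended use (hypothetical dataset and key names): on a consolidated
# LeRobotDataset,
#   stats = compute_stats(dataset, batch_size=32, num_workers=4, max_num_samples=1000)
#   stats["observation.state"]["mean"]  # one value per state dimension
# Sampling is seeded, so the two passes see identical batches, which is what the
# `first_batch` equality assertion above relies on.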


class AgiBotDataset(LeRobotDataset):
    def __init__(
        self,
        repo_id: str,
        root: str | Path | None = None,
        episodes: list[int] | None = None,
        image_transforms: Callable | None = None,
        delta_timestamps: dict[str, list[float]] | None = None,
        tolerance_s: float = 1e-4,
        download_videos: bool = True,
        local_files_only: bool = False,
        video_backend: str | None = None,
    ):
        super().__init__(
            repo_id=repo_id,
            root=root,
            episodes=episodes,
            image_transforms=image_transforms,
            delta_timestamps=delta_timestamps,
            tolerance_s=tolerance_s,
            download_videos=download_videos,
            local_files_only=local_files_only,
            video_backend=video_backend,
        )

    def save_episode(
        self, task: str, episode_data: dict | None = None, videos: dict | None = None
    ) -> None:
        """
        Overridden to copy pre-encoded mp4 videos into place instead of encoding
        frames into videos.
        """
        episode_buffer = episode_data if episode_data is not None else self.episode_buffer

        episode_length = episode_buffer.pop("size")
        episode_index = episode_buffer["episode_index"]
        if episode_index != self.meta.total_episodes:
            # TODO(aliberts): Add option to use existing episode_index
            raise NotImplementedError(
                "You might have manually provided the episode_buffer with an episode_index that doesn't "
                "match the total number of episodes in the dataset. This is not supported for now."
            )

        if episode_length == 0:
            raise ValueError(
                "You must add one or several frames with `add_frame` before calling `add_episode`."
            )

        task_index = self.meta.get_task_index(task)

        if set(episode_buffer.keys()) != set(self.features):
            raise ValueError(
                "The keys of the episode buffer don't match the features of the dataset."
            )

        for key, ft in self.features.items():
            if key == "index":
                episode_buffer[key] = np.arange(
                    self.meta.total_frames, self.meta.total_frames + episode_length
                )
            elif key == "episode_index":
                episode_buffer[key] = np.full((episode_length,), episode_index)
            elif key == "task_index":
                episode_buffer[key] = np.full((episode_length,), task_index)
            elif ft["dtype"] in ["image", "video"]:
                continue
            elif len(ft["shape"]) == 1 and ft["shape"][0] == 1:
                episode_buffer[key] = np.array(episode_buffer[key], dtype=ft["dtype"])
            elif len(ft["shape"]) == 1 and ft["shape"][0] > 1:
                episode_buffer[key] = np.stack(episode_buffer[key])
            else:
                raise ValueError(key)

        self._wait_image_writer()
        self._save_episode_table(episode_buffer, episode_index)

        self.meta.save_episode(episode_index, episode_length, task, task_index)
        for key in self.meta.video_keys:
            video_path = self.root / self.meta.get_video_file_path(episode_index, key)
            episode_buffer[key] = video_path
            video_path.parent.mkdir(parents=True, exist_ok=True)
            shutil.copyfile(videos[key], video_path)
        if not episode_data:  # Reset the buffer
            self.episode_buffer = self.create_episode_buffer()
        self.consolidated = False
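
    # Sketch of the expected `videos` argument (hypothetical task and paths): one
    # pre-encoded mp4 per camera key, e.g.
    #   dataset.save_episode(
    #       task="pick up the cup",
    #       videos={"observation.images.top": Path("raw/episode_0/top.mp4")},
    #   )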

    def consolidate(
        self, run_compute_stats: bool = True, keep_image_files: bool = False
    ) -> None:
        self.hf_dataset = self.load_hf_dataset()
        self.episode_data_index = get_episode_data_index(
            self.meta.episodes, self.episodes
        )
        check_timestamps_sync(
            self.hf_dataset, self.episode_data_index, self.fps, self.tolerance_s
        )
        if len(self.meta.video_keys) > 0:
            self.meta.write_video_info()

        if not keep_image_files:
            img_dir = self.root / "images"
            if img_dir.is_dir():
                shutil.rmtree(img_dir)
        video_files = list(self.root.rglob("*.mp4"))
        assert len(video_files) == self.num_episodes * len(self.meta.video_keys)

        parquet_files = list(self.root.rglob("*.parquet"))
        assert len(parquet_files) == self.num_episodes

        if run_compute_stats:
            self.stop_image_writer()
            self.meta.stats = compute_stats(
                self, batch_size=1, num_workers=1, max_num_samples=1000
            )
            serialized_stats = serialize_dict(self.meta.stats)
            write_json(serialized_stats, self.root / STATS_PATH)
            self.consolidated = True
        else:
            logging.warning(
                "Skipping computation of the dataset statistics, dataset is not fully consolidated."
            )
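
    # Note: `consolidate()` is meant to run once, after the last `save_episode`
    # call; with run_compute_stats=True it samples up to 1000 frames to estimate
    # dataset statistics and writes them to STATS_PATH before marking the dataset
    # as consolidated.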

    def add_frame(self, frame: dict) -> None:
        """
        Only add the frame to the episode_buffer. Apart from images, which are
        written to a temporary directory, nothing is written to disk. To save those
        frames, the `save_episode()` method then needs to be called.
        """
        # TODO(aliberts, rcadene): Add sanity check for the input, check it's numpy or torch,
        # check the dtype and shape matches, etc.

        if self.episode_buffer is None:
            self.episode_buffer = self.create_episode_buffer()

        frame_index = self.episode_buffer["size"]
        timestamp = (
            frame.pop("timestamp") if "timestamp" in frame else frame_index / self.fps
        )
        self.episode_buffer["frame_index"].append(frame_index)
        self.episode_buffer["timestamp"].append(timestamp)

        for key in frame:
            if key not in self.features:
                raise ValueError(key)
            item = (
                frame[key].numpy()
                if isinstance(frame[key], torch.Tensor)
                else frame[key]
            )
            self.episode_buffer[key].append(item)

        self.episode_buffer["size"] += 1
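

# Minimal end-to-end sketch (hypothetical repo id, task, frames and video paths;
# `create()` and the feature schema are assumed to be the ones LeRobotDataset
# provides, and each camera key is assumed to map to a pre-encoded mp4):
#
#   dataset = AgiBotDataset.create(repo_id="agibot/example", fps=30, features=features)
#   for frame in episode_frames:  # dicts of state/action arrays
#       dataset.add_frame(frame)
#   dataset.save_episode(
#       task="pick up the cup",
#       videos={key: mp4_paths[key] for key in dataset.meta.video_keys},
#   )
#   dataset.consolidate(run_compute_stats=True)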