Suoivy committed
Commit 0ff287e · verified · Parent: bf27d6c

Upload 2 files

scripts/convert_to_lerobot.py ADDED
@@ -0,0 +1,438 @@
"""
This project is built upon the open-source project 🤗 LeRobot: https://github.com/huggingface/lerobot

We are grateful to the LeRobot team for their outstanding work and their contributions to the community.

If you find this project useful, please also consider supporting and exploring LeRobot.
"""

import os
import cv2
import json
import glob
import shutil
import logging
import argparse
from pathlib import Path
from typing import Callable
from functools import partial
from math import ceil
from copy import deepcopy
import subprocess
from multiprocessing import Pool, cpu_count

import h5py
import torch
import einops
import numpy as np
from PIL import Image
from tqdm import tqdm


HEAD_COLOR = "head.mp4"
HAND_LEFT_COLOR = "hand_left.mp4"
HAND_RIGHT_COLOR = "hand_right.mp4"
HEAD_CENTER_FISHEYE_COLOR = "head_front_fisheye.mp4"
HEAD_LEFT_FISHEYE_COLOR = "head_left_fisheye.mp4"
HEAD_RIGHT_FISHEYE_COLOR = "head_right_fisheye.mp4"
BACK_LEFT_FISHEYE_COLOR = "back_left_fisheye.mp4"
BACK_RIGHT_FISHEYE_COLOR = "back_right_fisheye.mp4"
HEAD_DEPTH = "head"
ALL_VIDEOS = [
    HEAD_COLOR,
    HAND_LEFT_COLOR,
    HAND_RIGHT_COLOR,
    HEAD_CENTER_FISHEYE_COLOR,
    HEAD_LEFT_FISHEYE_COLOR,
    HEAD_RIGHT_FISHEYE_COLOR,
    BACK_LEFT_FISHEYE_COLOR,
    BACK_RIGHT_FISHEYE_COLOR,
]

DEFAULT_IMAGE_PATH = (
    "images/{image_key}/episode_{episode_index:06d}/frame_{frame_index:06d}.jpg"
)

FEATURES = {
    "observation.images.top_head": {
        "dtype": "video",
        "shape": [480, 640, 3],
        "names": ["height", "width", "channel"],
        "video_info": {
            "video.fps": 30.0,
            "video.codec": "av1",
            "video.pix_fmt": "yuv420p",
            "video.is_depth_map": False,
            "has_audio": False,
        },
    },
    "observation.images.cam_top_depth": {
        "dtype": "image",
        "shape": [480, 640, 1],
        "names": ["height", "width", "channel"],
    },
    "observation.images.hand_left": {
        "dtype": "video",
        "shape": [480, 640, 3],
        "names": ["height", "width", "channel"],
        "video_info": {
            "video.fps": 30.0,
            "video.codec": "av1",
            "video.pix_fmt": "yuv420p",
            "video.is_depth_map": False,
            "has_audio": False,
        },
    },
    "observation.images.hand_right": {
        "dtype": "video",
        "shape": [480, 640, 3],
        "names": ["height", "width", "channel"],
        "video_info": {
            "video.fps": 30.0,
            "video.codec": "av1",
            "video.pix_fmt": "yuv420p",
            "video.is_depth_map": False,
            "has_audio": False,
        },
    },
    "observation.images.head_center_fisheye": {
        "dtype": "video",
        "shape": [748, 960, 3],
        "names": ["height", "width", "channel"],
        "video_info": {
            "video.fps": 30.0,
            "video.codec": "av1",
            "video.pix_fmt": "yuv420p",
            "video.is_depth_map": False,
            "has_audio": False,
        },
    },
    "observation.images.head_left_fisheye": {
        "dtype": "video",
        "shape": [748, 960, 3],
        "names": ["height", "width", "channel"],
        "video_info": {
            "video.fps": 30.0,
            "video.codec": "av1",
            "video.pix_fmt": "yuv420p",
            "video.is_depth_map": False,
            "has_audio": False,
        },
    },
    "observation.images.head_right_fisheye": {
        "dtype": "video",
        "shape": [748, 960, 3],
        "names": ["height", "width", "channel"],
        "video_info": {
            "video.fps": 30.0,
            "video.codec": "av1",
            "video.pix_fmt": "yuv420p",
            "video.is_depth_map": False,
            "has_audio": False,
        },
    },
    "observation.images.back_left_fisheye": {
        "dtype": "video",
        "shape": [748, 960, 3],
        "names": ["height", "width", "channel"],
        "video_info": {
            "video.fps": 30.0,
            "video.codec": "av1",
            "video.pix_fmt": "yuv420p",
            "video.is_depth_map": False,
            "has_audio": False,
        },
    },
    "observation.images.back_right_fisheye": {
        "dtype": "video",
        "shape": [748, 960, 3],
        "names": ["height", "width", "channel"],
        "video_info": {
            "video.fps": 30.0,
            "video.codec": "av1",
            "video.pix_fmt": "yuv420p",
            "video.is_depth_map": False,
            "has_audio": False,
        },
    },
    "observation.state": {
        "dtype": "float32",
        "shape": [22],
    },
    "action": {
        "dtype": "float32",
        "shape": [22],
    },
    "episode_index": {
        "dtype": "int64",
        "shape": [1],
        "names": None,
    },
    "frame_index": {
        "dtype": "int64",
        "shape": [1],
        "names": None,
    },
    "index": {
        "dtype": "int64",
        "shape": [1],
        "names": None,
    },
    "task_index": {
        "dtype": "int64",
        "shape": [1],
        "names": None,
    },
}
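
# Note: the 22-dim "observation.state"/"action" vectors concatenate, in order,
# 2 head joints + 2 body joints + 14 arm joints (7 per arm, interleaved
# left/right) + 4 gripper joints; see load_local_dataset() below.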


from modified_lerobot_dataset import AgiBotDataset


def process_video(video_path):
    output = video_path.replace('.mp4', '_encode.mp4')
    command = [
        "ffmpeg",
        "-i", video_path,
        "-vcodec", "libsvtav1",
        "-pix_fmt", "yuv420p",
        "-r", "30",
        "-g", "2",
        "-crf", "30",
        "-vf", "scale=640:360:flags=bicubic",
        "-loglevel", "error",
        "-y", output,
    ]
    try:
        subprocess.run(command, check=True)
    except subprocess.CalledProcessError as e:
        print(f"Video failure: {' '.join(command)}, error: {e}")
    except Exception as e:
        print(f"Video unknown failure: {' '.join(command)}, error: {e}")
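
# Example (hypothetical path): process_video("/data/observation/.../video/head.mp4")
# re-encodes the clip to 640x360 AV1 and writes it next to the source as head_encode.mp4.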


def preprocess_videos(episode_list, debug=False):
    video_paths = []
    for episode_path in episode_list:
        video_dir = episode_path.replace('meta_info', 'observation') + "/video"
        for file in ALL_VIDEOS:
            video_path = os.path.join(video_dir, file)
            video_paths.append(video_path)

    if debug:
        for video in video_paths:
            process_video(video)
    else:
        with Pool(processes=max(1, os.cpu_count() // 2)) as pool:
            for _ in tqdm(pool.imap_unordered(process_video, video_paths), total=len(video_paths), desc="Video preprocessing"):
                pass
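
# Every source clip gains an `_encode.mp4` sibling; load_local_dataset() below
# points each LeRobot episode at these re-encoded files.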


def load_depths(root_dir: str, camera_name: str):
    cam_path = Path(root_dir)
    all_imgs = sorted(cam_path.glob("*"), key=lambda x: int(x.stem))
    return [np.array(Image.open(f"{file}/{camera_name}.png")).astype(np.float32) / 1000 for file in all_imgs]
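
# Assumes the depth PNGs store depth in millimeters; dividing by 1000 converts
# each frame to meters before it is written into the dataset.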


def load_local_dataset(episode_path: str) -> dict | None:
    """Load a local episode and return a dict with its frames, videos, and task name."""
    observation_path = episode_path.replace('meta_info', 'observation')
    with open(f"{episode_path}/task_info.json") as f:
        task_info = json.load(f)
    task = task_info['task_name']

    with h5py.File(Path(episode_path) / "aligned_joints.h5") as f:
        state_joint = np.array(f["state/joint/position"])
        joint_names = f["state/joint"].attrs['name'].tolist()

    head_joint_names = [
        "joint_head_yaw",
        "joint_head_pitch",
    ]
    body_joint_names = [
        "joint_lift_body",
        "joint_body_pitch",
    ]
    arm_joint_names = [
        "Joint1_l",
        "Joint1_r",
        "Joint2_l",
        "Joint2_r",
        "Joint3_l",
        "Joint3_r",
        "Joint4_l",
        "Joint4_r",
        "Joint5_l",
        "Joint5_r",
        "Joint6_l",
        "Joint6_r",
        "Joint7_l",
        "Joint7_r",
    ]
    effector_joint_names = [
        "right_Left_1_Joint",
        "right_Right_1_Joint",
        "left_Left_1_Joint",
        "left_Right_1_Joint",
    ]

    # Look up the column index of each joint group
    head_joint_indices = [joint_names.index(name) for name in head_joint_names]
    body_joint_indices = [joint_names.index(name) for name in body_joint_names]
    arm_joint_indices = [joint_names.index(name) for name in arm_joint_names]
    effector_joint_indices = [joint_names.index(name) for name in effector_joint_names]

    # Extract joint positions for all frames
    state_head = state_joint[:, head_joint_indices]
    state_body = state_joint[:, body_joint_indices]
    state_arm = state_joint[:, arm_joint_indices]
    state_effector = state_joint[:, effector_joint_indices]

    # Derive actions from states as first-order differences
    action_head = state_head[1:] - state_head[:-1]
    action_body = state_body[1:] - state_body[:-1]
    action_arm = state_arm[1:] - state_arm[:-1]
    action_effector = state_effector[1:] - state_effector[:-1]

    # Repeat the last action so states and actions have equal length
    action_head = np.concatenate([action_head, action_head[-1:]])
    action_body = np.concatenate([action_body, action_body[-1:]])
    action_arm = np.concatenate([action_arm, action_arm[-1:]])
    action_effector = np.concatenate([action_effector, action_effector[-1:]])

    states_value = np.hstack(
        [state_head, state_body, state_arm, state_effector]
    ).astype(np.float32)
    assert (
        action_arm.shape[0] == action_effector.shape[0]
    ), f"shape of action_arm: {action_arm.shape}; shape of action_effector: {action_effector.shape}"
    action_value = np.hstack(
        [action_head, action_body, action_arm, action_effector]
    ).astype(np.float32)

    depth_imgs = load_depths(f"{observation_path}/depth", HEAD_DEPTH)

    assert len(depth_imgs) == len(
        states_value
    ), "Number of images and states are not equal"
    assert len(depth_imgs) == len(
        action_value
    ), "Number of images and actions are not equal"
    frames = [
        {
            "observation.images.cam_top_depth": depth_imgs[i],
            "observation.state": states_value[i],
            "action": action_value[i],
        }
        for i in range(len(depth_imgs))
    ]

    v_path = observation_path + "/video"
    videos = {
        "observation.images.top_head": f"{v_path}/{HEAD_COLOR}".replace('.mp4', '_encode.mp4'),
        "observation.images.hand_left": f"{v_path}/{HAND_LEFT_COLOR}".replace('.mp4', '_encode.mp4'),
        "observation.images.hand_right": f"{v_path}/{HAND_RIGHT_COLOR}".replace('.mp4', '_encode.mp4'),
        "observation.images.head_center_fisheye": f"{v_path}/{HEAD_CENTER_FISHEYE_COLOR}".replace('.mp4', '_encode.mp4'),
        "observation.images.head_left_fisheye": f"{v_path}/{HEAD_LEFT_FISHEYE_COLOR}".replace('.mp4', '_encode.mp4'),
        "observation.images.head_right_fisheye": f"{v_path}/{HEAD_RIGHT_FISHEYE_COLOR}".replace('.mp4', '_encode.mp4'),
        "observation.images.back_left_fisheye": f"{v_path}/{BACK_LEFT_FISHEYE_COLOR}".replace('.mp4', '_encode.mp4'),
        "observation.images.back_right_fisheye": f"{v_path}/{BACK_RIGHT_FISHEYE_COLOR}".replace('.mp4', '_encode.mp4'),
    }
    return {
        'frames': frames,
        'videos': videos,
        'task': task,
    }
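
# Note: actions are first-order differences of the state trajectory (per-frame
# joint deltas), with the final action duplicated so every frame has one.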


def main(
    src_path: str,
    tgt_path: str,
    repo_id: str,
    preprocess_video: bool = False,
    debug: bool = True,
):
    # Remove any existing dataset at the target location
    if os.path.exists(f"{tgt_path}/{repo_id}"):
        shutil.rmtree(f"{tgt_path}/{repo_id}")
    dataset = AgiBotDataset.create(
        repo_id=repo_id,
        root=f"{tgt_path}/{repo_id}",
        fps=30,
        robot_type="a2d",
        features=FEATURES,
    )

    episode_list = sorted(
        [
            f
            for f in glob.glob(f"{src_path}/meta_info/*/*")
            if os.path.isdir(f)
        ]
    )

    # Preprocess the videos to avoid encoding errors
    if preprocess_video:
        preprocess_videos(episode_list, debug)

    # Load the raw episodes
    raw_datasets_before_filter = [
        load_local_dataset(episode_path)
        for episode_path in tqdm(episode_list)
    ]

    # Drop episodes that failed to load
    raw_datasets = [
        dataset for dataset in raw_datasets_before_filter if dataset is not None
    ]

    for episode_data in tqdm(raw_datasets, desc="Generating dataset from raw datasets"):
        for frame in tqdm(episode_data['frames'], desc="Generating frames for one episode"):
            dataset.add_frame(frame)
        dataset.save_episode(task=episode_data['task'], videos=episode_data['videos'])
    dataset.consolidate(run_compute_stats=True)
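
# Example usage (hypothetical paths and repo id):
#   python scripts/convert_to_lerobot.py \
#       --data_dir /path/to/raw_agibot_data \
#       --save_dir /path/to/lerobot_datasets \
#       --repo_id agibot/a2d_sample \
#       --preprocess_video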


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data_dir",
        type=str,
        required=True,
    )
    parser.add_argument(
        "--save_dir",
        type=str,
        required=True,
    )
    parser.add_argument(
        "--repo_id",
        type=str,
        required=True,
    )
    parser.add_argument(
        "--preprocess_video",
        action="store_true",
    )
    parser.add_argument(
        "--debug",
        action="store_true",
    )
    args = parser.parse_args()

    assert os.path.exists(args.data_dir), f"Cannot find {args.data_dir}."

    main(args.data_dir, args.save_dir, args.repo_id, args.preprocess_video, args.debug)
scripts/visualize_dataset.py ADDED
@@ -0,0 +1,234 @@
"""
This script is adapted from the Hugging Face 🤗 LeRobot project:
https://github.com/huggingface/lerobot

Original file:
https://github.com/huggingface/lerobot/blob/main/lerobot/scripts/visualize_dataset.py

The original script was developed as part of the LeRobot project for dataset visualization.
This version adds support for depth map visualization.
"""

import argparse
import gc
import logging
import time
from pathlib import Path
from typing import Iterator

import numpy as np
import rerun as rr
import torch
import torch.utils.data
import tqdm
import matplotlib.pyplot as plt

from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

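
# Sampler that yields the contiguous global frame indices of a single episode,
# so the DataLoader below streams exactly that episode, in order.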
class EpisodeSampler(torch.utils.data.Sampler):
    def __init__(self, dataset: LeRobotDataset, episode_index: int):
        from_idx = dataset.episode_data_index["from"][episode_index].item()
        to_idx = dataset.episode_data_index["to"][episode_index].item()
        self.frame_ids = range(from_idx, to_idx)

    def __iter__(self) -> Iterator:
        return iter(self.frame_ids)

    def __len__(self) -> int:
        return len(self.frame_ids)


def to_hwc_uint8_numpy(chw_float32_torch: torch.Tensor) -> np.ndarray:
    assert chw_float32_torch.dtype == torch.float32
    assert chw_float32_torch.ndim == 3
    c, h, w = chw_float32_torch.shape
    assert c < h and c < w, f"Expect channel-first images, but got {chw_float32_torch.shape}"

    if c == 1:
        # Depth image: clip and normalize the depth map, just for visualization
        min_depth = 0.4
        max_depth = 3
        clipped_depth = torch.clamp(chw_float32_torch, min=min_depth, max=max_depth)
        normalized_depth = (clipped_depth - min_depth) / (max_depth - min_depth)
        depth_image = np.sqrt(normalized_depth.squeeze().cpu().numpy())

        colormap = plt.get_cmap('jet')
        colored_depth_image = colormap(depth_image)
        hwc_uint8_numpy = (colored_depth_image[:, :, :3] * 255).astype(np.uint8)
    else:
        # RGB image
        hwc_uint8_numpy = (chw_float32_torch * 255).type(torch.uint8).permute(1, 2, 0).numpy()

    return hwc_uint8_numpy
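
# Depth frames are clipped to [0.4 m, 3 m], normalized, and square-rooted so that
# near-range detail receives more of the colormap; the jet colormap then yields an
# RGB image that Rerun can display like any other camera stream.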


def visualize_dataset(
    dataset: LeRobotDataset,
    episode_index: int,
    batch_size: int = 32,
    num_workers: int = 0,
    mode: str = "local",
    web_port: int = 9090,
    ws_port: int = 9087,
    save: bool = False,
    output_dir: Path | None = None,
    **kwargs,
) -> Path | None:
    if save:
        assert (
            output_dir is not None
        ), "Set an output directory where to write .rrd files with `--output-dir path/to/directory`."

    repo_id = dataset.repo_id

    logging.info("Loading dataloader")
    episode_sampler = EpisodeSampler(dataset, episode_index)
    dataloader = torch.utils.data.DataLoader(
        dataset,
        num_workers=num_workers,
        batch_size=batch_size,
        sampler=episode_sampler,
    )

    logging.info("Starting Rerun")

    if mode not in ["local", "distant"]:
        raise ValueError(mode)

    spawn_local_viewer = mode == "local" and not save
    rr.init(f"{repo_id}/episode_{episode_index}", spawn=spawn_local_viewer)

    # Manually call the Python garbage collector after `rr.init` to avoid hanging in a blocking flush
    # when iterating on a dataloader with `num_workers` > 0
    # TODO(rcadene): remove `gc.collect` when rerun version 0.16 is out, which includes a fix
    gc.collect()

    if mode == "distant":
        rr.serve(open_browser=False, web_port=web_port, ws_port=ws_port)

    logging.info("Logging to Rerun")

    for batch in tqdm.tqdm(dataloader, total=len(dataloader)):
        # Iterate over the batch
        for i in range(len(batch["index"])):
            rr.set_time_sequence("frame_index", batch["frame_index"][i].item())
            rr.set_time_seconds("timestamp", batch["timestamp"][i].item())

            # Display each camera image
            for key in dataset.meta.camera_keys:
                # TODO(rcadene): add `.compress()`? is it lossless?
                rr.log(key, rr.Image(to_hwc_uint8_numpy(batch[key][i])))

            # Display each dimension of the action space (e.g. actuator commands)
            if "action" in batch:
                for dim_idx, val in enumerate(batch["action"][i]):
                    rr.log(f"action/{dim_idx}", rr.Scalar(val.item()))

            # Display each dimension of the observed state space (e.g. agent position in joint space)
            if "observation.state" in batch:
                for dim_idx, val in enumerate(batch["observation.state"][i]):
                    rr.log(f"state/{dim_idx}", rr.Scalar(val.item()))

    if mode == "local" and save:
        # Save the .rrd file locally
        output_dir = Path(output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)
        repo_id_str = repo_id.replace("/", "_")
        rrd_path = output_dir / f"{repo_id_str}_episode_{episode_index}.rrd"
        rr.save(rrd_path)
        return rrd_path

    elif mode == "distant":
        # Keep the process alive since it is serving the websocket connection
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            print("Ctrl-C received. Exiting.")


def main():
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo-id",
        type=str,
        default=None,
    )
    parser.add_argument(
        "--episode-index",
        type=int,
        nargs="*",
        default=None,
        help="Episode indices to visualize (e.g. `0 1 5 6` to load episodes of index 0, 1, 5 and 6). By default loads all episodes.",
    )
    parser.add_argument(
        "--dataset-path",
        type=Path,
        default=None,
        help="Root directory for the converted LeRobot dataset stored locally.",
    )
    parser.add_argument(
        "--output-dir",
        type=Path,
        default=None,
        help="Directory path to write a .rrd file when `--save 1` is set.",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        default=32,
        help="Batch size loaded by DataLoader.",
    )
    parser.add_argument(
        "--num-workers",
        type=int,
        default=4,
        help="Number of DataLoader processes for loading the data.",
    )
    parser.add_argument(
        "--mode",
        type=str,
        default="local",
        help=(
            "Mode of viewing between 'local' or 'distant'. "
            "'local' requires data to be on a local machine. It spawns a viewer to visualize the data locally. "
            "'distant' creates a server on the distant machine where the data is stored. "
            "Visualize the data by connecting to the server with `rerun ws://localhost:PORT` on the local machine."
        ),
    )
    parser.add_argument(
        "--web-port",
        type=int,
        default=9090,
        help="Web port for rerun.io when `--mode distant` is set.",
    )
    parser.add_argument(
        "--ws-port",
        type=int,
        default=9087,
        help="Web socket port for rerun.io when `--mode distant` is set.",
    )
    parser.add_argument(
        "--save",
        type=int,
        default=0,
        help=(
            "Save a .rrd file in the directory provided by `--output-dir`. "
            "It also deactivates the spawning of a viewer. "
            "Visualize the data by running `rerun path/to/file.rrd` on your local machine."
        ),
    )

    args = parser.parse_args()
    kwargs = vars(args)
    repo_id = kwargs.pop("repo_id")
    root = f"{kwargs.pop('dataset_path')}/{repo_id}"

    logging.info("Loading dataset")
    dataset = LeRobotDataset(repo_id, root=root, local_files_only=True)

    # Visualize every requested episode; `--episode-index` accepts a list, and
    # omitting it falls back to all episodes, as the help text promises.
    episode_indices = kwargs.pop("episode_index") or list(range(dataset.num_episodes))
    for episode_index in episode_indices:
        visualize_dataset(dataset, episode_index=episode_index, **kwargs)


if __name__ == "__main__":
    main()
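
# Example usage (hypothetical repo id and paths):
#   python scripts/visualize_dataset.py --repo-id agibot/a2d_sample \
#       --dataset-path /path/to/lerobot_datasets --episode-index 0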