The full dataset viewer is not available (click to read why). Only showing a preview of the rows.
Error code: DatasetGenerationError
Exception: ArrowNotImplementedError
Message: Cannot write struct type 'target_to_reference_mapping' with no child field to Parquet. Consider adding a dummy child field.
Traceback: Traceback (most recent call last):
File "/usr/local/lib/python3.12/site-packages/datasets/builder.py", line 1594, in _prepare_split_single
writer.write(example)
File "/usr/local/lib/python3.12/site-packages/datasets/arrow_writer.py", line 598, in write
self.write_examples_on_file()
File "/usr/local/lib/python3.12/site-packages/datasets/arrow_writer.py", line 571, in write_examples_on_file
self.write_batch(batch_examples=batch_examples)
File "/usr/local/lib/python3.12/site-packages/datasets/arrow_writer.py", line 661, in write_batch
self.write_table(pa_table, writer_batch_size)
File "/usr/local/lib/python3.12/site-packages/datasets/arrow_writer.py", line 672, in write_table
self._build_writer(inferred_schema=pa_table.schema)
File "/usr/local/lib/python3.12/site-packages/datasets/arrow_writer.py", line 713, in _build_writer
self.pa_writer = pq.ParquetWriter(
^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/pyarrow/parquet/core.py", line 1070, in __init__
self.writer = _parquet.ParquetWriter(
^^^^^^^^^^^^^^^^^^^^^^^
File "pyarrow/_parquet.pyx", line 2363, in pyarrow._parquet.ParquetWriter.__cinit__
File "pyarrow/error.pxi", line 155, in pyarrow.lib.pyarrow_internal_check_status
File "pyarrow/error.pxi", line 92, in pyarrow.lib.check_status
pyarrow.lib.ArrowNotImplementedError: Cannot write struct type 'target_to_reference_mapping' with no child field to Parquet. Consider adding a dummy child field.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.12/site-packages/datasets/builder.py", line 1608, in _prepare_split_single
num_examples, num_bytes = writer.finalize()
^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/datasets/arrow_writer.py", line 684, in finalize
self.write_examples_on_file()
File "/usr/local/lib/python3.12/site-packages/datasets/arrow_writer.py", line 571, in write_examples_on_file
self.write_batch(batch_examples=batch_examples)
File "/usr/local/lib/python3.12/site-packages/datasets/arrow_writer.py", line 661, in write_batch
self.write_table(pa_table, writer_batch_size)
File "/usr/local/lib/python3.12/site-packages/datasets/arrow_writer.py", line 672, in write_table
self._build_writer(inferred_schema=pa_table.schema)
File "/usr/local/lib/python3.12/site-packages/datasets/arrow_writer.py", line 713, in _build_writer
self.pa_writer = pq.ParquetWriter(
^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/pyarrow/parquet/core.py", line 1070, in __init__
self.writer = _parquet.ParquetWriter(
^^^^^^^^^^^^^^^^^^^^^^^
File "pyarrow/_parquet.pyx", line 2363, in pyarrow._parquet.ParquetWriter.__cinit__
File "pyarrow/error.pxi", line 155, in pyarrow.lib.pyarrow_internal_check_status
File "pyarrow/error.pxi", line 92, in pyarrow.lib.check_status
pyarrow.lib.ArrowNotImplementedError: Cannot write struct type 'target_to_reference_mapping' with no child field to Parquet. Consider adding a dummy child field.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1342, in compute_config_parquet_and_info_response
parquet_operations, partial, estimated_dataset_info = stream_convert_to_parquet(
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 907, in stream_convert_to_parquet
builder._prepare_split(split_generator=splits_generators[split], file_format="parquet")
File "/usr/local/lib/python3.12/site-packages/datasets/builder.py", line 1438, in _prepare_split
for job_id, done, content in self._prepare_split_single(
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/datasets/builder.py", line 1617, in _prepare_split_single
raise DatasetGenerationError("An error occurred while generating the dataset") from e
datasets.exceptions.DatasetGenerationError: An error occurred while generating the dataset

Need help to make the dataset viewer work? Make sure to review how to configure the dataset viewer, and open a discussion for direct support.
camera.json dict | depth.npy list | metadata.json dict | rgb.png image | __key__ string | __url__ string |
|---|---|---|---|---|---|
{"carla_transform":{"location":{"x":186.4949493408203,"y":124.31021881103516,"z":2.4354076385498047}(...TRUNCATED) | [[100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,10(...TRUNCATED) | {"actor_type":"pedestrian","config":{"camera":{"fov":90.0,"height":704,"width":1280},"capture":{"fps(...TRUNCATED) | pedestrian_0_20260131_234844_ped00_000 | "hf://datasets/mkxdxd/carla-dataset@55d0a5ed46744eb678b541bff63043ec14c50744/Town01/pedestrian/carla(...TRUNCATED) | |
{"carla_transform":{"location":{"x":186.67880249023438,"y":124.3115234375,"z":2.4354076385498047},"r(...TRUNCATED) | [[100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,10(...TRUNCATED) | {"actor_type":"pedestrian","config":{"camera":{"fov":90.0,"height":704,"width":1280},"capture":{"fps(...TRUNCATED) | pedestrian_0_20260131_234844_ped00_001 | "hf://datasets/mkxdxd/carla-dataset@55d0a5ed46744eb678b541bff63043ec14c50744/Town01/pedestrian/carla(...TRUNCATED) | |
{"carla_transform":{"location":{"x":186.86767578125,"y":124.31316375732422,"z":2.4354076385498047},"(...TRUNCATED) | [[100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,10(...TRUNCATED) | {"actor_type":"pedestrian","config":{"camera":{"fov":90.0,"height":704,"width":1280},"capture":{"fps(...TRUNCATED) | pedestrian_0_20260131_234844_ped00_002 | "hf://datasets/mkxdxd/carla-dataset@55d0a5ed46744eb678b541bff63043ec14c50744/Town01/pedestrian/carla(...TRUNCATED) | |
{"carla_transform":{"location":{"x":187.04910278320312,"y":124.31184387207031,"z":2.4354076385498047(...TRUNCATED) | [[100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,10(...TRUNCATED) | {"actor_type":"pedestrian","config":{"camera":{"fov":90.0,"height":704,"width":1280},"capture":{"fps(...TRUNCATED) | pedestrian_0_20260131_234844_ped00_003 | "hf://datasets/mkxdxd/carla-dataset@55d0a5ed46744eb678b541bff63043ec14c50744/Town01/pedestrian/carla(...TRUNCATED) | |
{"carla_transform":{"location":{"x":187.22702026367188,"y":124.31088256835938,"z":2.4354076385498047(...TRUNCATED) | [[100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,10(...TRUNCATED) | {"actor_type":"pedestrian","config":{"camera":{"fov":90.0,"height":704,"width":1280},"capture":{"fps(...TRUNCATED) | pedestrian_0_20260131_234844_ped00_004 | "hf://datasets/mkxdxd/carla-dataset@55d0a5ed46744eb678b541bff63043ec14c50744/Town01/pedestrian/carla(...TRUNCATED) | |
{"carla_transform":{"location":{"x":187.4013214111328,"y":124.31216430664062,"z":2.4354076385498047}(...TRUNCATED) | [[100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,10(...TRUNCATED) | {"actor_type":"pedestrian","config":{"camera":{"fov":90.0,"height":704,"width":1280},"capture":{"fps(...TRUNCATED) | pedestrian_0_20260131_234844_ped00_005 | "hf://datasets/mkxdxd/carla-dataset@55d0a5ed46744eb678b541bff63043ec14c50744/Town01/pedestrian/carla(...TRUNCATED) | |
{"carla_transform":{"location":{"x":187.557373046875,"y":124.311279296875,"z":2.4354076385498047},"r(...TRUNCATED) | [[100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,10(...TRUNCATED) | {"actor_type":"pedestrian","config":{"camera":{"fov":90.0,"height":704,"width":1280},"capture":{"fps(...TRUNCATED) | pedestrian_0_20260131_234844_ped00_006 | "hf://datasets/mkxdxd/carla-dataset@55d0a5ed46744eb678b541bff63043ec14c50744/Town01/pedestrian/carla(...TRUNCATED) | |
{"carla_transform":{"location":{"x":187.7060089111328,"y":124.30927276611328,"z":2.4354076385498047}(...TRUNCATED) | [[100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,10(...TRUNCATED) | {"actor_type":"pedestrian","config":{"camera":{"fov":90.0,"height":704,"width":1280},"capture":{"fps(...TRUNCATED) | pedestrian_0_20260131_234844_ped00_007 | "hf://datasets/mkxdxd/carla-dataset@55d0a5ed46744eb678b541bff63043ec14c50744/Town01/pedestrian/carla(...TRUNCATED) | |
{"carla_transform":{"location":{"x":187.85598754882812,"y":124.30854034423828,"z":2.4354076385498047(...TRUNCATED) | [[100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,10(...TRUNCATED) | {"actor_type":"pedestrian","config":{"camera":{"fov":90.0,"height":704,"width":1280},"capture":{"fps(...TRUNCATED) | pedestrian_0_20260131_234844_ped00_008 | "hf://datasets/mkxdxd/carla-dataset@55d0a5ed46744eb678b541bff63043ec14c50744/Town01/pedestrian/carla(...TRUNCATED) | |
{"carla_transform":{"location":{"x":188.0433807373047,"y":124.30751037597656,"z":2.4354076385498047}(...TRUNCATED) | [[100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,10(...TRUNCATED) | {"actor_type":"pedestrian","config":{"camera":{"fov":90.0,"height":704,"width":1280},"capture":{"fps(...TRUNCATED) | pedestrian_0_20260131_234844_ped00_009 | "hf://datasets/mkxdxd/carla-dataset@55d0a5ed46744eb678b541bff63043ec14c50744/Town01/pedestrian/carla(...TRUNCATED) |
CARLA Dataset (Target)
A large-scale driving dataset captured from CARLA simulator, containing RGB images and depth maps with camera parameters for autonomous driving research.
Tar File Structure
This dataset is stored in WebDataset format for efficient streaming and loading.
Sharding Strategy
- 3 scenes per shard: Each .tar file contains exactly 3 complete scenes
- 93 frames per scene: Each scene is a video sequence of 93 consecutive frames
- ~279 frames per tar: 3 scenes × 93 frames = 279 frames per shard
Repository Structure
carla-dataset/
├── Town01/
│   ├── vehicle/
│   │   ├── carla-stage2-000000.tar  → Scenes 1-3
│   │   ├── carla-stage2-000001.tar  → Scenes 4-6
│   │   └── ...
│   └── pedestrian/
│       └── ...
├── Town02/
│   └── ...
├── Town03/
│   └── ...
├── Town04/
│   └── ...
├── Town05/
│   └── ...
└── Town06/
    └── ...
Shard Contents (WebDataset format)
Each tar file contains samples with the following files per frame:
{scene_id}_{frame_idx:03d}.rgb.png → RGB image (1280×704)
{scene_id}_{frame_idx:03d}.depth.npy → Depth map (numpy array, 704×1280)
{scene_id}_{frame_idx:03d}.camera.json → Camera parameters + matched reference frames
{scene_id}_{frame_idx:03d}.metadata.json → Scene info (scene_id, frame_id, town, actor_type)
Key Format: {scene_id}_{frame_idx:03d} (e.g., scene_001_000, scene_001_001, ..., scene_001_092)
Data Format
Each sample contains:
- rgb: PIL.Image (1280×704) — RGB image
- depth: np.ndarray (704, 1280) — Depth map
- camera: dict containing:
  - intrinsic: Camera intrinsic matrix
  - extrinsic: Camera extrinsic matrix
  - matched_references: List of reference image IDs accumulated along the trajectory
- metadata: dict containing:
  - scene_id: Unique scene identifier
  - frame_id: Frame index within the scene (0-92)
  - town: CARLA town name (Town01-Town06)
  - actor_type: Type of actor being followed ("vehicle" or "pedestrian")
Camera JSON with Reference Frame Mapping
Each camera.json contains camera parameters and matched reference frames:
{
"intrinsic": { ... },
"extrinsic": { ... },
"carla_transform": { ... },
"matched_references": ["subset_0/0001", "subset_0/0042", "subset_0/0083"]
}
Reference Accumulation: As the camera moves through the scene, reference frames are accumulated:
- Frame 0: [ref_A] — first reference encountered
- Frame 20: [ref_A, ref_B] — new reference added
- Frame 40: [ref_A, ref_B, ref_C] — another reference added
- ...
This allows each target frame to know which reference images are relevant based on position and viewing angle.
Dataset Statistics
Dataset Summary
| Town | Mode | Scenes | Images | Avg Images/Scene |
|---|---|---|---|---|
| Town01 | pedestrian | 520 | 48,360 | 93.0 |
| Town01 | vehicle | 490 | 45,570 | 93.0 |
| Town01 | TOTAL | 1,010 | 93,930 | 93.0 |
| Town02 | pedestrian | 509 | 47,337 | 93.0 |
| Town02 | vehicle | 500 | 46,500 | 93.0 |
| Town02 | TOTAL | 1,009 | 93,837 | 93.0 |
| Town03 | pedestrian | 500 | 46,500 | 93.0 |
| Town03 | vehicle | 500 | 46,500 | 93.0 |
| Town03 | TOTAL | 1,000 | 93,000 | 93.0 |
| Town04 | pedestrian | 500 | 46,500 | 93.0 |
| Town04 | vehicle | 530 | 49,290 | 93.0 |
| Town04 | TOTAL | 1,030 | 95,790 | 93.0 |
| Town05 | pedestrian | 500 | 46,500 | 93.0 |
| Town05 | vehicle | 500 | 46,500 | 93.0 |
| Town05 | TOTAL | 1,000 | 93,000 | 93.0 |
| Town06 | pedestrian | 500 | 46,500 | 93.0 |
| Town06 | vehicle | 500 | 46,500 | 93.0 |
| Town06 | TOTAL | 1,000 | 93,000 | 93.0 |
Grand Total
| Metric | Value |
|---|---|
| Total Scenes | 6,049 |
| Total Images | 562,557 |
| Towns | 6 (Town01-06) |
Example Usage
Full example code is available in example_usage.py
Installation
pip install datasets torch pillow numpy webdataset
Basic Usage with webdataset
import webdataset as wds
from huggingface_hub import hf_hub_url
import json
import numpy as np
from PIL import Image
import io
# Stream the tar shards directly from the HuggingFace Hub.
url = "https://huggingface.co/datasets/mkxdxd/carla-dataset/resolve/main/Town01/pedestrian/{carla-stage2-000000..carla-stage2-000010}.tar"
dataset = wds.WebDataset(url).decode("pil")

# Inspect a single record to sanity-check the sample layout, then stop.
for sample in dataset:
    key = sample["__key__"]
    # .decode("pil") already turned the PNG payload into a PIL.Image.
    rgb = sample["rgb.png"]
    # The .npy payload stays raw bytes; deserialize it ourselves.
    depth = np.load(io.BytesIO(sample["depth.npy"]))
    # Both JSON sidecars parse into plain dicts.
    camera = json.loads(sample["camera.json"])
    metadata = json.loads(sample["metadata.json"])
    print(f"Key: {key}")
    print(f"RGB size: {rgb.size}")
    print(f"Depth shape: {depth.shape}")
    print(f"Scene: {metadata['scene_id']}, Town: {metadata['town']}")
    break
With PyTorch DataLoader
import webdataset as wds
import torch
from torch.utils.data import DataLoader
import json
import numpy as np
from PIL import Image
import io
def decode_sample(sample):
    """Turn one raw WebDataset sample into ready-to-use Python objects.

    ``sample`` maps file suffixes to payloads: ``rgb.png`` is presumably
    already a PIL image (decoded upstream by ``.decode("pil")``), while the
    depth map and the two JSON sidecars arrive as raw bytes/strings.
    """
    depth_array = np.load(io.BytesIO(sample["depth.npy"]))
    return {
        "rgb": sample["rgb.png"],
        "depth": depth_array,
        "camera": json.loads(sample["camera.json"]),
        "metadata": json.loads(sample["metadata.json"]),
    }
url = "https://huggingface.co/datasets/mkxdxd/carla-dataset/resolve/main/Town01/pedestrian/{carla-stage2-000000..carla-stage2-000010}.tar"
# Build the streaming pipeline: decode images, then map raw records to dicts.
dataset = wds.WebDataset(url).decode("pil").map(decode_sample)
# WebDataset is an IterableDataset, so it plugs straight into DataLoader.
dataloader = DataLoader(dataset, batch_size=8, num_workers=4)
for batch in dataloader:
    rgb_batch = batch["rgb"]
    depth_batch = batch["depth"]
    # ... training code
    break
Dataset generated from CARLA Simulator
- Downloads last month
- 5,127