Tasks:
Image Segmentation
Formats:
parquet
Sub-tasks:
instance-segmentation
Languages:
English
Size:
10K - 100K
ArXiv:
1608.05442
License:
Creative Commons BSD-3 License Agreement
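For reference, a minimal loading sketch (the ade20k.py file name is an assumption; the script below expects the extracted ADE20K_2021_17_01 folder in the working directory):

import datasets

# Load the builder script from a local file; trust_remote_code may be
# required on recent versions of the datasets library.
ds = datasets.load_dataset("ade20k.py", trust_remote_code=True)
print(ds["train"][0]["filename"])

The full loading script: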
import json
from datetime import datetime
from pathlib import Path

import datasets

_VERSION = "0.1.0"
_CITATION = """
@inproceedings{8100027,
    title = {Scene Parsing through ADE20K Dataset},
    author = {Zhou, Bolei and Zhao, Hang and Puig, Xavier and Fidler, Sanja and Barriuso, Adela and Torralba, Antonio},
    year = 2017,
    booktitle = {2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
    pages = {5122--5130},
    doi = {10.1109/CVPR.2017.544},
    keywords = {Image segmentation;Semantics;Sun;Labeling;Visualization;Neural networks;Computer vision}
}
@misc{zhou2018semantic,
    title = {Semantic Understanding of Scenes through the ADE20K Dataset},
    author = {Bolei Zhou and Hang Zhao and Xavier Puig and Tete Xiao and Sanja Fidler and Adela Barriuso and Antonio Torralba},
    year = 2018,
    eprint = {1608.05442},
    archiveprefix = {arXiv},
    primaryclass = {cs.CV}
}
"""
_DESCRIPTION = """
ADE20K is composed of more than 27K images from the SUN and Places databases.
Images are fully annotated with objects, spanning over 3K object categories.
Many of the images also contain object parts, and parts of parts.
We also provide the original annotated polygons, as well as object instances for amodal segmentation.
Images are also anonymized, blurring faces and license plates.
"""
_HOMEPAGE = "https://groups.csail.mit.edu/vision/datasets/ADE20K/"
_LICENSE = "Creative Commons BSD-3 License Agreement"
_FEATURES = datasets.Features(
    {
        "image": datasets.Image(mode="RGB"),
        "segmentations": datasets.Sequence(datasets.Image(mode="RGB")),
        "instances": datasets.Sequence(datasets.Image(mode="L")),
        "filename": datasets.Value("string"),
        "folder": datasets.Value("string"),
        "source": datasets.Features(
            {
                "folder": datasets.Value("string"),
                "filename": datasets.Value("string"),
                "origin": datasets.Value("string"),
            }
        ),
        "scene": datasets.Sequence(datasets.Value("string")),
        "objects": [
            {
                "id": datasets.Value("uint16"),
                "name": datasets.Value("string"),
                "name_ndx": datasets.Value("uint16"),
                "hypernym": datasets.Sequence(datasets.Value("string")),
                "raw_name": datasets.Value("string"),
                "attributes": datasets.Value("string"),
                "depth_ordering_rank": datasets.Value("uint16"),
                "occluded": datasets.Value("bool"),
                "crop": datasets.Value("bool"),
                "parts": {
                    "is_part_of": datasets.Value("uint16"),
                    "part_level": datasets.Value("uint8"),
                    "has_parts": datasets.Sequence(datasets.Value("uint16")),
                },
                "polygon": {
                    "x": datasets.Sequence(datasets.Value("uint16")),
                    "y": datasets.Sequence(datasets.Value("uint16")),
                    "click_date": datasets.Sequence(datasets.Value("timestamp[us]")),
                },
                "saved_date": datasets.Value("timestamp[us]"),
            }
        ],
    }
)
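# Sketch of one decoded example under this schema (for orientation):
# "image" decodes to a PIL RGB image; "segmentations" holds the
# whole-scene mask first, followed by any part-level masks; "instances"
# is one mode-"L" mask per object; "objects" nests the polygons, part
# links, and timestamps parsed by the builder below.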
class ADE20K(datasets.GeneratorBasedBuilder):
    # Each example embeds several images, so keep writer batches small.
    DEFAULT_WRITER_BATCH_SIZE = 1000

    def _info(self):
        return datasets.DatasetInfo(
            features=_FEATURES,
            supervised_keys=None,
            description=_DESCRIPTION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            version=_VERSION,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager: datasets.DownloadManager):
        # The ADE20K 2021 archive is expected to be extracted into the
        # working directory; annotations are one JSON file per image.
        archive_training = Path("ADE20K_2021_17_01/images/ADE/training")
        archive_validation = Path("ADE20K_2021_17_01/images/ADE/validation")
        jsons_training = sorted(archive_training.rglob("*.json"))
        jsons_validation = sorted(archive_validation.rglob("*.json"))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"jsons": jsons_training},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"jsons": jsons_validation},
            ),
        ]
    def parse_date(self, date: str) -> datetime | None:
        # The exported annotations mix several date layouts and
        # serialize missing dates as an empty list.
        if date == []:
            return None
        formats = (
            "%d-%m-%y %H:%M:%S:%f",
            "%d-%b-%Y %H:%M:%S:%f",
            "%d-%m-%y %H:%M:%S",
            "%d-%b-%Y %H:%M:%S",
        )
        for fmt in formats:
            try:
                return datetime.strptime(date, fmt)
            except ValueError:
                continue
        raise ValueError(f"Could not parse date: {date}")
    def parse_imsize(self, imsize: list[int]) -> list[int]:
        # Sizes given as [height, width] get a channel count appended,
        # normalizing to [height, width, channels].
        if len(imsize) == 2:
            return imsize + [3]
        return imsize
    def parse_json(self, json_path: Path):
        with json_path.open("r", encoding="ISO-8859-1") as f:
            data = json.load(f)
        annotation = data["annotation"]
        objects = annotation["object"]
        # Part-level masks sit next to the image and match "<stem>_parts*";
        # sort for a stable order across filesystems. The whole-scene mask
        # "<stem>_seg.png" goes first.
        stem = annotation["filename"].removesuffix(".jpg")
        segmentations = sorted(
            str(part) for part in json_path.parent.glob(f"{stem}_parts*")
        )
        main_mask = json_path.parent / annotation["filename"]
        main_mask = str(main_mask.with_suffix("")) + "_seg.png"
        segmentations.insert(0, main_mask)
        instances = [str(json_path.parent / obj["instance_mask"]) for obj in objects]
        return {
            "image": str(json_path.parent / annotation["filename"]),
            "segmentations": segmentations,
            "instances": instances,
            "filename": annotation["filename"],
            "folder": annotation["folder"],
            "source": {
                "folder": annotation["source"]["folder"],
                "filename": annotation["source"]["filename"],
                "origin": annotation["source"]["origin"],
            },
            "scene": annotation["scene"],
            "objects": [
                {
                    "id": obj["id"],
                    "name": obj["name"],
                    "name_ndx": obj["name_ndx"],
                    "hypernym": obj["hypernym"],
                    "raw_name": obj["raw_name"],
                    # Missing attributes are serialized as an empty list.
                    "attributes": "" if obj["attributes"] == [] else obj["attributes"],
                    "depth_ordering_rank": obj["depth_ordering_rank"],
                    "occluded": obj["occluded"] == "yes",
                    "crop": obj["crop"] == "1",
                    "parts": {
                        "part_level": obj["parts"]["part_level"],
                        "is_part_of": None
                        if obj["parts"]["ispartof"] == []
                        else obj["parts"]["ispartof"],
                        # A single part id is serialized as a bare int.
                        "has_parts": [obj["parts"]["hasparts"]]
                        if isinstance(obj["parts"]["hasparts"], int)
                        else obj["parts"]["hasparts"],
                    },
                    "polygon": {
                        # Clamp stray negative coordinates to the image border.
                        "x": [int(max(0, x)) for x in obj["polygon"]["x"]],
                        "y": [int(max(0, y)) for y in obj["polygon"]["y"]],
                        "click_date": [
                            self.parse_date(d)
                            for d in obj["polygon"].get("click_date", [])
                        ],
                    },
                    "saved_date": self.parse_date(obj["saved_date"]),
                }
                for obj in objects
            ],
        }
    def _generate_examples(self, jsons: list[Path]):
        for i, json_path in enumerate(jsons):
            yield i, self.parse_json(json_path)
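A quick usage sketch, assuming the dataset was loaded as ds in the example above: each example nests per-object annotations alongside the decoded images.

example = ds["train"][0]
print(example["filename"], example["scene"])
for obj in example["objects"][:3]:
    # Polygon vertices are stored as parallel x/y sequences.
    points = list(zip(obj["polygon"]["x"], obj["polygon"]["y"]))
    print(obj["name"], len(points), "vertices")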