"""HuggingFace `datasets` loading script for the GOES-16 ABI satellite image dataset."""

import datasets
import numpy as np
import pandas as pd

_DESCRIPTION = """ GOES-16 ABI (Advanced Baseline Imager) satellite image dataset with multi-spectral imagery and corresponding labels. The dataset contains training and test splits at two different resolutions (128x128 and 256x256). Each image has 16 spectral channels from the GOES-16 ABI instrument. Data provided by NOAA and NESDIS. """

_HOMEPAGE = ""

_LICENSE = ""

# URLs for the data files - using HuggingFace repository URLs
_URLS = {
    "data": "https://huggingface.co/datasets/Silicon23/ioai2025-athome-satellite-images/resolve/main/data/dataset.npz",
    "metadata": "https://huggingface.co/datasets/Silicon23/ioai2025-athome-satellite-images/resolve/main/data/metadata.csv",
}


class Goes16Dataset(datasets.GeneratorBasedBuilder):
    """GOES-16 ABI satellite image dataset with multi-spectral imagery."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="all",
            version=VERSION,
            description="All resolutions combined (128x128 and 256x256 images)",
        ),
        datasets.BuilderConfig(
            name="128x128",
            version=VERSION,
            description="128x128 resolution images only",
        ),
        datasets.BuilderConfig(
            name="256x256",
            version=VERSION,
            description="256x256 resolution images only",
        ),
    ]

    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        """Return DatasetInfo whose feature schema matches the selected config.

        The "all" config must hold both resolutions in one schema, so it uses
        variable-size nested Sequences; the single-resolution configs use
        fixed-shape Array3D/Array2D for better caching compatibility.
        """
        # Metadata fields shared by every config, regardless of resolution.
        common_features = {
            "i": datasets.Value("int32"),
            "j": datasets.Value("int32"),
            "start_time": datasets.Value("string"),
            "end_time": datasets.Value("string"),
            "ind": datasets.Value("int32"),
            # This field indicates the actual resolution (128 or 256).
            "size": datasets.Value("int32"),
        }

        if self.config.name == "all":
            # Flexible features that can handle both resolutions.
            features = datasets.Features(
                {
                    # Variable size [16, H, W]
                    "image": datasets.Sequence(
                        datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
                    ),
                    # Variable size [H, W]
                    "label": datasets.Sequence(datasets.Sequence(datasets.Value("uint8"))),
                    **common_features,
                }
            )
        else:
            # Fixed-size arrays; the side length is encoded in the config name.
            side = 128 if self.config.name == "128x128" else 256
            features = datasets.Features(
                {
                    "image": datasets.Array3D(shape=(16, side, side), dtype="float32"),
                    "label": datasets.Array2D(shape=(side, side), dtype="uint8"),
                    **common_features,
                }
            )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the .npz/.csv payloads and declare the train/test splits."""
        # Download the files (no extraction needed for .npz/.csv).
        downloaded_files = dl_manager.download(_URLS)
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "data_file": downloaded_files["data"],
                    "metadata_file": downloaded_files["metadata"],
                    "split": split_label,
                },
            )
            for split_name, split_label in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, data_file, metadata_file, split):
        """Yield (id, example) pairs for one split.

        Args:
            data_file: path to the downloaded ``dataset.npz`` containing arrays
                keyed ``X_{split}_{size}`` / ``Y_{split}_{size}``.
            metadata_file: path to ``metadata.csv`` with per-example rows
                (columns include ``split``, ``size``, ``ind``, ``i``, ``j``,
                ``start_time``, ``end_time``).
            split: ``"train"`` or ``"test"``.
        """
        data = np.load(data_file)
        metadata = pd.read_csv(metadata_file)

        # Restrict metadata to the requested split.
        split_metadata = metadata[metadata["split"] == split]

        if self.config.name == "all":
            # "all" loads both resolutions and must emit nested lists so the
            # variable-size Sequence features accept both shapes.
            sizes = (128, 256)
            as_list = True
        else:
            sizes = (128,) if self.config.name == "128x128" else (256,)
            # Fixed-size configs take numpy arrays directly.
            as_list = False

        example_id = 0
        for size in sizes:
            # Filter metadata for the current resolution and fetch its arrays.
            size_metadata = split_metadata[split_metadata["size"] == size]
            X_data = data[f"X_{split}_{size}"]
            Y_data = data[f"Y_{split}_{size}"]

            for _, row in size_metadata.iterrows():
                ind = row["ind"]
                if ind >= len(X_data):
                    # Safety check: skip metadata rows pointing past the array.
                    continue
                image_array = X_data[ind].astype(np.float32)
                label_array = Y_data[ind].astype(np.uint8)
                yield example_id, {
                    "image": image_array.tolist() if as_list else image_array,
                    "label": label_array.tolist() if as_list else label_array,
                    "i": int(row["i"]),
                    "j": int(row["j"]),
                    "start_time": str(row["start_time"]),
                    "end_time": str(row["end_time"]),
                    "ind": int(row["ind"]),
                    "size": int(row["size"]),
                }
                example_id += 1