from huggingface_hub import hf_hub_download
import h5py
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader


def download_and_load_dataset(repo_id, filename="ucf_crime_features_labeled.h5"):
    """Download the HDF5 file from Hugging Face and return the local path."""
    hdf5_path = hf_hub_download(
        repo_id=repo_id,
        filename=filename,
        repo_type="dataset"
    )
    return hdf5_path


class UCFCrimeDataset(Dataset):
    """Dataset class for loading UCF-Crime features with temporal annotations."""

    def __init__(self, hdf5_path, split=None, transform=None):
        self.hdf5_path = hdf5_path
        self.transform = transform
        # Opened lazily per process: h5py file handles cannot be pickled, so
        # keeping one open here would break DataLoader with num_workers > 0.
        self.hdf5_file = None

        # Build the list of video paths (category/video_name) with a
        # short-lived handle so nothing stays open after __init__.
        with h5py.File(hdf5_path, 'r') as f:
            self.video_paths = []
            for category_name in f.keys():
                category_group = f[category_name]
                for video_name in category_group.keys():
                    self.video_paths.append(f"{category_name}/{video_name}")

            # Filter by split if specified
            if split is not None:
                filtered_paths = []
                for video_path in self.video_paths:
                    video_split = f[video_path].attrs.get('split', 'Unknown')
                    # HDF5 string attributes may be returned as bytes
                    if isinstance(video_split, bytes):
                        video_split = video_split.decode('utf-8')
                    # Case-insensitive comparison
                    if video_split.lower() == split.lower():
                        filtered_paths.append(video_path)
                self.video_paths = filtered_paths
                print(f"Loaded {len(self.video_paths)} videos for split: {split}")

    def _file(self):
        """Return this process's HDF5 handle, opening it on first use."""
        if self.hdf5_file is None:
            self.hdf5_file = h5py.File(self.hdf5_path, 'r')
        return self.hdf5_file

    def __len__(self):
        return len(self.video_paths)

    def __getitem__(self, idx):
        video_path = self.video_paths[idx]
        video_group = self._file()[video_path]

        features = np.array(video_group['features'])
        labels = np.array(video_group['labels'])

        # Convert to tensors
        features = torch.from_numpy(features).float()
        labels = torch.from_numpy(labels).float()

        if self.transform:
            features = self.transform(features)

        # Get metadata
        duration = video_group.attrs.get('duration', 0.0)
        split = video_group.attrs.get('split', 'Unknown')
        if isinstance(split, bytes):
            split = split.decode('utf-8')

        return {
            'video_id': video_path,
            'features': features,
            'labels': labels,
            'duration': duration,
            'split': split
        }

    def close(self):
        """Close the HDF5 file if this process opened one."""
        if self.hdf5_file is not None:
            self.hdf5_file.close()
            self.hdf5_file = None
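
# The dataloaders created below rely on PyTorch's default collate, which stacks
# tensors and therefore requires every video in a batch to have the same number
# of frames. If the videos in this file vary in length (worth checking before
# training), a padding collate function is one way to batch them. A minimal
# sketch, assuming 'features' has shape (num_frames, feature_dim) and 'labels'
# has shape (num_frames,); pad_collate is an illustrative helper, not part of
# the original pipeline:
def pad_collate(batch):
    """Zero-pad variable-length sequences to the longest video in the batch."""
    max_len = max(item['features'].shape[0] for item in batch)
    features, labels, masks = [], [], []
    for item in batch:
        seq_len = item['features'].shape[0]
        pad = max_len - seq_len
        # F.pad pads from the last dimension backwards: (0, 0) leaves the
        # feature dimension alone, (0, pad) extends the temporal dimension.
        features.append(torch.nn.functional.pad(item['features'], (0, 0, 0, pad)))
        labels.append(torch.nn.functional.pad(item['labels'], (0, pad)))
        # 1 marks real frames, 0 marks padding, so losses can be masked.
        masks.append(torch.cat([torch.ones(seq_len), torch.zeros(pad)]))
    return {
        'video_id': [item['video_id'] for item in batch],
        'features': torch.stack(features),
        'labels': torch.stack(labels),
        'mask': torch.stack(masks),
        'duration': torch.tensor([float(item['duration']) for item in batch]),
        'split': [item['split'] for item in batch],
    }
# To use it, pass collate_fn=pad_collate to each DataLoader below.
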
def create_dataloaders_from_huggingface(repo_id, batch_size=16, num_workers=2):
    """Download dataset from Hugging Face and create dataloaders."""
    # Download the HDF5 file
    print(f"Downloading dataset from {repo_id}...")
    hdf5_path = download_and_load_dataset(repo_id)
    print(f"✓ Dataset downloaded to: {hdf5_path}")

    # Create one dataset per split
    print("\nCreating datasets...")
    train_dataset = UCFCrimeDataset(hdf5_path, split='Train')
    val_dataset = UCFCrimeDataset(hdf5_path, split='Val')
    test_dataset = UCFCrimeDataset(hdf5_path, split='Test')

    # Create dataloaders; only the training set is shuffled
    train_loader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        pin_memory=True
    )
    val_loader = DataLoader(
        val_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=True
    )
    test_loader = DataLoader(
        test_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=True
    )

    print(f"\n{'='*60}")
    print("Dataset Statistics:")
    print(f"{'='*60}")
    print(f"  Training set:   {len(train_dataset):>4} videos")
    print(f"  Validation set: {len(val_dataset):>4} videos")
    print(f"  Test set:       {len(test_dataset):>4} videos")
    print(f"  Total:          {len(train_dataset) + len(val_dataset) + len(test_dataset):>4} videos")
    print(f"{'='*60}")

    return train_loader, val_loader, test_loader, hdf5_path


if __name__ == "__main__":
    repo_id = "Rahima411/ucf-anomaly-detection-mapped"

    # Create dataloaders
    train_loader, val_loader, test_loader, hdf5_path = create_dataloaders_from_huggingface(
        repo_id=repo_id,
        batch_size=16,
        num_workers=2
    )

    # Test loading batches
    print("\nLoading Data...")
    print("-" * 60)

    for split_name, loader in [("Train", train_loader), ("Val", val_loader), ("Test", test_loader)]:
        print(f"\n{split_name} set - First batch:")
        for batch in loader:
            print(f"  Batch size: {len(batch['video_id'])} videos")
            print(f"  Features shape: {batch['features'].shape}")
            print(f"  Labels shape: {batch['labels'].shape}")
            print(f"  Sample video IDs: {batch['video_id'][:3]}")

            # Calculate anomaly statistics
            labels_np = batch['labels'].numpy()
            num_anomaly_frames = (labels_np == 1).sum()
            total_frames = labels_np.size
            anomaly_pct = 100 * num_anomaly_frames / total_frames if total_frames > 0 else 0
            print(f"  Anomaly frames: {num_anomaly_frames:,} / {total_frames:,} ({anomaly_pct:.2f}%)")
            break  # Only show the first batch
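
    # Optional cleanup: release any HDF5 handle a dataset opened in this
    # process (handles opened inside DataLoader workers are released when
    # those worker processes exit).
    for loader in (train_loader, val_loader, test_loader):
        loader.dataset.close()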