Rahima411 committed
Commit 46b5521 · verified · 1 Parent(s): a3cb0b1

Update data_loader.py

Files changed (1):
  1. data_loader.py +138 -25
data_loader.py CHANGED
@@ -1,3 +1,17 @@
-
+from huggingface_hub import hf_hub_download
+import h5py
+import torch
+import numpy as np
+from torch.utils.data import Dataset, DataLoader
+
+def download_and_load_dataset(repo_id, filename="ucf_crime_features_labeled.h5"):
+    """Download the HDF5 file from Hugging Face and return the local path."""
+    hdf5_path = hf_hub_download(
+        repo_id=repo_id,
+        filename=filename,
+        repo_type="dataset"
+    )
+    return hdf5_path
+
 class UCFCrimeDataset(Dataset):
     """Dataset class for loading UCF-Crime features with temporal annotations."""
@@ -6,48 +20,147 @@ class UCFCrimeDataset(Dataset):
         self.hdf5_path = hdf5_path
         self.transform = transform

-        with h5py.File(hdf5_path, 'r') as f:
-            self.video_ids = list(f.keys())
+        # Open the HDF5 file
+        self.hdf5_file = h5py.File(hdf5_path, 'r')
+
+        # Build list of video paths (category/video_name)
+        self.video_paths = []
+        for category_name in self.hdf5_file.keys():
+            category_group = self.hdf5_file[category_name]
+            for video_name in category_group.keys():
+                video_path = f"{category_name}/{video_name}"
+                self.video_paths.append(video_path)

+        # Filter by split if specified
         if split is not None:
-            filtered_ids = []
-            with h5py.File(hdf5_path, 'r') as f:
-                for video_id in self.video_ids:
-                    video_split = f[video_id].attrs.get('split', 'Unknown')
-                    if video_split.lower() == split.lower():
-                        filtered_ids.append(video_id)
-            self.video_ids = filtered_ids
+            filtered_paths = []
+            for video_path in self.video_paths:
+                video_group = self.hdf5_file[video_path]
+                video_split = video_group.attrs.get('split', 'Unknown')
+
+                # Handle bytes type
+                if isinstance(video_split, bytes):
+                    video_split = video_split.decode('utf-8')
+
+                # Case-insensitive comparison
+                if video_split.lower() == split.lower():
+                    filtered_paths.append(video_path)
+
+            self.video_paths = filtered_paths
+            print(f"Loaded {len(self.video_paths)} videos for split: {split}")

     def __len__(self):
-        return len(self.video_ids)
+        return len(self.video_paths)

     def __getitem__(self, idx):
-        video_id = self.video_ids[idx]
-        with h5py.File(self.hdf5_path, 'r') as f:
-            features = np.array(f[f"{video_id}/features"])
-            labels = np.array(f[f"{video_id}/labels"])
+        video_path = self.video_paths[idx]
+        video_group = self.hdf5_file[video_path]
+
+        features = np.array(video_group['features'])
+        labels = np.array(video_group['labels'])

+        # Convert to tensors
         features = torch.from_numpy(features).float()
         labels = torch.from_numpy(labels).float()

         if self.transform:
             features = self.transform(features)

+        # Get metadata
+        duration = video_group.attrs.get('duration', 0.0)
+        split = video_group.attrs.get('split', 'Unknown')
+        if isinstance(split, bytes):
+            split = split.decode('utf-8')
+
         return {
-            'video_id': video_id,
+            'video_id': video_path,
             'features': features,
-            'labels': labels
+            'labels': labels,
+            'duration': duration,
+            'split': split
         }

-def create_dataloaders(hdf5_path, batch_size=16):
-    """Create train/validation/test dataloaders."""
-    train_dataset = UCFCrimeDataset(hdf5_path, split='train')
-    val_dataset = UCFCrimeDataset(hdf5_path, split='validation')
-    test_dataset = UCFCrimeDataset(hdf5_path, split='test')
-
-    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
-    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=4)
-    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=4)
-
-    return train_loader, val_loader, test_loader
+    def close(self):
+        """Close the HDF5 file."""
+        if self.hdf5_file:
+            self.hdf5_file.close()
+
+def create_dataloaders_from_huggingface(repo_id, batch_size=16, num_workers=2):
+    """Download dataset from Hugging Face and create dataloaders."""
+
+    # Download the HDF5 file
+    print(f"Downloading dataset from {repo_id}...")
+    hdf5_path = download_and_load_dataset(repo_id)
+    print(f"✓ Dataset downloaded to: {hdf5_path}")
+
+    # Create datasets
+    print("\nCreating datasets...")
+    train_dataset = UCFCrimeDataset(hdf5_path, split='Train')
+    val_dataset = UCFCrimeDataset(hdf5_path, split='Val')
+    test_dataset = UCFCrimeDataset(hdf5_path, split='Test')
+
+    # Create dataloaders
+    train_loader = DataLoader(
+        train_dataset,
+        batch_size=batch_size,
+        shuffle=True,
+        num_workers=num_workers,
+        pin_memory=True
+    )
+    val_loader = DataLoader(
+        val_dataset,
+        batch_size=batch_size,
+        shuffle=False,
+        num_workers=num_workers,
+        pin_memory=True
+    )
+    test_loader = DataLoader(
+        test_dataset,
+        batch_size=batch_size,
+        shuffle=False,
+        num_workers=num_workers,
+        pin_memory=True
+    )
+
+    print(f"\n{'='*60}")
+    print("Dataset Statistics:")
+    print(f"{'='*60}")
+    print(f" Training set: {len(train_dataset):>4} videos")
+    print(f" Validation set: {len(val_dataset):>4} videos")
+    print(f" Test set: {len(test_dataset):>4} videos")
+    print(f" Total: {len(train_dataset) + len(val_dataset) + len(test_dataset):>4} videos")
+    print(f"{'='*60}")
+
+    return train_loader, val_loader, test_loader, hdf5_path

+if __name__ == "__main__":
+    repo_id = "Rahima411/ucf-anomaly-detection-mapped"
+
+    # Create dataloaders
+    train_loader, val_loader, test_loader, hdf5_path = create_dataloaders_from_huggingface(
+        repo_id=repo_id,
+        batch_size=16,
+        num_workers=2
+    )
+
+    # Test loading batches
+    print("\nLoading Data...")
+    print("-" * 60)
+
+    for split_name, loader in [("Train", train_loader), ("Val", val_loader), ("Test", test_loader)]:
+        print(f"\n{split_name} set - First batch:")
+        for batch in loader:
+            print(f" Batch size: {len(batch['video_id'])} videos")
+            print(f" Features shape: {batch['features'].shape}")
+            print(f" Labels shape: {batch['labels'].shape}")
+            print(f" Sample video IDs: {batch['video_id'][:3]}")
+
+            # Calculate anomaly statistics
+            labels_np = batch['labels'].numpy()
+            num_anomaly_frames = (labels_np == 1).sum()
+            total_frames = labels_np.size
+            anomaly_pct = 100 * num_anomaly_frames / total_frames if total_frames > 0 else 0
+
+            print(f" Anomaly frames: {num_anomaly_frames:,} / {total_frames:,} ({anomaly_pct:.2f}%)")
+            break  # Only show first batch
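
Notes on the change (not part of the commit):

The new __init__ keeps a single open handle on self.hdf5_file, but open h5py File objects cannot be pickled, so the DataLoaders above, which use num_workers=2, may fail or misbehave when the dataset is shipped to worker processes. A common workaround is to open the file lazily, once per worker. This is a minimal sketch under that assumption; LazyHDF5Dataset and _h5 are hypothetical names, and the same category/video_name layout with 'features' and 'labels' datasets is assumed:

import h5py
import numpy as np
import torch
from torch.utils.data import Dataset

class LazyHDF5Dataset(Dataset):
    """Hypothetical variant of UCFCrimeDataset that opens the file per worker."""

    def __init__(self, hdf5_path, video_paths):
        self.hdf5_path = hdf5_path
        self.video_paths = video_paths  # list of "category/video_name" keys, built beforehand
        self._file = None               # one handle per process, opened on first access

    def _h5(self):
        # h5py handles are not picklable, so each DataLoader worker opens its own.
        if self._file is None:
            self._file = h5py.File(self.hdf5_path, 'r')
        return self._file

    def __len__(self):
        return len(self.video_paths)

    def __getitem__(self, idx):
        group = self._h5()[self.video_paths[idx]]
        features = torch.from_numpy(np.array(group['features'])).float()
        labels = torch.from_numpy(np.array(group['labels'])).float()
        return {'video_id': self.video_paths[idx], 'features': features, 'labels': labels}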
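Separately, the default collate function stacks each batch's 'features' tensors, which only works if every video in a batch has the same number of frames. If feature sequences vary in length across untrimmed videos (plausible for UCF-Crime, though not confirmed by this commit), a padding collate_fn would be needed. A sketch under that assumption; pad_collate is a hypothetical helper:

import torch
from torch.nn.utils.rnn import pad_sequence

def pad_collate(batch):
    # Assumes each item is the dict returned by UCFCrimeDataset.__getitem__,
    # with 'features' shaped (num_frames, feature_dim) and 'labels' shaped (num_frames,).
    features = pad_sequence([item['features'] for item in batch], batch_first=True)
    labels = pad_sequence([item['labels'] for item in batch], batch_first=True)
    lengths = torch.tensor([item['features'].shape[0] for item in batch])
    return {
        'video_id': [item['video_id'] for item in batch],
        'features': features,  # (batch, max_frames, feature_dim), zero-padded
        'labels': labels,      # (batch, max_frames), zero-padded
        'lengths': lengths,    # original frame counts, for masking the padding
    }

It would be passed via DataLoader(..., collate_fn=pad_collate).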
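Finally, a quick way to sanity-check the HDF5 layout and the 'split' and 'duration' attributes outside the Dataset class; this snippet is illustrative only, reusing the repo_id and filename from the commit:

import h5py
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="Rahima411/ucf-anomaly-detection-mapped",
    filename="ucf_crime_features_labeled.h5",
    repo_type="dataset",
)
with h5py.File(path, 'r') as f:
    category = next(iter(f.keys()))          # first category group
    video = next(iter(f[category].keys()))   # first video in that category
    group = f[f"{category}/{video}"]
    print(group['features'].shape, group['labels'].shape)
    print(dict(group.attrs))                 # expected keys include 'split' and 'duration'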