nagasurendra committed on
Commit e55f23c · verified · 1 Parent(s): d4f980a

Update app.py

Files changed (1)
  1. app.py +146 -484
app.py CHANGED
@@ -36,6 +36,7 @@ last_metrics: Dict[str, Any] = {}
36
  frame_count: int = 0
37
  SAVE_IMAGE_INTERVAL = 1
38
  DETECTION_CLASSES = ["Longitudinal", "Pothole", "Transverse"]
 
39
 
40
  device = "cuda" if torch.cuda.is_available() else "cpu"
41
  model = YOLO('./data/best.pt').to(device)
@@ -45,7 +46,7 @@ if device == "cuda":
45
  def zip_all_outputs(report_path: str, video_path: str, chart_path: str, map_path: str) -> str:
46
  zip_path = os.path.join(OUTPUT_DIR, f"drone_analysis_outputs_{datetime.now().strftime('%Y%m%d_%H%M%S')}.zip")
47
  try:
48
- with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
49
  if os.path.exists(report_path):
50
  zipf.write(report_path, os.path.basename(report_path))
51
  if os.path.exists(video_path):
@@ -113,154 +114,8 @@ def write_flight_log(frame_count: int, gps_coord: List[float], timestamp: str) -> str:
113
  def check_image_quality(frame: np.ndarray, input_resolution: int) -> bool:
114
  height, width, _ = frame.shape
115
  frame_resolution = width * height
116
- if frame_resolution < 12_000_000:
117
- log_entries.append(f"Frame {frame_count}: Resolution {width}x{height} below 12MP")
118
- return False
119
- if frame_resolution < input_resolution:
120
- log_entries.append(f"Frame {frame_count}: Output resolution below input")
121
- return False
122
- return True
123
-
124
- def update_metrics(detections: List[Dict[str, Any]]) -> Dict[str, Any]:
125
- counts = Counter([det["label"] for det in detections])
126
- return {
127
- "items": [{"type": k, "count": v} for k, v in counts.items()],
128
- "total_detections": len(detections),
129
- "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
130
- }
131
-
132
- def generate_line_chart() -> Optional[str]:
133
- if not detected_counts:
134
- return None
135
- plt.figure(figsize=(4, 2))
136
- plt.plot(detected_counts[-50:], marker='o', color='#FF8C00')
137
- plt.title("Detections Over Time")
138
- plt.xlabel("Frame")
139
- plt.ylabel("Count")
140
- plt.grid(True)
141
- plt.tight_layout()
142
- chart_path = os.path.join(OUTPUT_DIR, f"chart_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png")
143
- plt.savefig(chart_path)
144
- plt.close()
145
- return chart_path
146
-
147
- import cv2
148
- import torch
149
- import gradio as gr
150
- import numpy as np
151
- import os
152
- import json
153
- import logging
154
- import matplotlib.pyplot as plt
155
- import csv
156
- import time
157
- from datetime import datetime
158
- from collections import Counter
159
- from typing import List, Dict, Any, Optional
160
- from ultralytics import YOLO
161
- import piexif
162
- import zipfile
163
-
164
- os.environ["YOLO_CONFIG_DIR"] = "/tmp/Ultralytics"
165
- logging.basicConfig(filename="app.log", level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
166
-
167
- CAPTURED_FRAMES_DIR = "captured_frames"
168
- OUTPUT_DIR = "outputs"
169
- FLIGHT_LOG_DIR = "flight_logs"
170
- os.makedirs(CAPTURED_FRAMES_DIR, exist_ok=True)
171
- os.makedirs(OUTPUT_DIR, exist_ok=True)
172
- os.makedirs(FLIGHT_LOG_DIR, exist_ok=True)
173
- os.chmod(CAPTURED_FRAMES_DIR, 0o777)
174
- os.chmod(OUTPUT_DIR, 0o777)
175
- os.chmod(FLIGHT_LOG_DIR, 0o777)
176
-
177
- log_entries: List[str] = []
178
- detected_counts: List[int] = []
179
- detected_issues: List[str] = []
180
- gps_coordinates: List[List[float]] = []
181
- last_metrics: Dict[str, Any] = {}
182
- frame_count: int = 0
183
- SAVE_IMAGE_INTERVAL = 1
184
- DETECTION_CLASSES = ["Longitudinal", "Pothole", "Transverse"]
185
-
186
- device = "cuda" if torch.cuda.is_available() else "cpu"
187
- model = YOLO('./data/best.pt').to(device)
188
- if device == "cuda":
189
- model.half()
190
-
191
- def zip_all_outputs(report_path: str, video_path: str, chart_path: str, map_path: str) -> str:
192
- zip_path = os.path.join(OUTPUT_DIR, f"drone_analysis_outputs_{datetime.now().strftime('%Y%m%d_%H%M%S')}.zip")
193
- try:
194
- with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
195
- if os.path.exists(report_path):
196
- zipf.write(report_path, os.path.basename(report_path))
197
- if os.path.exists(video_path):
198
- zipf.write(video_path, os.path.join("outputs", os.path.basename(video_path)))
199
- if os.path.exists(chart_path):
200
- zipf.write(chart_path, os.path.join("outputs", os.path.basename(chart_path)))
201
- if os.path.exists(map_path):
202
- zipf.write(map_path, os.path.join("outputs", os.path.basename(map_path)))
203
- for file in detected_issues:
204
- if os.path.exists(file):
205
- zipf.write(file, os.path.join("captured_frames", os.path.basename(file)))
206
- for root, _, files in os.walk(FLIGHT_LOG_DIR):
207
- for file in files:
208
- file_path = os.path.join(root, file)
209
- zipf.write(file_path, os.path.join("flight_logs", file))
210
- log_entries.append(f"Created ZIP: {zip_path}")
211
- return zip_path
212
- except Exception as e:
213
- log_entries.append(f"Error: Failed to create ZIP: {str(e)}")
214
- return ""
215
-
216
- def generate_map(gps_coords: List[List[float]], items: List[Dict[str, Any]]) -> str:
217
- map_path = os.path.join(OUTPUT_DIR, f"map_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png")
218
- plt.figure(figsize=(4, 4))
219
- plt.scatter([x[1] for x in gps_coords], [x[0] for x in gps_coords], c='blue', label='GPS Points')
220
- plt.title("Issue Locations Map")
221
- plt.xlabel("Longitude")
222
- plt.ylabel("Latitude")
223
- plt.legend()
224
- plt.savefig(map_path)
225
- plt.close()
226
- return map_path
227
-
228
- def write_geotag(image_path: str, gps_coord: List[float]) -> bool:
229
- try:
230
- lat = abs(gps_coord[0])
231
- lon = abs(gps_coord[1])
232
- lat_ref = "N" if gps_coord[0] >= 0 else "S"
233
- lon_ref = "E" if gps_coord[1] >= 0 else "W"
234
- exif_dict = piexif.load(image_path) if os.path.exists(image_path) else {"GPS": {}}
235
- exif_dict["GPS"] = {
236
- piexif.GPSIFD.GPSLatitudeRef: lat_ref,
237
- piexif.GPSIFD.GPSLatitude: ((int(lat), 1), (0, 1), (0, 1)),
238
- piexif.GPSIFD.GPSLongitudeRef: lon_ref,
239
- piexif.GPSIFD.GPSLongitude: ((int(lon), 1), (0, 1), (0, 1))
240
- }
241
- piexif.insert(piexif.dump(exif_dict), image_path)
242
- return True
243
- except Exception as e:
244
- log_entries.append(f"Error: Failed to geotag {image_path}: {str(e)}")
245
- return False
246
-
247
- def write_flight_log(frame_count: int, gps_coord: List[float], timestamp: str) -> str:
248
- log_path = os.path.join(FLIGHT_LOG_DIR, f"flight_log_{frame_count:06d}.csv")
249
- try:
250
- with open(log_path, 'w', newline='') as csvfile:
251
- writer = csv.writer(csvfile)
252
- writer.writerow(["Frame", "Timestamp", "Latitude", "Longitude", "Speed_ms", "Satellites", "Altitude_m"])
253
- writer.writerow([frame_count, timestamp, gps_coord[0], gps_coord[1], 5.0, 12, 60])
254
- return log_path
255
- except Exception as e:
256
- log_entries.append(f"Error: Failed to write flight log {log_path}: {str(e)}")
257
- return ""
258
-
259
- def check_image_quality(frame: np.ndarray, input_resolution: int) -> bool:
260
- height, width, _ = frame.shape
261
- frame_resolution = width * height
262
- if frame_resolution < 12_000_000:
263
- log_entries.append(f"Frame {frame_count}: Resolution {width}x{height} below 12MP")
264
  return False
265
  if frame_resolution < input_resolution:
266
  log_entries.append(f"Frame {frame_count}: Output resolution below input")
@@ -341,7 +196,7 @@ def generate_report(
341
  "- Terrain Follow Mode: Enabled",
342
  "",
343
  "## 3. Quality Check Results",
344
- f"- Resolution: 4000x3000 (12 MP)",
345
  "- Overlap: 85%",
346
  "- Camera Angle: 90° nadir",
347
  "- Drone Speed: ≤ 5 m/s",
@@ -350,60 +205,65 @@ def generate_report(
350
  "",
351
  "## 4. AI/ML Analytics",
352
  f"- Total Frames Processed: {frame_count}",
353
- f"- Detection Frames: {detection_frame_count} ({detection_frame_count/frame_count*100:.2f}%)",
354
- f"- Total Detections: {metrics['total_detections']}",
355
- " - Breakdown:"
356
  ]
357
 
358
  for item in metrics.get("items", []):
359
  percentage = (item["count"] / metrics["total_detections"] * 100) if metrics["total_detections"] > 0 else 0
360
- report_content.append(f" - {item['type']}: {item['count']} ({percentage:.2f}%)")
361
  report_content.extend([
362
- f"- Processing Time: {total_time:.2f} seconds",
363
- f"- Average Frame Time: {sum(frame_times)/len(frame_times):.2f} ms" if frame_times else "- Average Frame Time: N/A",
364
- f"- Average Resize Time: {sum(resize_times)/len(resize_times):.2f} ms" if resize_times else "- Average Resize Time: N/A",
365
- f"- Average Inference Time: {sum(inference_times)/len(inference_times):.2f} ms" if inference_times else "- Average Inference Time: N/A",
366
- f"- Average I/O Time: {sum(io_times)/len(io_times):.2f} ms" if io_times else "- Average I/O Time: N/A",
367
- f"- Timestamp: {metrics.get('timestamp', 'N/A')}",
368
- "- Summary: Potholes and cracks detected in high-traffic segments.",
369
  "",
370
  "## 5. Output File Structure",
371
  "- ZIP file contains:",
372
- " - `drone_analysis_report_<timestamp>.md`: This report",
373
- " - `outputs/processed_output.mp4`: Processed video with annotations",
374
- " - `outputs/chart_<timestamp>.png`: Detection trend chart",
375
- " - `outputs/map_<timestamp>.png`: Issue locations map",
376
- " - `captured_frames/detected_<frame>.jpg`: Geotagged images for detected issues",
377
- " - `flight_logs/flight_log_<frame>.csv`: Flight logs matching image frames",
378
- "- Note: Images and logs share frame numbers (e.g., `detected_000001.jpg` corresponds to `flight_log_000001.csv`).",
 
 
 
 
379
  "",
380
  "## 6. Geotagged Images",
381
  f"- Total Images: {len(detected_issues)}",
382
- f"- Storage: Data Lake `/project_xyz/images/{datetime.now().strftime('%Y-%m-%d')}`",
383
  "",
384
- "| Frame | Issue Type | GPS (Lat, Lon) | Timestamp | Confidence | Image Path |",
385
- "|-------|------------|----------------|-----------|------------|------------|"
386
  ])
387
 
388
  for detection in all_detections[:100]:
389
  report_content.append(
390
- f"| {detection['frame']:06d} | {detection['label']} | ({detection['gps'][0]:.6f}, {detection['gps'][1]:.6f}) | {detection['timestamp']} | {detection['conf']:.2f} | captured_frames/{os.path.basename(detection['path'])} |"
391
  )
392
 
393
  report_content.extend([
394
- "",
395
- "## 7. Flight Logs",
396
- f"- Total Logs: {len(detected_issues)}",
397
- f"- Storage: Data Lake `/project_xyz/flight_logs/{datetime.now().strftime('%Y-%m-%d')}`",
398
- "",
399
- "| Frame | Timestamp | Latitude | Longitude | Speed (m/s) | Satellites | Altitude (m) | Log Path |",
400
- "|-------|-----------|----------|-----------|-------------|------------|--------------|----------|"
401
  ])
402
 
403
  for detection in all_detections[:100]:
404
- log_path = f"flight_logs/flight_log_{detection['frame']:06d}.csv"
405
  report_content.append(
406
- f"| {detection['frame']:06d} | {detection['timestamp']} | {detection['gps'][0]:.6f} | {detection['gps'][1]:.6f} | 5.0 | 12 | 60 | {log_path} |"
 
407
  )
408
 
409
  report_content.extend([
@@ -411,16 +271,16 @@ def generate_report(
411
  "## 8. Processed Video",
412
  f"- Path: outputs/processed_output.mp4",
413
  f"- Frames: {output_frames}",
414
- f"- FPS: {output_fps:.2f}",
415
- f"- Duration: {output_duration:.2f} seconds",
416
  "",
417
  "## 9. Visualizations",
418
- f"- Detection Trend Chart: outputs/chart_{timestamp}.png",
419
- f"- Issue Locations Map: outputs/map_{timestamp}.png",
420
  "",
421
  "## 10. Processing Timestamps",
422
- f"- Total Processing Time: {total_time:.2f} seconds",
423
- "- Log Entries (Last 10):"
424
  ])
425
 
426
  for entry in log_entries[-10:]:
@@ -433,70 +293,84 @@ def generate_report(
433
  "- PD/RO Comments: [Pending]",
434
  "",
435
  "## 12. Recommendations",
436
- "- Repair potholes in high-traffic segments.",
437
- "- Seal cracks to prevent degradation.",
438
- "- Schedule follow-up survey.",
439
  "",
440
  "## 13. Data Lake References",
441
- f"- Images: `/project_xyz/images/{datetime.now().strftime('%Y-%m-%d')}`",
442
- f"- Flight Logs: `/project_xyz/flight_logs/{datetime.now().strftime('%Y-%m-%d')}`",
443
- f"- Video: `/project_xyz/videos/processed_output_{datetime.now().strftime('%Y%m%d')}.mp4`",
444
- f"- DAMS Dashboard: `/project_xyz/dams/{datetime.now().strftime('%Y-%m-%d')}`"
 
 
 
 
 
445
  ])
446
 
 
 
 
 
 
 
447
  try:
448
- with open(report_path, 'w') as f:
449
- f.write("\n".join(report_content))
450
- log_entries.append(f"Report saved: {report_path}")
451
  return report_path
452
  except Exception as e:
453
- log_entries.append(f"Error: Failed to save report: {str(e)}")
454
  return ""
 
455
 
456
- def process_video(video, resize_width=4000, resize_height=3000, frame_skip=5):
457
- global frame_count, last_metrics, detected_counts, detected_issues, gps_coordinates, log_entries
458
  frame_count = 0
459
  detected_counts.clear()
460
  detected_issues.clear()
461
  gps_coordinates.clear()
462
- log_entries.clear()
463
- last_metrics = {}
464
 
465
  if video is None:
466
  log_entries.append("Error: No video uploaded")
467
- return None, json.dumps({"error": "No video uploaded"}, indent=2), "\n".join(log_entries), [], None, None, None
468
 
469
  log_entries.append("Starting video processing...")
470
  start_time = time.time()
471
  cap = cv2.VideoCapture(video)
472
  if not cap.isOpened():
473
- log_entries.append("Error: Could not open video file")
474
- return None, json.dumps({"error": "Could not open video file"}, indent=2), "\n".join(log_entries), [], None, None, None
475
 
476
  frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
477
- frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
478
- input_resolution = frame_width * frame_height
 
479
  fps = cap.get(cv2.CAP_PROP_FPS)
480
- total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
481
- log_entries.append(f"Input video: {frame_width}x{frame_height}, {fps} FPS, {total_frames} frames")
482
 
483
- out_width, out_height = resize_width, resize_height
484
- output_path = os.path.join(OUTPUT_DIR, "processed_output.mp4")
485
- out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (out_width, out_height))
486
  if not out.isOpened():
487
- log_entries.append("Error: Failed to initialize mp4v codec")
488
  cap.release()
489
- return None, json.dumps({"error": "mp4v codec failed"}, indent=2), "\n".join(log_entries), [], None, None, None
490
 
491
- processed_frames = 0
492
  all_detections = []
493
  frame_times = []
494
  inference_times = []
495
  resize_times = []
496
  io_times = []
497
  detection_frame_count = 0
498
- output_frame_count = 0
499
- last_annotated_frame = None
 
500
 
501
  while True:
502
  ret, frame = cap.read()
@@ -508,228 +382,10 @@ def process_video(video, resize_width=4000, resize_height=3000, frame_skip=5):
508
  processed_frames += 1
509
  frame_start = time.time()
510
 
511
- frame = cv2.resize(frame, (out_width, out_height))
512
- resize_times.append((time.time() - frame_start) * 1000)
513
-
514
- if not check_image_quality(frame, input_resolution):
515
- continue
516
-
517
- inference_start = time.time()
518
- results = model(frame, verbose=False, conf=0.5, iou=0.7)
519
- annotated_frame = results[0].plot()
520
- inference_times.append((time.time() - inference_start) * 1000)
521
-
522
- frame_timestamp = frame_count / fps if fps > 0 else 0
523
- timestamp_str = f"{int(frame_timestamp // 60)}:{int(frame_timestamp % 60):02d}"
524
-
525
- gps_coord = [17.385044 + (frame_count * 0.0001), 78.486671 + (frame_count * 0.0001)]
526
- gps_coordinates.append(gps_coord)
527
-
528
- io_start = time.time()
529
- frame_detections = []
530
- for detection in results[0].boxes:
531
- cls = int(detection.cls)
532
- conf = float(detection.conf)
533
- box = detection.xyxy[0].cpu().numpy().astype(int).tolist()
534
- label = model.names[cls]
535
- if label in DETECTION_CLASSES:
536
- frame_detections.append({
537
- "label": label,
538
- "box": box,
539
- "conf": conf,
540
- "gps": gps_coord,
541
- "timestamp": timestamp_str,
542
- "frame": frame_count,
543
- "path": os.path.join(CAPTURED_FRAMES_DIR, f"detected_{frame_count:06d}.jpg")
544
- })
545
- log_entries.append(f"Frame {frame_count} at {timestamp_str}: Detected {label} with confidence {conf:.2f}")
546
-
547
- if frame_detections:
548
- detection_frame_count += 1
549
- if detection_frame_count % SAVE_IMAGE_INTERVAL == 0:
550
- captured_frame_path = os.path.join(CAPTURED_FRAMES_DIR, f"detected_{frame_count:06d}.jpg")
551
- if cv2.imwrite(captured_frame_path, annotated_frame):
552
- if write_geotag(captured_frame_path, gps_coord):
553
- detected_issues.append(captured_frame_path)
554
- if len(detected_issues) > 1000: # Limit to 1000 images
555
- detected_issues.pop(0)
556
- else:
557
- log_entries.append(f"Frame {frame_count}: Geotagging failed")
558
- else:
559
- log_entries.append(f"Error: Failed to save {captured_frame_path}")
560
- flight_log_path = write_flight_log(frame_count, gps_coord, timestamp_str)
561
-
562
- io_times.append((time.time() - io_start) * 1000)
563
-
564
- out.write(annotated_frame)
565
- output_frame_count += 1
566
- last_annotated_frame = annotated_frame
567
- if frame_skip > 1:
568
- for _ in range(frame_skip - 1):
569
- out.write(annotated_frame)
570
- output_frame_count += 1
571
-
572
- detected_counts.append(len(frame_detections))
573
- all_detections.extend(frame_detections)
574
-
575
- frame_times.append((time.time() - frame_start) * 1000)
576
- if len(log_entries) > 50:
577
- log_entries.pop(0)
578
-
579
- if time.time() - start_time > 600:
580
- log_entries.append("Error: Processing timeout after 600 seconds")
581
- break
582
-
583
- while output_frame_count < total_frames and last_annotated_frame is not None:
584
- out.write(last_annotated_frame)
585
- output_frame_count += 1
586
-
587
- last_metrics = update_metrics(all_detections)
588
-
589
- cap.release()
590
- out.release()
591
-
592
- cap = cv2.VideoCapture(output_path)
593
- output_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
594
- output_fps = cap.get(cv2.CAP_PROP_FPS)
595
- output_duration = output_frames / output_fps if output_fps > 0 else 0
596
- cap.release()
597
-
598
- total_time = time.time() - start_time
599
- log_entries.append(f"Output video: {output_frames} frames, {output_fps:.2f} FPS, {output_duration:.2f} seconds")
600
-
601
- log_entries.append("Generating chart and map...")
602
- chart_path = generate_line_chart()
603
- map_path = generate_map(gps_coordinates[-5:], all_detections)
604
-
605
- report_path = generate_report(
606
- last_metrics,
607
- detected_issues,
608
- gps_coordinates,
609
- all_detections,
610
- frame_count,
611
- total_time,
612
- output_frames,
613
- output_fps,
614
- output_duration,
615
- detection_frame_count,
616
- chart_path,
617
- map_path,
618
- frame_times,
619
- resize_times,
620
- inference_times,
621
- io_times
622
- )
623
-
624
- log_entries.append("Creating output ZIP...")
625
- output_zip_path = zip_all_outputs(report_path, output_path, chart_path, map_path)
626
-
627
- log_entries.append(f"Processing completed in {total_time:.2f} seconds")
628
- return (
629
- output_path,
630
- json.dumps(last_metrics, indent=2),
631
- "\n".join(log_entries[-10:]),
632
- detected_issues,
633
- chart_path,
634
- map_path,
635
- output_zip_path
636
- )
637
-
638
- with gr.Blocks(theme=gr.themes.Soft(primary_hue="orange")) as iface:
639
- gr.Markdown("# NHAI Road Defect Detection Dashboard")
640
- with gr.Row():
641
- with gr.Column(scale=3):
642
- video_input = gr.Video(label="Upload Video (12MP recommended)")
643
- width_slider = gr.Slider(320, 4000, value=4000, label="Output Width", step=1)
644
- height_slider = gr.Slider(240, 3000, value=3000, label="Output Height", step=1)
645
- skip_slider = gr.Slider(1, 10, value=5, label="Frame Skip", step=1)
646
- process_btn = gr.Button("Process Video", variant="primary")
647
- with gr.Column(scale=1):
648
- metrics_output = gr.Textbox(label="Detection Metrics", lines=5, interactive=False)
649
- with gr.Row():
650
- video_output = gr.Video(label="Processed Video")
651
- issue_gallery = gr.Gallery(label="Detected Issues", columns=4, height="auto", object_fit="contain")
652
- with gr.Row():
653
- chart_output = gr.Image(label="Detection Trend")
654
- map_output = gr.Image(label="Issue Locations Map")
655
- with gr.Row():
656
- logs_output = gr.Textbox(label="Logs", lines=5, interactive=False)
657
- with gr.Row():
658
- gr.Markdown("## Download Results")
659
- with gr.Row():
660
- output_zip_download = gr.File(label="Download All Outputs (ZIP)")
661
-
662
- process_btn.click(
663
- fn=process_video,
664
- inputs=[video_input, width_slider, height_slider, skip_slider],
665
- outputs=[
666
- video_output,
667
- metrics_output,
668
- logs_output,
669
- issue_gallery,
670
- chart_output,
671
- map_output,
672
- output_zip_download
673
- ]
674
- )
675
-
676
- if __name__ == "__main__":
677
- iface.launch()
678
-
679
- def process_video(video, resize_width=4000, resize_height=3000, frame_skip=5):
680
- global frame_count, last_metrics, detected_counts, detected_issues, gps_coordinates, log_entries
681
- frame_count = 0
682
- detected_counts.clear()
683
- detected_issues.clear()
684
- gps_coordinates.clear()
685
- log_entries.clear()
686
- last_metrics = {}
687
-
688
- if video is None:
689
- log_entries.append("Error: No video uploaded")
690
- return None, json.dumps({"error": "No video uploaded"}, indent=2), "\n".join(log_entries), [], None, None, None
691
-
692
- log_entries.append("Starting video processing...")
693
- start_time = time.time()
694
- cap = cv2.VideoCapture(video)
695
- if not cap.isOpened():
696
- log_entries.append("Error: Could not open video file")
697
- return None, json.dumps({"error": "Could not open video file"}, indent=2), "\n".join(log_entries), [], None, None, None
698
-
699
- frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
700
- frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
701
- input_resolution = frame_width * frame_height
702
- fps = cap.get(cv2.CAP_PROP_FPS)
703
- total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
704
- log_entries.append(f"Input video: {frame_width}x{frame_height}, {fps} FPS, {total_frames} frames")
705
-
706
- out_width, out_height = resize_width, resize_height
707
- output_path = os.path.join(OUTPUT_DIR, "processed_output.mp4")
708
- out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (out_width, out_height))
709
- if not out.isOpened():
710
- log_entries.append("Error: Failed to initialize mp4v codec")
711
- cap.release()
712
- return None, json.dumps({"error": "mp4v codec failed"}, indent=2), "\n".join(log_entries), [], None, None, None
713
-
714
- processed_frames = 0
715
- all_detections = []
716
- frame_times = []
717
- inference_times = []
718
- resize_times = []
719
- io_times = []
720
- detection_frame_count = 0
721
- output_frame_count = 0
722
- last_annotated_frame = None
723
-
724
- while True:
725
- ret, frame = cap.read()
726
- if not ret:
727
  break
728
- frame_count += 1
729
- if frame_count % frame_skip != 0:
730
- continue
731
- processed_frames += 1
732
- frame_start = time.time()
733
 
734
  frame = cv2.resize(frame, (out_width, out_height))
735
  resize_times.append((time.time() - frame_start) * 1000)
@@ -737,18 +393,18 @@ def process_video(video, resize_width=4000, resize_height=3000, frame_skip=5):
737
  if not check_image_quality(frame, input_resolution):
738
  continue
739
 
740
- inference_start = time.time()
741
- results = model(frame, verbose=False, conf=0.5, iou=0.7)
742
  annotated_frame = results[0].plot()
743
- inference_times.append((time.time() - inference_start) * 1000)
744
 
745
  frame_timestamp = frame_count / fps if fps > 0 else 0
746
- timestamp_str = f"{int(frame_timestamp // 60)}:{int(frame_timestamp % 60):02d}"
747
 
748
  gps_coord = [17.385044 + (frame_count * 0.0001), 78.486671 + (frame_count * 0.0001)]
749
  gps_coordinates.append(gps_coord)
750
 
751
- io_start = time.time()
752
  frame_detections = []
753
  for detection in results[0].boxes:
754
  cls = int(detection.cls)
@@ -756,37 +412,38 @@ def process_video(video, resize_width=4000, resize_height=3000, frame_skip=5):
756
  box = detection.xyxy[0].cpu().numpy().astype(int).tolist()
757
  label = model.names[cls]
758
  if label in DETECTION_CLASSES:
759
- frame_detections.append({
760
  "label": label,
761
  "box": box,
762
  "conf": conf,
763
  "gps": gps_coord,
764
  "timestamp": timestamp_str,
765
  "frame": frame_count,
766
- "path": os.path.join(CAPTURED_FRAMES_DIR, f"detected_{frame_count:06d}.jpg")
767
- })
 
768
  log_entries.append(f"Frame {frame_count} at {timestamp_str}: Detected {label} with confidence {conf:.2f}")
769
 
770
  if frame_detections:
771
  detection_frame_count += 1
772
  if detection_frame_count % SAVE_IMAGE_INTERVAL == 0:
773
- captured_frame_path = os.path.join(CAPTURED_FRAMES_DIR, f"detected_{frame_count:06d}.jpg")
774
  if cv2.imwrite(captured_frame_path, annotated_frame):
775
  if write_geotag(captured_frame_path, gps_coord):
776
  detected_issues.append(captured_frame_path)
777
- if len(detected_issues) > 1000: # Limit to 1000 images
778
- detected_issues.pop(0)
779
  else:
780
  log_entries.append(f"Frame {frame_count}: Geotagging failed")
781
  else:
782
- log_entries.append(f"Error: Failed to save {captured_frame_path}")
783
- flight_log_path = write_flight_log(frame_count, gps_coord, timestamp_str)
784
 
785
- io_times.append((time.time() - io_start) * 1000)
786
 
787
  out.write(annotated_frame)
788
  output_frame_count += 1
789
- last_annotated_frame = annotated_frame
790
  if frame_skip > 1:
791
  for _ in range(frame_skip - 1):
792
  out.write(annotated_frame)
@@ -803,28 +460,32 @@ def process_video(video, resize_width=4000, resize_height=3000, frame_skip=5):
803
  log_entries.append("Error: Processing timeout after 600 seconds")
804
  break
805
 
806
- while output_frame_count < total_frames and last_annotated_frame is not None:
807
- out.write(last_annotated_frame)
808
  output_frame_count += 1
809
 
810
  last_metrics = update_metrics(all_detections)
811
 
812
- cap.release()
813
  out.release()
 
814
 
 
815
  cap = cv2.VideoCapture(output_path)
816
- output_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
817
- output_fps = cap.get(cv2.CAP_PROP_FPS)
818
- output_duration = output_frames / output_fps if output_fps > 0 else 0
819
- cap.release()
 
 
 
 
 
820
 
821
  total_time = time.time() - start_time
822
- log_entries.append(f"Output video: {output_frames} frames, {output_fps:.2f} FPS, {output_duration:.2f} seconds")
823
 
824
- log_entries.append("Generating chart and map...")
825
  chart_path = generate_line_chart()
826
  map_path = generate_map(gps_coordinates[-5:], all_detections)
827
-
828
  report_path = generate_report(
829
  last_metrics,
830
  detected_issues,
@@ -843,11 +504,8 @@ def process_video(video, resize_width=4000, resize_height=3000, frame_skip=5):
843
  inference_times,
844
  io_times
845
  )
846
-
847
- log_entries.append("Creating output ZIP...")
848
  output_zip_path = zip_all_outputs(report_path, output_path, chart_path, map_path)
849
 
850
- log_entries.append(f"Processing completed in {total_time:.2f} seconds")
851
  return (
852
  output_path,
853
  json.dumps(last_metrics, indent=2),
@@ -862,37 +520,41 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="orange")) as iface:
862
  gr.Markdown("# NHAI Road Defect Detection Dashboard")
863
  with gr.Row():
864
  with gr.Column(scale=3):
865
- video_input = gr.Video(label="Upload Video (12MP recommended)")
866
- width_slider = gr.Slider(320, 4000, value=4000, label="Output Width", step=1)
867
- height_slider = gr.Slider(240, 3000, value=3000, label="Output Height", step=1)
868
- skip_slider = gr.Slider(1, 10, value=5, label="Frame Skip", step=1)
869
- process_btn = gr.Button("Process Video", variant="primary")
870
  with gr.Column(scale=1):
871
- metrics_output = gr.Textbox(label="Detection Metrics", lines=5, interactive=False)
 
872
  with gr.Row():
873
- video_output = gr.Video(label="Processed Video")
874
- issue_gallery = gr.Gallery(label="Detected Issues", columns=4, height="auto", object_fit="contain")
875
  with gr.Row():
876
- chart_output = gr.Image(label="Detection Trend")
877
- map_output = gr.Image(label="Issue Locations Map")
 
878
  with gr.Row():
879
- logs_output = gr.Textbox(label="Logs", lines=5, interactive=False)
880
  with gr.Row():
 
881
  gr.Markdown("## Download Results")
 
882
  with gr.Row():
883
- output_zip_download = gr.File(label="Download All Outputs (ZIP)")
884
 
885
  process_btn.click(
886
  fn=process_video,
887
  inputs=[video_input, width_slider, height_slider, skip_slider],
888
  outputs=[
889
- video_output,
890
  metrics_output,
891
- logs_output,
892
- issue_gallery,
893
- chart_output,
894
- map_output,
895
- output_zip_download
896
  ]
897
  )
898
 
 
36
  frame_count: int = 0
37
  SAVE_IMAGE_INTERVAL = 1
38
  DETECTION_CLASSES = ["Longitudinal", "Pothole", "Transverse"]
39
+ MAX_IMAGES = 500 # Limit saved images to reduce ZIP time
40
 
41
  device = "cuda" if torch.cuda.is_available() else "cpu"
42
  model = YOLO('./data/best.pt').to(device)
 
46
  def zip_all_outputs(report_path: str, video_path: str, chart_path: str, map_path: str) -> str:
47
  zip_path = os.path.join(OUTPUT_DIR, f"drone_analysis_outputs_{datetime.now().strftime('%Y%m%d_%H%M%S')}.zip")
48
  try:
49
+ with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_STORED) as zipf:  # Use ZIP_STORED (no compression) for faster archiving
50
  if os.path.exists(report_path):
51
  zipf.write(report_path, os.path.basename(report_path))
52
  if os.path.exists(video_path):
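The hunk above swaps ZIP_DEFLATED for ZIP_STORED, which writes archive entries uncompressed and trades file size for packaging speed. A minimal sketch of that pattern, assuming a plain list of file paths (the names here are illustrative, not the app's):

import os
import zipfile

def zip_stored(paths, zip_path):
    # Store files without compression; silently skip paths that do not exist.
    with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_STORED) as zipf:
        for path in paths:
            if os.path.exists(path):
                zipf.write(path, os.path.basename(path))
    return zip_path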
 
114
  def check_image_quality(frame: np.ndarray, input_resolution: int) -> bool:
115
  height, width, _ = frame.shape
116
  frame_resolution = width * height
117
+ if frame_resolution < 2_073_600: # 1920x1080 minimum
118
+ log_entries.append(f"Frame {frame_count}: Resolution {width}x{height} below 2MP")
119
  return False
120
  if frame_resolution < input_resolution:
121
  log_entries.append(f"Frame {frame_count}: Output resolution below input")
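This hunk lowers the quality floor from 12 MP to roughly 2 MP (1920x1080 = 2,073,600 pixels) while still rejecting frames smaller than the input. A standalone sketch of the same gate, assuming frames arrive as HxWxC numpy arrays:

import numpy as np

MIN_PIXELS = 1920 * 1080  # the 2_073_600 threshold used above

def meets_resolution(frame: np.ndarray, input_resolution: int) -> bool:
    # Reject frames below Full HD or below the source resolution.
    height, width = frame.shape[:2]
    pixels = width * height
    return pixels >= MIN_PIXELS and pixels >= input_resolution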
 
196
  "- Terrain Follow Mode: Enabled",
197
  "",
198
  "## 3. Quality Check Results",
199
+ f"- Resolution: 1920x1080",
200
  "- Overlap: 85%",
201
  "- Camera Angle: 90° nadir",
202
  "- Drone Speed: ≤ 5 m/s",
 
205
  "",
206
  "## 4. AI/ML Analytics",
207
  f"- Total Frames Processed: {frame_count}",
208
+ f"- Detection Frames: {detection_frame_count} ({detection_frame_count/frame_count*100:.1f}%)",
209
+ f"- Total Detections: {metrics['total_detections']}",
210
+ " - Breakdown:",
211
  ]
212
 
213
  for item in metrics.get("items", []):
214
  percentage = (item["count"] / metrics["total_detections"] * 100) if metrics["total_detections"] > 0 else 0
215
+ report_content.append(f" - {item['type']}: {item['count']} ({percentage:.1f}%)")
216
  report_content.extend([
217
+ f"- Processing Time: {total_time:.1f} seconds",
218
+ f"- Average Frame Time: {sum(frame_times)/len(frame_times):.1f} ms" if frame_times else "- Average Frame Time: N/A",
219
+ f"- Average Resize Time: {sum(resize_times)/len(resize_times):.1f} ms" if resize_times else "- Average Resize Time: N/A",
220
+ f"- Average Inference Time: {sum(inference_times)/len(inference_times):.1f} ms" if inference_times else "- Average Inference Time: N/A",
221
+ f"- Average I/O Time: {sum(io_times)/len(io_times):.1f} ms" if io_times else "- Average I/O Time: N/A",
222
+ f"- Timestamp: {metrics.get('timestamp', 'N/A')}",
223
+ "- Summary: Potholes and cracks detected in high-traffic areas.",
224
  "",
225
  "## 5. Output File Structure",
226
  "- ZIP file contains:",
227
+ " - `drone_analysis_report_<timestamp>.md`: This report",
228
+ " - `outputs/processed_output.mp4`: Processed video with annotations",
229
+ " - `outputs/chart_<timestamp>.jpg`: Detection trend chart",
230
+ " - `outputs/map_<timestamp>.jpg`: Issue locations map",
231
+ " - `captured_frames/detected_<frame>.jpg`: Geotagged images for detected issues",
232
+ " - `flight_logs/flight_log_<frame>.csv`: Flight logs matching image frames",
233
+ "- Note: Images and logs share frame numbers (e.g., `detected_000001.jpg` corresponds to `flight_log_000001.csv`).",
234
+ ])
235
+
236
+ report_content.extend([
237
+ "",
238
  "",
239
  "## 6. Geotagged Images",
240
  f"- Total Images: {len(detected_issues)}",
241
+ f"- Storage: Data Lake `/project_xyz/images/{datetime.now().strftime('%Y%m%d')}`",
242
  "",
243
+ "| Frame | Issue Type | GPS (Lat, Lon) | Timestamp | Confidence | Image Path |",
244
+ "|-------|------------|----------------|-----------|------------|------------|",
245
  ])
246
 
247
  for detection in all_detections[:100]:
248
  report_content.append(
249
+ f"| {detection['frame']:06d} | {detection['label']} | ({detection['gps'][0]:.6f}, {detection['gps'][1]:.6f}) | {detection['timestamp']} | {detection['conf']:.1f} | captured_frames/{os.path.basename(detection['path'])} |"
250
  )
251
 
252
  report_content.extend([
253
+ "",
254
+ "## 7. Flight Logs",
255
+ f"- Total Logs: {len(detected_issues)}",
256
+ f"- Storage: Data Lake `/project_xyz/flight_logs/{datetime.now().strftime('%Y%m%d')}`",
257
+ "",
258
+ "| Frame | Timestamp | Latitude | Longitude | Speed (m/s) | Satellites | Altitude (m) | Log Path |",
259
+ "|-------|-----------|----------|-----------|-------------|------------|--------------|----------|",
260
  ])
261
 
262
  for detection in all_detections[:100]:
263
+ log_path = f"flight_logs/flight_log_{detection['frame']:06d}.csv"
264
  report_content.append(
265
+ f"| {detection['frame']:06d} | {detection['timestamp']} | {detection['gps'][0]:.6f} | {detection['gps'][1]:.6f} | 5.0 | 12 | 60 | {log_path} |"
267
  )
268
 
269
  report_content.extend([
 
271
  "## 8. Processed Video",
272
  f"- Path: outputs/processed_output.mp4",
273
  f"- Frames: {output_frames}",
274
+ f"- FPS: {output_fps:.1f}",
275
+ f"- Duration: {output_duration:.1f} seconds",
276
  "",
277
  "## 9. Visualizations",
278
+ f"- Detection Trend Chart: outputs/chart_{timestamp}.jpg",
279
+ f"- Issue Locations Map: outputs/map_{timestamp}.jpg",
280
  "",
281
  "## 10. Processing Timestamps",
282
+ f"- Total Processing Time: {total_time:.1f} seconds",
283
+ "- Log Entries (Last 10):",
284
  ])
285
 
286
  for entry in log_entries[-10:]:
 
293
  "- PD/RO Comments: [Pending]",
294
  "",
295
  "## 12. Recommendations",
296
+ "- Repair potholes in high-traffic areas.",
297
+ "- Seal cracks to prevent further degradation.",
298
+ "- Schedule a follow-up survey.",
299
  "",
300
  "## 13. Data Lake References",
301
+ f"- Images: `/project_xyz/images/{datetime.now().strftime('%Y%m%d')}`",
302
+ f"- Flight Logs: `/project_xyz/flight_logs/{timestamp}`",
303
+ f"- Video: `/project_xyz/videos/processed_output_{timestamp}.mp4`",
305
+ f"- DAMS Dashboard: `/project_xyz/dams/{datetime.now().strftime('%Y%m%d')}`",
306
+ "",
307
+ "## 14. Captured Images",
308
+ "Below are the images from the captured frames directory showing detected issues:",
309
+ "",
310
  ])
311
 
312
+ # Add image references for all captured images in detected_issues
313
+ for image_path in detected_issues:
314
+ if os.path.exists(image_path):
315
+ image_name = os.path.basename(image_path)
316
+ report_content.append(f"![Captured image at {image_name}](captured_frames/{image_name})")
317
+
318
  try:
319
+ with open(report_path, 'w') as file:
320
+ file.write("\n".join(report_content))
321
+ log_entries.append(f"Report saved at: {report_path}")
322
  return report_path
323
  except Exception as e:
324
+ log_entries.append(f"Error: Failed to save report: {str(e)}")
325
  return ""
327
 
328
+ def process_video(video, resize_width=1920, resize_height=1080, frame_skip=10):
329
+ global frame_count, last_metrics, detected_counts, detected_issues, gps_coordinates, log_entries
330
  frame_count = 0
331
  detected_counts.clear()
332
  detected_issues.clear()
333
  gps_coordinates.clear()
334
+ log_entries.clear()
335
+ last_metrics = {}
336
 
337
  if video is None:
338
  log_entries.append("Error: No video uploaded")
339
+ return None, None, None, [], None, None, None
340
 
341
  log_entries.append("Starting video processing...")
342
  start_time = time.time()
343
  cap = cv2.VideoCapture(video)
344
  if not cap.isOpened():
345
+ log_entries.append("Error: Could not open video file")
346
+ return None, json.dumps({"error": "Could not open video file"}, indent=2), None, [], None, None, None
347
 
348
  frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
349
+ frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
350
+ input_resolution = frame_width * frame_height
352
  fps = cap.get(cv2.CAP_PROP_FPS)
353
+ total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
354
+ log_entries.append(f"Input video: {frame_width}x{frame_height} at {fps} FPS, {total_frames} frames")
355
 
356
+ out_width, out_height = resize_width, resize_height
357
+ output_path = os.path.join(OUTPUT_DIR, "processed_video.mp4")
358
+ out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'XVID'), fps, (out_width, out_height))  # Switch to XVID codec
359
  if not out.isOpened():
360
+ log_entries.append("Error: Failed to initialize video writer")
361
  cap.release()
362
+ return None, json.dumps({"error": "Video writer failed"}), None, [], None, None, None
363
 
364
+ processed_frames = 0
365
  all_detections = []
366
  frame_times = []
367
  inference_times = []
368
  resize_times = []
369
  io_times = []
370
  detection_frame_count = 0
371
+ output_frame_count = 0
372
+ last_detected_frame = None
373
+ disk_space_threshold = 1024 * 1024 * 1024 # 1GB minimum disk space
374
 
375
  while True:
376
  ret, frame = cap.read()
 
382
  processed_frames += 1
383
  frame_start = time.time()
384
 
385
+ # Check disk space
386
+ stat = os.statvfs(os.path.dirname(output_path))
+ if stat.f_frsize * stat.f_bavail < disk_space_threshold:
387
+ log_entries.append("Error: Insufficient disk space")
388
  break
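The disk-space guard added in this hunk relies on os.statvfs, which exists only on POSIX systems. A portable sketch of the same check using shutil.disk_usage (the 1 GB figure mirrors disk_space_threshold and is illustrative):

import shutil

def has_free_space(directory: str, min_bytes: int = 1024 ** 3) -> bool:
    # True when the filesystem holding `directory` has at least `min_bytes` free.
    return shutil.disk_usage(directory).free >= min_bytes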
 
 
 
 
 
389
 
390
  frame = cv2.resize(frame, (out_width, out_height))
391
  resize_times.append((time.time() - frame_start) * 1000)
 
393
  if not check_image_quality(frame, input_resolution):
394
  continue
395
 
396
+ inference_time = time.time()
397
+ results = model(frame, verbose=False, conf=0.5, iou=0.7)
398
  annotated_frame = results[0].plot()
399
+ inference_times.append((time.time() - inference_time) * 1000)
400
 
401
  frame_timestamp = frame_count / fps if fps > 0 else 0
402
+ timestamp_str = f"{int(frame_timestamp / 60):02d}:{int(frame_timestamp % 60):02d}"
403
 
404
  gps_coord = [17.385044 + (frame_count * 0.0001), 78.486671 + (frame_count * 0.0001)]
405
  gps_coordinates.append(gps_coord)
406
 
407
+ io_start_time = time.time()
408
  frame_detections = []
409
  for detection in results[0].boxes:
410
  cls = int(detection.cls)
 
412
  box = detection.xyxy[0].cpu().numpy().astype(int).tolist()
413
  label = model.names[cls]
414
  if label in DETECTION_CLASSES:
415
+ detection_data = {
416
  "label": label,
417
  "box": box,
418
  "conf": conf,
419
  "gps": gps_coord,
420
  "timestamp": timestamp_str,
421
  "frame": frame_count,
422
+ "path": os.path.join(CAPTURED_FRAMES_DIR, f"detected_frame_{frame_count:06d}.jpg")
423
+ }
424
+ frame_detections.append(detection_data)
425
  log_entries.append(f"Frame {frame_count} at {timestamp_str}: Detected {label} with confidence {conf:.2f}")
426
 
427
  if frame_detections:
428
  detection_frame_count += 1
429
  if detection_frame_count % SAVE_IMAGE_INTERVAL == 0:
430
+ captured_frame_path = os.path.join(CAPTURED_FRAMES_DIR, f"detected_frame_{frame_count:06d}.jpg")
431
  if cv2.imwrite(captured_frame_path, annotated_frame):
432
  if write_geotag(captured_frame_path, gps_coord):
433
  detected_issues.append(captured_frame_path)
434
+ if len(detected_issues) > MAX_IMAGES:
435
+ os.remove(detected_issues.pop(0)) # Remove oldest image
436
  else:
437
  log_entries.append(f"Frame {frame_count}: Geotagging failed")
438
  else:
439
+ log_entries.append(f"Error: Failed to save frame at {captured_frame_path}")
440
+ write_flight_log(frame_count, gps_coord, timestamp_str)
441
 
442
+ io_times.append((time.time() - io_start_time) * 1000)
443
 
444
  out.write(annotated_frame)
445
  output_frame_count += 1
446
+ last_detected_frame = annotated_frame
447
  if frame_skip > 1:
448
  for _ in range(frame_skip - 1):
449
  out.write(annotated_frame)
 
460
  log_entries.append("Error: Processing timeout after 600 seconds")
461
  break
462
 
463
+ while output_frame_count < total_frames and last_detected_frame is not None:
464
+ out.write(last_detected_frame)
465
  output_frame_count += 1
466
 
467
  last_metrics = update_metrics(all_detections)
468
 
 
469
  out.release()
470
+ cap.release()
471
 
472
+ # Verify output video
473
  cap = cv2.VideoCapture(output_path)
474
+ if not cap.isOpened():
475
+ log_entries.append("Error: Failed to open output video for verification")
476
+ output_path = None
477
+ else:
478
+ output_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
479
+ output_fps = cap.get(cv2.CAP_PROP_FPS)
480
+ output_duration = output_frames / output_fps if output_fps > 0 else 0
481
+ cap.release()
482
+ log_entries.append(f"Output video: {output_frames} frames, {output_fps:.2f} FPS, {output_duration:.2f} seconds")
483
 
484
  total_time = time.time() - start_time
485
+ log_entries.append(f"Processing completed in {total_time:.2f} seconds")
486
 
 
487
  chart_path = generate_line_chart()
488
  map_path = generate_map(gps_coordinates[-5:], all_detections)
 
489
  report_path = generate_report(
490
  last_metrics,
491
  detected_issues,
 
504
  inference_times,
505
  io_times
506
  )
 
 
507
  output_zip_path = zip_all_outputs(report_path, output_path, chart_path, map_path)
508
 
 
509
  return (
510
  output_path,
511
  json.dumps(last_metrics, indent=2),
 
520
  gr.Markdown("# NHAI Road Defect Detection Dashboard")
521
  with gr.Row():
522
  with gr.Column(scale=3):
523
+ video_input = gr.Video(label="Upload Video")
524
+ width_slider = gr.Slider(320, 1920, value=1920, label="Output Width", step=1)
525
+ height_slider = gr.Slider(240, 1080, value=1080, label="Output Height", step=1)
526
+ skip_slider = gr.Slider(1, 20, value=10, label="Frame Skip", step=1)
527
+ process_btn = gr.Button("Process Video", variant="primary")
528
  with gr.Column(scale=1):
529
+ metrics_output = gr.Textbox(label="Detection Metrics", lines=5, interactive=False)
531
  with gr.Row():
532
+ output_video = gr.Video(label="Processed Video")
533
+ issue_frame_gallery = gr.Gallery(label="Detected Issues", columns=4, height="auto", object_fit="contain")
534
  with gr.Row():
535
+ chart_frame = gr.Image(label="Detection Trend")
536
+ map_frame = gr.Image(label="Issue Locations")
538
  with gr.Row():
539
+ logs_frame = gr.Textbox(label="Logs", lines=5, interactive=False)
540
  with gr.Row():
542
  gr.Markdown("## Download Results")
544
  with gr.Row():
545
+ zip_output_downloaded = gr.File(label="Download All Outputs")
546
 
547
  process_btn.click(
548
  fn=process_video,
549
  inputs=[video_input, width_slider, height_slider, skip_slider],
550
  outputs=[
551
+ output_video,
552
  metrics_output,
553
+ logs_frame,
554
+ issue_frame_gallery,
555
+ chart_frame,
556
+ map_frame,
557
+ zip_output_downloaded
558
  ]
559
  )
560
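The click wiring only renders results if the outputs list lines up with the seven values process_video returns. A sketch of that correspondence, using the component names defined in this block:

# process_video returns, in order:
# (output_path, metrics_json, logs_text, detected_issues, chart_path, map_path, zip_path)
process_btn.click(
    fn=process_video,
    inputs=[video_input, width_slider, height_slider, skip_slider],
    outputs=[
        output_video,           # processed video file
        metrics_output,         # detection metrics as JSON text
        logs_frame,             # recent log lines
        issue_frame_gallery,    # gallery of saved detection frames
        chart_frame,            # detection trend chart image
        map_frame,              # GPS issue map image
        zip_output_downloaded,  # ZIP of all outputs
    ],
)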