# Hugging Face Space: YOLO11 object detection — training script + Gradio video demo.
from ultralytics import YOLO
import os

# Dataset configuration YAML (class names, train/val paths) — expected in
# the repository root; adjust if your config lives elsewhere.
config_path = './config.yaml'

# Build a fresh YOLO11-nano model from its architecture definition.
# Other sizes (yolo11s, yolo11m, ...) can be substituted here.
model = YOLO("yolo11n.yaml")

# Train for one epoch; Ultralytics also writes its own checkpoints under
# runs/detect/train/weights during training.
results = model.train(data=config_path, epochs=1)

# Directory where we save the final weights for the inference app below.
save_dir = './runs/detect/train/weights'
# exist_ok=True replaces the race-prone exists()/makedirs() pair.
os.makedirs(save_dir, exist_ok=True)

# Save once, reuse the same path for the confirmation message.
save_path = os.path.join(save_dir, 'best.pt')
model.save(save_path)
print("Model saved to:", save_path)
from ultralytics import YOLO
import gradio as gr
import cv2
import os
import tempfile

# Weights produced by the training run above; this path must exist before
# the demo is started.
model = YOLO("./runs/detect/train/weights/best.pt")
def process_video(video_path):
    """
    Run the YOLO model over every frame of *video_path* and write an
    annotated copy to a temporary .mp4 file.

    Args:
        video_path: Path to the input video file.

    Returns:
        Path to the annotated output video.

    Raises:
        ValueError: If the input video cannot be opened.
    """
    # delete=False so the file survives this function; Gradio serves it
    # afterwards. The `with` closes the handle we would otherwise leak.
    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp:
        output_path = tmp.name

    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise ValueError("Error opening video file")

    try:
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        # Some containers report 0 fps; fall back to a sane default so the
        # writer still produces a playable file.
        fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30

        fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # 'mp4v' codec for MP4 output
        out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
        try:
            while True:
                ret, frame = cap.read()
                if not ret:
                    # End of stream (or read error) — stop cleanly.
                    break
                # YOLO inference on this frame; results[0].plot() returns the
                # frame with bounding boxes and labels drawn on it.
                results = model(frame)
                out.write(results[0].plot())
        finally:
            out.release()
    finally:
        # Release the capture even if inference fails mid-video.
        cap.release()

    return output_path
def gradio_interface(video):
    """
    Gradio callback: run detection on the uploaded video and hand back the
    path of the annotated result, or an error-message string on failure.
    """
    # Guard clause: nothing uploaded yet.
    if video is None:
        return "Please upload a video file."

    try:
        return process_video(video)
    except Exception as e:
        # Surface the failure to the UI rather than crashing the app.
        return f"Error processing video: {str(e)}"
# Build the web UI: a single video-in / video-out detection demo.
# NOTE(review): on failure gradio_interface returns a *string*, which a
# gr.Video output cannot render — Gradio shows a generic error instead.
iface = gr.Interface(
    fn=gradio_interface,
    inputs=gr.Video(label="Upload Video"),
    outputs=gr.Video(label="Processed Video with Detections"),
    title="YOLOv11 Object Detection on Video",
    description="Upload a video to run object detection using a trained YOLOv11 model.",
)

# Guard the launch so importing this module (e.g. from tests) does not
# start the server; running it as a script behaves exactly as before.
if __name__ == "__main__":
    iface.launch()