import os
import gradio as gr
import cv2
from ultralytics import YOLO
# Define the folder containing the images and video
folder_path = "info" # Replace with your folder name or path
# Get list of files from the folder
image_files = [os.path.join(folder_path, f) for f in os.listdir(folder_path) if f.lower().endswith(('.jpg', '.jpeg', '.png'))]
video_files = [os.path.join(folder_path, f) for f in os.listdir(folder_path) if f.lower().endswith(('.mp4', '.avi', '.mov'))]
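# Assumed layout: an "info/" folder next to this script holding at least two
# images and one video, e.g. info/sample1.jpg, info/sample2.jpg, info/demo.mp4.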
# Ensure the folder contains the expected number of files
if len(image_files) < 2 or len(video_files) < 1:
    raise ValueError("Folder must contain at least 2 images and 1 video.")
# Select the first two images and the first video
image_examples = [[image_files[0]], [image_files[1]]]
video_examples = [[video_files[0]]]
# Load the YOLO model
model = YOLO('best.pt')
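# 'best.pt' is assumed to be a custom-trained detection checkpoint stored at
# the Space root; a stock checkpoint such as 'yolov8n.pt' also works for testing.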
# Function for processing images
def show_preds_image(image_path):
    # Run inference directly on the image path; Ultralytics handles loading.
    results = model.predict(source=image_path)
    annotated_image = results[0].plot()  # plot() returns a BGR numpy array
    return cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB)
# Function for processing videos
def show_preds_video(video_path):
    cap = cv2.VideoCapture(video_path)
    out_frames = []
    # Some containers report 0 FPS; fall back to 30 so VideoWriter stays valid.
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        results = model.predict(source=frame)
        annotated_frame = results[0].plot()
        out_frames.append(cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB))
    cap.release()
    if not out_frames:
        raise ValueError("Could not read any frames from the input video.")
    # Save the annotated video
    output_path = "annotated_video.mp4"
    height, width, _ = out_frames[0].shape
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
    for frame in out_frames:
        writer.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
    writer.release()
    return output_path
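# Note: the "mp4v" fourcc is widely available, but some browsers will not play
# it inline. If the local OpenCV build ships an H.264 encoder, "avc1" is a
# drop-in alternative for the fourcc above.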
# Gradio interfaces
inputs_image = gr.Image(type="filepath", label="Input Image")
outputs_image = gr.Image(type="numpy", label="Output Image")
interface_image = gr.Interface(
    fn=show_preds_image,
    inputs=inputs_image,
    outputs=outputs_image,
    title="Safety Head Detector - Image",
    examples=image_examples,
)
inputs_video = gr.Video(label="Input Video")
outputs_video = gr.Video(label="Annotated Output")
interface_video = gr.Interface(
    fn=show_preds_video,
    inputs=inputs_video,
    outputs=outputs_video,
    title="Safety Head Detector - Video",
    examples=video_examples,
)
# Combine into a tabbed interface
gr.TabbedInterface(
    [interface_image, interface_video],
    tab_names=['Image Inference', 'Video Inference']
).launch(share=True)
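# share=True asks Gradio for a temporary public link when run locally; a hosted
# environment such as a Hugging Face Space serves the app directly regardless.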