# Hugging Face Space: Honda/Toyota car-logo detection demo (Gradio + YOLO).
import gradio as gr
from PIL import Image
import cv2
from ultralytics import YOLO
import numpy as np

# Path to the fine-tuned YOLO weights used for car-logo detection.
model_path = "car_logos.pt"
# Load the model once at import time so both the image and video
# handlers share a single instance.
detection_model = YOLO(model_path)
def predict_image(pil_image, conf=0.5, iou=0.6):
    """Run logo detection on a single image and return the annotated image.

    Args:
        pil_image: Input image as an RGB ``PIL.Image``.
        conf: Minimum confidence threshold for detections (default 0.5,
            matching the original hard-coded value).
        iou: IoU threshold for non-max suppression (default 0.6).

    Returns:
        A ``PIL.Image`` with detection boxes drawn on it.
    """
    # YOLO/OpenCV work on BGR uint8 arrays, while PIL images are RGB.
    frame = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
    results = detection_model.predict(frame, conf=conf, iou=iou)
    # plot() returns an annotated BGR array; flip channels back to RGB.
    annotated_frame = results[0].plot()
    return Image.fromarray(annotated_frame[..., ::-1])
def predict_video(video_path):
    """Run logo detection on every frame of a video.

    Args:
        video_path: Filesystem path to the input video.

    Returns:
        Path to the annotated output video, or the string
        "No frames processed." when the input yields no frames.
    """
    cap = cv2.VideoCapture(video_path)
    # Preserve the source frame rate; fall back to 30 when OpenCV reports 0.
    # (The original hard-coded 30 fps, which distorts playback speed for
    # any source not recorded at 30 fps.)
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
    out_path = "output_video.mp4"
    writer = None
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            results = detection_model.predict(frame, conf=0.5, iou=0.6)
            annotated_frame = results[0].plot()
            if writer is None:
                # Create the writer lazily once the frame size is known;
                # streaming frames out avoids buffering the whole video in RAM.
                height, width = annotated_frame.shape[:2]
                writer = cv2.VideoWriter(
                    out_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height)
                )
            writer.write(annotated_frame)
    finally:
        # Release resources even if prediction raises mid-video.
        cap.release()
        if writer is not None:
            writer.release()
    if writer is None:
        return "No frames processed."
    return out_path
def create_gradio_interface():
    """Build the two-tab Gradio UI (image and video detection) and launch it."""
    with gr.Blocks() as demo:
        with gr.Tab("Upload and Honda or Toyota Logo image"):
            gr.Markdown("### Upload Honda or Toyota for Object Detection")
            image_input = gr.Image(type="pil", label="Input Image")
            image_output = gr.Image(type="pil", label="Annotated Image")
            image_button = gr.Button("Process Image")
            image_button.click(
                fn=predict_image, inputs=image_input, outputs=image_output
            )
        with gr.Tab("Video Upload for Toyota and Honda logo detection"):
            gr.Markdown("### Upload a Video for Object Detection")
            video_input = gr.Video(label="Input Video")
            video_output = gr.File(label="Annotated Video")
            video_button = gr.Button("Process Video")
            video_button.click(
                fn=predict_video, inputs=video_input, outputs=video_output
            )
    demo.launch()


# Guard the launch so importing this module (e.g. for testing) does not
# start the web server as a side effect.
if __name__ == "__main__":
    create_gradio_interface()