|
import gradio as gr |
|
from ultralytics import YOLO |
|
import cv2 |
|
import numpy as np |
|
|
|
|
|
# Load the fine-tuned YOLOv8 weights once at import time so both the image
# and video handlers share a single model instance.
model = YOLO("./model/best.pt")
|
|
|
def detect_emotion(image):
    """
    Run YOLOv8 inference on an uploaded image and return an annotated copy.

    :param image: Input image from the Gradio interface (PIL image, RGB),
                  or None if the user clicked the button without uploading.
    :return: Annotated RGB numpy image with bounding boxes and class labels,
             or None when no image was supplied.
    """
    # Gradio passes None when the button is clicked with no upload;
    # without this guard np.array(None) -> cvtColor raises an opaque error.
    if image is None:
        return None

    # PIL -> numpy, then RGB -> BGR since OpenCV (and YOLO's plot())
    # work in BGR channel order.
    image = np.array(image)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    results = model(image)

    # plot() draws the first result's detections onto a copy of the frame.
    annotated_image = results[0].plot()

    # Convert back to RGB for display in the Gradio output component.
    annotated_image = cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB)
    return annotated_image
|
|
|
|
|
def detect_emotion_video(video_path):
    """
    Run YOLOv8 inference on every frame of an uploaded video.

    :param video_path: Path to the video file from the Gradio interface.
    :return: Path to the processed video with bounding boxes drawn, or an
             error message string if the input could not be opened.
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return "Error: Could not open video file."

    # Named properties instead of the magic indices 3/4.
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Some containers report 0 fps; fall back so the VideoWriter stays valid.
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30

    output_video_path = "output_video.mp4"
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_video_path, fourcc, fps, (frame_width, frame_height))

    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                # End of stream (or read failure) — stop processing.
                break

            results = model(frame)

            # plot() returns the annotated frame in BGR, which is exactly
            # what VideoWriter expects — no color conversion needed here.
            annotated_frame = results[0].plot()
            out.write(annotated_frame)
    finally:
        # Release handles even if inference raises mid-video, so the
        # output file is finalized and the capture is not leaked.
        cap.release()
        out.release()

    return output_video_path
|
|
|
|
|
|
|
# Build the Gradio UI: one tab for single-image detection, one for video.
with gr.Blocks() as demo:

    gr.Markdown("## YOLOv8 Fruits Detection")

    with gr.Tabs():

        # Tab 1: upload a still image, get back an annotated numpy image.
        with gr.Tab("Image Detection"):

            gr.Markdown("### Upload an Image for Fruits Detection")

            image_input = gr.Image(type="pil")  # handler expects a PIL image

            image_output = gr.Image(type="numpy")  # handler returns an RGB array

            image_btn = gr.Button("Detect Fruit")

            image_btn.click(detect_emotion, inputs=image_input, outputs=image_output)

        # Tab 2: upload a video file, get back a path to the processed video.
        with gr.Tab("Video Detection"):

            gr.Markdown("### Upload a Video for Fruits Detection")

            video_input = gr.Video()  # passes a file path to the handler

            video_output = gr.Video()  # handler returns the output file path

            video_btn = gr.Button("Detect Fruits in Video")

            video_btn.click(detect_emotion_video, inputs=video_input, outputs=video_output)

# share=True requests a public Gradio tunnel URL in addition to localhost.
demo.launch(share=True)
|
|