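# Gradio app for fire and smoke detection using an Ultralytics YOLO model exported to INT8 OpenVINO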
from ultralytics import YOLO
from PIL import Image
import gradio as gr
from huggingface_hub import snapshot_download
import os
import cv2
import tempfile
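
# Download the model snapshot from the Hugging Face Hub and load the exported OpenVINO weights for detection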
def load_model(repo_id):
    download_dir = snapshot_download(repo_id)
    print(download_dir)
    path = os.path.join(download_dir, "best_int8_openvino_model")
    print(path)
    detection_model = YOLO(path, task='detect')
    return detection_model
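
# Run detection on a single PIL image and return the annotated result as a PIL image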
def predict_image(pilimg):
    result = detection_model.predict(pilimg, conf=0.5, iou=0.6)
    img_bgr = result[0].plot()
    out_pilimg = Image.fromarray(img_bgr[..., ::-1])  # RGB-order PIL image
    return out_pilimg
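
# Run detection on each frame of the uploaded video and write the annotated frames to a temporary MP4 file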
def process_video(video_file):
    cap = cv2.VideoCapture(video_file)
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)

    # Use a temporary file to store the annotated video
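    # delete=False keeps the file on disk after the handle is closed so Gradio can serve it for download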
    temp_video = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
    temp_video_path = temp_video.name
    temp_video.close()  # close the open handle; cv2.VideoWriter will reopen the path
    writer = cv2.VideoWriter(temp_video_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (frame_width, frame_height))

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # Perform object detection
        results = detection_model.predict(frame, conf=0.5, iou=0.6)
        annotated_frame = results[0].plot()
        writer.write(annotated_frame)

    cap.release()
    writer.release()
    return temp_video_path
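
# Load the detection model once at startup so both the image and video handlers share it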
REPO_ID = "zell-dev/fire-smoke-detection"
detection_model = load_model(REPO_ID)

# Improved UI with image and video upload
with gr.Blocks() as app:
    gr.Markdown("# 🔥 Fire and Smoke Detection App - 2415336E")
    gr.Markdown("Upload an image or a video to detect fire or smoke using a YOLO model.")

    with gr.Tabs():
        with gr.Tab("Image Detection"):
            gr.Markdown("### Upload an Image")
            img_input = gr.Image(type="pil", label="Input Image")
            img_output = gr.Image(type="pil", label="Detection Output")
            img_button = gr.Button("Detect Fire/Smoke")
            img_button.click(predict_image, inputs=img_input, outputs=img_output)

        with gr.Tab("Video Detection"):
            gr.Markdown("### Upload a video to detect fire and smoke. You can download the processed video after detection.")
            video_input = gr.Video(label="Upload Video")
            video_output = gr.File(label="Download Processed Video")  # File download component
            process_button = gr.Button("Process Video")
            process_button.click(process_video, inputs=video_input, outputs=video_output)

    gr.Markdown("Developed with 💻 by Zell (Feng Long)")
app.launch(share=True)