thimwai committed
Commit eb285c2 · verified · 1 Parent(s): a7ead9f

Add video tab

Files changed (1):
  1. app.py +92 -36
app.py CHANGED
@@ -1,36 +1,92 @@
- import gradio as gr
- from ultralytics import YOLO
- import cv2
- import numpy as np
-
- # Load the YOLOv8 model
- model = YOLO("./model/best.pt")
-
- def detect_emotion(image):
-     """
-     Perform YOLO8 inference on the uploaded image.
-     :param image: Input image from the Gradio interface
-     :return: Annotated image with bounding boxes and emotion labels
-     """
-     # Convert PIL image to OpenCV format
-     image = np.array(image)
-     image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
-
-     # Perform inference
-     results = model(image)
-
-     # Annotate the image with predictions
-     annotated_image = results[0].plot()
-
-     # Convert OpenCV BGR image back to RGB for display
-     annotated_image = cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB)
-     return annotated_image
-
- # Create Gradio interface
- gr.Interface(
-     fn=detect_emotion,
-     inputs=gr.Image(type="pil"),
-     outputs=gr.Image(type="numpy"),
-     title="YOLO8 Object Detection",
-     description="Upload an image, and the model will detect the object with bounding boxes."
- ).launch(share=True)  # Added share=True to expose a public link
+ import gradio as gr
+ from ultralytics import YOLO
+ import cv2
+ import numpy as np
+
+ # Load the YOLOv8 model
+ model = YOLO("./model/best.pt")
+
+ def detect_emotion(image):
+     """
+     Perform YOLO8 inference on the uploaded image.
+     :param image: Input image from the Gradio interface
+     :return: Annotated image with bounding boxes and emotion labels
+     """
+     # Convert PIL image to OpenCV format
+     image = np.array(image)
+     image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+
+     # Perform inference
+     results = model(image)
+
+     # Annotate the image with predictions
+     annotated_image = results[0].plot()
+
+     # Convert OpenCV BGR image back to RGB for display
+     annotated_image = cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB)
+     return annotated_image
+
+
+ def detect_emotion_video(video_path):
+     """
+     Perform YOLO8 inference on an uploaded video.
+     :param video_path: Path to the video file from Gradio interface
+     :return: Processed video with bounding boxes and emotion labels
+     """
+     cap = cv2.VideoCapture(video_path)
+     if not cap.isOpened():
+         return "Error: Could not open video file."
+
+     # Get video properties
+     frame_width = int(cap.get(3))
+     frame_height = int(cap.get(4))
+     fps = int(cap.get(cv2.CAP_PROP_FPS))
+
+     # Define the output video writer
+     output_video_path = "output_video.mp4"
+     fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+     out = cv2.VideoWriter(output_video_path, fourcc, fps, (frame_width, frame_height))
+
+     while cap.isOpened():
+         ret, frame = cap.read()
+         if not ret:
+             break  # Stop if video ends
+
+         # Perform inference
+         results = model(frame)
+
+         # Annotate the frame with predictions
+         annotated_frame = results[0].plot()
+
+         # Write the processed frame to the output video
+         out.write(annotated_frame)
+
+     cap.release()
+     out.release()
+
+     return output_video_path  # Return the processed video
+
+
+ # Create Gradio Tabs
+ with gr.Blocks() as demo:
+     gr.Markdown("## YOLOv8 Emotion Detection")
+
+     with gr.Tabs():
+         # Tab 1: Image Inference
+         with gr.Tab("Image Detection"):
+             gr.Markdown("### Upload an Image for Emotion Detection")
+             image_input = gr.Image(type="pil")
+             image_output = gr.Image(type="numpy")
+             image_btn = gr.Button("Detect Emotion")
+             image_btn.click(detect_emotion, inputs=image_input, outputs=image_output)
+
+         # Tab 2: Video Inference
+         with gr.Tab("Video Detection"):
+             gr.Markdown("### Upload a Video for Emotion Detection")
+             video_input = gr.Video()
+             video_output = gr.Video()
+             video_btn = gr.Button("Detect Emotion in Video")
+             video_btn.click(detect_emotion_video, inputs=video_input, outputs=video_output)
+
+ # Launch the Gradio App
+ demo.launch(share=True)  # Enables public sharing
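
For a quick local sanity check of the video path this commit adds, the snippet below mirrors detect_emotion_video step by step outside of Gradio. It is a minimal sketch rather than part of the commit: it assumes ultralytics and opencv-python are installed, that ./model/best.pt is present, and that sample.mp4 is a placeholder name for any short local clip.

    import cv2
    from ultralytics import YOLO

    # Same weights the app loads; "sample.mp4" is a hypothetical test clip.
    model = YOLO("./model/best.pt")
    cap = cv2.VideoCapture("sample.mp4")
    writer = cv2.VideoWriter(
        "output_video.mp4",
        cv2.VideoWriter_fourcc(*"mp4v"),
        int(cap.get(cv2.CAP_PROP_FPS)) or 30,  # fall back to 30 fps if the property reads as 0
        (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))),
    )
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break  # end of clip
        writer.write(model(frame)[0].plot())  # plot() returns an annotated BGR frame
    cap.release()
    writer.release()
    print("Annotated clip written to output_video.mp4")

If the resulting file does not play back in the browser's video component, re-encoding to H.264 may be needed, since the mp4v codec used here is not always browser-friendly.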
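
The image tab can be smoke-tested the same way. The sketch below is likewise an assumption-laden example, not part of the commit: face.jpg is a placeholder for any local test photo, and Pillow stands in for the PIL image that gr.Image(type="pil") would normally hand to detect_emotion.

    import cv2
    import numpy as np
    from PIL import Image
    from ultralytics import YOLO

    model = YOLO("./model/best.pt")
    pil_image = Image.open("face.jpg").convert("RGB")           # what the Gradio image input would provide
    bgr = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)  # same RGB->BGR step as detect_emotion
    annotated = model(bgr)[0].plot()                            # BGR array with boxes and emotion labels
    cv2.imwrite("face_annotated.jpg", annotated)                # imwrite expects BGR, so no back-conversion needed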