kimpepe committed on
Commit 3725150 · verified · 1 Parent(s): 02b51a4

Update app.py

Files changed (1)
  1. app.py +14 -76
app.py CHANGED
@@ -1,88 +1,26 @@
 from ultralytics import YOLO
 from PIL import Image
 import gradio as gr
-import cv2
-import tempfile
-import os
 
 # Load YOLOv8 model
 model = YOLO("best.pt")  # Ensure best.pt is in the same directory
 
-# Preprocess and run inference for images
-def predict_image(image):
+# Preprocess and run inference
+def predict(image):
+    # Perform prediction
     results = model.predict(source=image, conf=0.5)
+
+    # Annotate the image with bounding boxes
     annotated_image = results[0].plot()
+
+    # Convert to PIL Image
     return Image.fromarray(annotated_image)
 
-# Preprocess and run inference for videos
-def predict_video(video):
-    try:
-        # Save video to a temporary file
-        temp_video_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
-        with open(temp_video_path, "wb") as f:
-            f.write(video.read())
-
-        # Open the video file
-        cap = cv2.VideoCapture(temp_video_path)
-        if not cap.isOpened():
-            return "Error: Unable to open video file."
-
-        # Get video properties
-        fps = int(cap.get(cv2.CAP_PROP_FPS))
-        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-
-        # Define codec and create a video writer
-        fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # Use 'mp4v' for compatibility
-        output_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
-        out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
-
-        while cap.isOpened():
-            ret, frame = cap.read()
-            if not ret:
-                break
-
-            # Perform predictions on the frame
-            results = model.predict(source=frame, conf=0.5)
-            annotated_frame = results[0].plot()
-
-            # Write the annotated frame
-            out.write(annotated_frame)
-
-        # Release resources
-        cap.release()
-        out.release()
-
-        # Ensure the output file exists and is playable
-        if os.path.exists(output_path):
-            return output_path
-        else:
-            return "Error: Annotated video could not be created."
-
-    except Exception as e:
-        return f"An error occurred while processing the video: {str(e)}"
-
-# Gradio interfaces
-image_interface = gr.Interface(
-    fn=predict_image,
-    inputs=gr.Image(type="pil", label="Upload an Image"),
+# Gradio interface
+gr.Interface(
+    fn=predict,
+    inputs=gr.Image(type="pil"),
     outputs="image",
-    title="Image Detection",
-    description="Upload an image for object detection."
-)
-
-video_interface = gr.Interface(
-    fn=predict_video,
-    inputs=gr.Video(label="Upload a Video"),
-    outputs="video",
-    title="Video Detection",
-    description="Upload a video for object detection."
-)
-
-# Combine interfaces
-app = gr.TabbedInterface(
-    [image_interface, video_interface],
-    ["Image Detection", "Video Detection"]
-)
-
-app.launch()
+    title="Hippo or Rhino Detection",
+    description="Upload an image for object detection with YOLOv8."
+).launch()
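
A quick way to sanity-check the simplified script before launching the Gradio UI is to call the same prediction logic directly. The following is a minimal sketch, not part of the commit: it assumes best.pt sits next to the script (as app.py expects), and "sample.jpg" is a hypothetical local test image.

from ultralytics import YOLO
from PIL import Image

# Load the same weights app.py expects in the working directory
model = YOLO("best.pt")

def predict(image):
    # Run inference and draw the predicted boxes, as in the updated app.py
    results = model.predict(source=image, conf=0.5)
    annotated_image = results[0].plot()
    return Image.fromarray(annotated_image)

# "sample.jpg" is a placeholder path for a local test image
annotated = predict(Image.open("sample.jpg"))
annotated.save("annotated_sample.jpg")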