Commit b1b2c74 · Parent(s): d4a1106
code cleaned

Files changed:
- .gitignore +1 -0
- README.md +28 -0
- app.py +25 -139
.gitignore CHANGED
@@ -1 +1,2 @@
 venv
+.DS_Store
README.md CHANGED
@@ -10,3 +10,31 @@ pinned: false
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+
+The dataset used during training is also uploaded to Hugging Face here: https://huggingface.co/datasets/trifork/plastic-pellets
+
+
+
+
+To push binary image files or models to your Hugging Face repository, you need Git Large File Storage (Git LFS). Git LFS handles large files by storing them separately from your main Git history, which avoids push failures with large binaries. To set it up:
+
+1. Install Git LFS
+```
+brew install git-lfs
+```
+
+2. Initialize Git LFS in your repository
+```
+git lfs install
+```
+
+3. Track the binary file types, for example:
+```
+git lfs track "*.bmp"
+```
+4. Add .gitattributes to Git
+```
+git add .gitattributes
+git commit -m "Configure Git LFS to track .bmp files"
+```
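
Once tracking is configured, the binaries themselves still have to be staged and pushed. A minimal sketch of the remaining commands, assuming the Space keeps its sample images under `images/` and the model weights under `model/` (paths suggested by app.py; adjust to your layout):

```
# Assumes the LFS-tracked binaries live under images/ and model/
git add images/ model/
git commit -m "Add sample images and model weights via Git LFS"
git push
```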
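For the dataset referenced above, a minimal sketch of pulling it with the `datasets` library; this assumes the repo loads with the default builder and exposes a `train` split, which the commit itself does not confirm:

```python
# Hedged sketch: assumes the default dataset builder and a "train" split.
from datasets import load_dataset

ds = load_dataset("trifork/plastic-pellets", split="train")
print(ds)     # number of rows and feature schema
print(ds[0])  # first example
```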
app.py CHANGED
@@ -1,163 +1,89 @@
 # app.py
+# app.py
 import gradio as gr
 from PIL import Image, ImageDraw, ImageFont
 import torch
 from ultralytics import YOLO
 import numpy as np
 import os
-
-# Print Pillow version for debugging
 from PIL import __version__ as PIL_VERSION
 print(f"Pillow version: {PIL_VERSION}")
 
-
-MODEL_PATH = "model/231220_detect_lr_0001_640_brightness.pt"  # Ensure this path is correct
+MODEL_PATH = "model/231220_detect_lr_0001_640_brightness.pt"
 
-# Verify
+# Verify the model path
 if not os.path.exists(MODEL_PATH):
-    raise FileNotFoundError(
-        f"YOLO model not found at '{MODEL_PATH}'. Please ensure the model file is in the 'model/' directory."
-    )
+    raise FileNotFoundError(f"YOLO model not found at '{MODEL_PATH}'.")
 
-#
-print("Loading YOLO model...")
+# Load the YOLO model
 model = YOLO(MODEL_PATH)
-print("YOLO model loaded
+print("YOLO model loaded.")
 
 def detect_plastic_pellets(input_image):
     """
-    Perform plastic pellet detection using
-
-    Args:
-        input_image (PIL.Image): The input beach image uploaded by the user.
-
-    Returns:
-        PIL.Image: The image with detected plastic pellets highlighted or an image with an error message.
+    Perform plastic pellet detection using our customized model.
     """
     if input_image is None:
-        print("No image uploaded.")
-        # Create an error image with a message
         error_image = Image.new('RGB', (500, 100), color=(255, 0, 0))
         draw = ImageDraw.Draw(error_image)
         try:
            font = ImageFont.truetype("arial.ttf", size=15)
         except IOError:
            font = ImageFont.load_default()
-
-        bbox = font.getbbox(message)
-        draw.text((10, 40), message, fill=(255, 255, 255), font=font)
+        draw.text((10, 40), "Please upload a valid image.", fill=(255, 255, 255), font=font)
         return error_image
 
     try:
         print("Starting detection...")
-
-        max_size = (1024, 1024)
-
-        # Attempt to use Resampling if available, else use LANCZOS
-        try:
-            from PIL import Resampling
-            resample_filter = Resampling.LANCZOS
-        except ImportError:
-            resample_filter = Image.LANCZOS  # For Pillow versions <10.0.0
-
-        input_image.thumbnail(max_size, resample_filter)
-        print(f"Image resized to: {input_image.size}")
-
-        # Convert PIL Image to numpy array
+        input_image.thumbnail((1024, 1024), Image.LANCZOS)
         img = np.array(input_image.convert("RGB"))
 
-        # Perform inference
         results = model(img)
-        print(f"Number of results: {len(results)}")
-
-        # Initialize drawing context
         draw = ImageDraw.Draw(input_image)
         try:
-            # Attempt to load a TrueType font
            font = ImageFont.truetype("arial.ttf", size=15)
         except IOError:
-            # Fall back to the default font if arial.ttf is not found
            font = ImageFont.load_default()
 
-        # Initialize a flag to check if any detection was made
        detection_made = False
 
-        # Iterate over detections and draw bounding boxes
        for result in results:
-
-
-            for box in boxes:
-                # Extract bounding box coordinates and confidence
-                x1, y1, x2, y2 = box.xyxy[0].tolist()
+            for box in result.boxes:
+                x1, y1, x2, y2 = map(int, box.xyxy[0].tolist())
                confidence = box.conf[0].item()
                cls = int(box.cls[0].item())
                name = model.names[cls] if model.names else "Object"
 
-
-
-                # Define bounding box color and thickness
-                color = (255, 0, 0)  # Red color
-                thickness = 2
+                color = (255, 0, 0)
+                draw.rectangle(((x1, y1), (x2, y2)), outline=color, width=2)
 
-                # Convert coordinates to integers
-                x1, y1, x2, y2 = map(int, [x1, y1, x2, y2])
-
-                # Draw rectangle
-                draw.rectangle(((x1, y1), (x2, y2)), outline=color, width=thickness)
-
-                # Prepare label
                label = f"{name} {confidence:.2f}"
+                text_width, text_height = font.getbbox(label)[2:]
+                draw.rectangle(((x1, y1 - text_height), (x1 + text_width, y1)), fill=color)
+                draw.text((x1, y1 - text_height), label, fill=(255, 255, 255), font=font)
 
-                # Calculate text size using font.getbbox
-                bbox = font.getbbox(label)
-                text_width = bbox[2] - bbox[0]
-                text_height = bbox[3] - bbox[1]
-
-                # Draw label background
-                draw.rectangle(
-                    ((x1, y1 - text_height), (x1 + text_width, y1)),
-                    fill=color
-                )
-
-                # Draw label text
-                draw.text(
-                    (x1, y1 - text_height),
-                    label,
-                    fill=(255, 255, 255),
-                    font=font
-                )
-
-                # Set the flag to True as a detection was made
                detection_made = True
 
        if not detection_made:
-
-
-            message = "No plastic pellets detected."
-            bbox = font.getbbox(message)
-            draw.text((10, 10), message, fill=(255, 0, 0), font=font)
-            return input_image  # Still return an image, but with a message
+            draw.text((10, 10), "No plastic pellets detected.", fill=(255, 0, 0), font=font)
+            return input_image
 
        print("Detection completed.")
        return input_image
 
    except Exception as e:
        print(f"Detection error: {str(e)}")
-        # Instead of returning a string, create an image with the error message
        error_image = Image.new('RGB', (500, 100), color=(255, 0, 0))
        draw = ImageDraw.Draw(error_image)
        try:
            font = ImageFont.truetype("arial.ttf", size=15)
        except IOError:
            font = ImageFont.load_default()
-
-        bbox = font.getbbox(message)
-        draw.text((10, 40), message, fill=(255, 255, 255), font=font)
+        draw.text((10, 40), f"Error: {str(e)}", fill=(255, 255, 255), font=font)
        return error_image
 
 def main():
    with gr.Blocks(css=".gradio-container {max-width: 800px}") as demo:
-        # Header Section
        gr.Markdown(
            """
            <h1 align="center">Beach Plastic Pellet Detection Challenge</h1>
@@ -165,63 +91,23 @@
            """
        )
 
-        # Instructions Section
-        gr.Markdown(
-            """
-            ### 📸 How to Participate
-            1. **Upload or Select Image**: Click on the upload area to select a beach photo from your device or choose one of our sample images below.
-            2. **Detect**: Click the "Detect Plastic Pellets" button to run the model.
-            3. **Contribute**: Download the result and share it to help us collect data on plastic pellet pollution.
-
-            ### Why Participate?
-            By participating, you're helping us collect valuable data on plastic pellet pollution on beaches worldwide. Your contributions will aid in environmental research and clean-up initiatives, making a tangible impact on our planet's health.
-            """
-        )
-
-        # Upload and Output Section
        with gr.Row():
            with gr.Column():
-                input_image = gr.Image(
-
-
-                    interactive=True,
-                )
-
-                # Examples Section
-                examples = [
-                    'images/image1.bmp',
-                    'images/image2.bmp',
-                    'images/image3.bmp'
-                ]
-
-                gr.Examples(
-                    examples=examples,
-                    inputs=input_image,
-                    label="Or choose one of these images",
-                    examples_per_page=3,
-                )
-
+                input_image = gr.Image(type="pil", label="Upload or Select Beach Image", interactive=True)
+                examples = ['images/image1.bmp', 'images/image2.bmp', 'images/image3.bmp']
+                gr.Examples(examples=examples, inputs=input_image, label="Or choose one of these images")
                submit_button = gr.Button("Detect Plastic Pellets")
 
            with gr.Column():
-                output_image = gr.Image(
-
-                    label="Detection Result",
-                    interactive=False,
-                    show_download_button=True  # Add download button directly to the output image
-                )
-
-        # Footer Section
+                output_image = gr.Image(type="pil", label="Detection Result", interactive=False, show_download_button=True)
+
        gr.Markdown(
            """
            ---
-            <p align="center">
-            © 2024 Beach Clean-Up Initiative.
-            </p>
+            <p align="center">© 2024 Beach Clean-Up Initiative.</p>
            """
        )
 
-        # Connect the button to the detection function
        submit_button.click(
            fn=detect_plastic_pellets,
            inputs=input_image,
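
The diff view is truncated after `inputs=input_image,`, so the rest of the click wiring and the launch call are not shown. As a rough sketch of how a Gradio Blocks app of this shape is typically completed (the `outputs=` binding and the `launch()` call below are assumptions, not part of the commit):

```python
import gradio as gr

def detect(image):
    # Stand-in for detect_plastic_pellets; returns the input unchanged.
    return image

with gr.Blocks() as demo:
    input_image = gr.Image(type="pil", label="Upload or Select Beach Image")
    submit_button = gr.Button("Detect Plastic Pellets")
    output_image = gr.Image(type="pil", label="Detection Result")
    # The click handler routes the function's return value to the output component.
    submit_button.click(fn=detect, inputs=input_image, outputs=output_image)

if __name__ == "__main__":
    demo.launch()
```

On a Space, calling `demo.launch()` when app.py runs is what actually serves the interface.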