Update app.py
Browse files
app.py
CHANGED
@@ -1,33 +1,40 @@
|
|
1 |
import gradio as gr
|
2 |
-
import cv2
|
3 |
import numpy as np
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
4 |
|
5 |
-
# Assuming you have a function to process individual frames
|
6 |
def process_frame(frame):
|
7 |
-
# Load the model
|
8 |
-
model = gr.load("models/dima806/deepfake_vs_real_image_detection")
|
9 |
-
|
10 |
# Process the frame using the model
|
11 |
-
|
12 |
-
|
13 |
return result
|
14 |
|
15 |
-
def process_video(
|
16 |
-
|
17 |
-
results = []
|
18 |
|
19 |
-
|
20 |
-
|
21 |
-
if
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
|
|
|
|
31 |
final_result = analyze_results(results)
|
32 |
|
33 |
return final_result
|
@@ -39,7 +46,7 @@ def analyze_results(results):
|
|
39 |
|
40 |
iface = gr.Interface(
|
41 |
fn=process_video,
|
42 |
-
inputs=gr.Video
|
43 |
outputs=gr.Text(label="Deepfake Detection Result")
|
44 |
)
|
45 |
|
|
|
1 |
# --- Module setup: imports and one-time model load ---------------------------
# Stdlib imports first, third-party second (PEP 8). `pickle` was previously
# imported in the middle of the module; hoisted here.
import io
import pickle

import gradio as gr
import numpy as np
from PIL import Image
from huggingface_hub import hf_hub_download

# Download the serialized model from the Hugging Face Hub (cached locally).
# NOTE(review): confirm the repo actually ships a file named "model.pkl" —
# transformers-style Hub repos usually publish safetensors/PyTorch
# checkpoints, in which case this download will 404.
model_path = hf_hub_download("dima806/deepfake_vs_real_image_detection", "model.pkl")

# SECURITY: pickle.load() executes arbitrary code embedded in the file.
# Only acceptable if the Hub repo is fully trusted; prefer a safetensors
# or framework-native loader where available.
with open(model_path, 'rb') as f:
    model = pickle.load(f)
|
14 |
|
|
|
15 |
def process_frame(frame):
    """Classify a single image frame with the module-level `model`.

    Args:
        frame: a PIL image (or anything ``np.array`` can convert).

    Returns:
        The raw value of ``model.predict`` for this frame.
    """
    # Convert to an ndarray first; the model's exact input contract
    # (shape/dtype/preprocessing) is not visible here — assumed to accept a
    # raw HxWxC pixel array. TODO confirm against the model's docs.
    pixels = np.array(frame)
    return model.predict(pixels)
|
20 |
|
21 |
+
def process_video(video_file):
    """Run deepfake detection over every frame of an uploaded file.

    Args:
        video_file: the value delivered by the ``gr.File`` input. Depending
            on the Gradio version this is either a file-like object (with
            ``.read()``) or a filesystem path string — both are accepted.
            Note that only formats Pillow can open (images / animated GIFs)
            actually work; true video containers (mp4, avi) will raise from
            ``Image.open``.

    Returns:
        The aggregate verdict produced by ``analyze_results``.
    """
    # Fix: the original called video_file.read() unconditionally, which
    # raises AttributeError when Gradio hands over a plain filepath string.
    if hasattr(video_file, "read"):
        data = video_file.read()
    else:
        with open(video_file, "rb") as fh:
            data = fh.read()

    with Image.open(io.BytesIO(data)) as img:
        # Animated images (e.g. GIF) expose n_frames > 1; seek through each
        # frame and score it individually.
        if getattr(img, "n_frames", 1) > 1:
            results = []
            for index in range(img.n_frames):
                img.seek(index)
                results.append(process_frame(img.convert("RGB")))
        else:
            # Single still image: score it once.
            results = [process_frame(img.convert("RGB"))]

    # Fold the per-frame scores into one final verdict.
    return analyze_results(results)
|
|
|
46 |
|
47 |
# Wire the detection pipeline into a minimal Gradio UI: file in, text out.
iface = gr.Interface(
    fn=process_video,
    inputs=gr.File(label="Upload Video"),
    outputs=gr.Text(label="Deepfake Detection Result"),
)

# Fix: the interface was constructed but never started anywhere visible in
# this file, so the app would exit without serving. Launch when run directly.
if __name__ == "__main__":
    iface.launch()
|
52 |
|