Disha9854 committed on
Commit 865ef5b · verified · 1 Parent(s): d8d7727

Update app.py

Files changed (1):
  app.py +30 -23
app.py CHANGED
@@ -1,33 +1,40 @@
 import gradio as gr
-import cv2
 import numpy as np
+from PIL import Image
+import io
+from huggingface_hub import hf_hub_download
+
+# Download the model
+model_path = hf_hub_download("dima806/deepfake_vs_real_image_detection", "model.pkl")
+
+# Load the model (you might need to adjust this based on how the model is saved)
+import pickle
+with open(model_path, 'rb') as f:
+    model = pickle.load(f)
 
-# Assuming you have a function to process individual frames
 def process_frame(frame):
-    # Load the model
-    model = gr.load("models/dima806/deepfake_vs_real_image_detection")
-
     # Process the frame using the model
-    result = model(frame)
-
+    # You might need to preprocess the frame to match the model's input requirements
+    result = model.predict(np.array(frame))
     return result
 
-def process_video(video_path):
-    cap = cv2.VideoCapture(video_path)
-    results = []
+def process_video(video_file):
+    video = video_file.read()
 
-    while cap.isOpened():
-        ret, frame = cap.read()
-        if not ret:
-            break
-
-        # Process each frame
-        result = process_frame(frame)
-        results.append(result)
-
-    cap.release()
-
-    # Analyze results (e.g., calculate average score, majority vote, etc.)
+    # Use Pillow to open the video file
+    with Image.open(io.BytesIO(video)) as img:
+        # Check if it's an animated image (like GIF)
+        if hasattr(img, 'n_frames') and img.n_frames > 1:
+            results = []
+            for frame in range(img.n_frames):
+                img.seek(frame)
+                frame_result = process_frame(img.convert('RGB'))
+                results.append(frame_result)
+        else:
+            # If it's a single image, process it once
+            results = [process_frame(img.convert('RGB'))]
+
+    # Analyze results
     final_result = analyze_results(results)
 
     return final_result
@@ -39,7 +46,7 @@ def analyze_results(results):
 
 iface = gr.Interface(
     fn=process_video,
-    inputs=gr.Video(),
+    inputs=gr.File(label="Upload Video"),
     outputs=gr.Text(label="Deepfake Detection Result")
 )
 
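For reference, a hedged sketch of what a working app.py for this Space might look like. It assumes the dima806/deepfake_vs_real_image_detection checkpoint loads with the standard transformers image-classification pipeline (rather than the pickled model.pkl this commit downloads, which the model repo may not provide) and uses OpenCV instead of Pillow to pull frames, since Pillow cannot decode video containers such as MP4. The frame_step sampling and the majority vote are illustrative choices, not part of the commit.

# Illustrative sketch, not part of this commit. Assumes:
#   - the checkpoint works with transformers' image-classification pipeline
#   - gr.Video() hands process_video a filepath string
import cv2
import gradio as gr
from PIL import Image
from transformers import pipeline

# Load the classifier once at startup instead of once per frame
classifier = pipeline(
    "image-classification",
    model="dima806/deepfake_vs_real_image_detection",
)

def process_video(video_path, frame_step=30):
    """Classify every frame_step-th frame and majority-vote the labels."""
    cap = cv2.VideoCapture(video_path)
    labels = []
    index = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        if index % frame_step == 0:
            # OpenCV returns BGR arrays; the pipeline expects RGB images
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            top_prediction = classifier(Image.fromarray(rgb))[0]
            labels.append(top_prediction["label"])
        index += 1
    cap.release()

    if not labels:
        return "No frames could be read from the video."
    majority = max(set(labels), key=labels.count)
    return f"{majority} ({labels.count(majority)}/{len(labels)} sampled frames)"

iface = gr.Interface(
    fn=process_video,
    inputs=gr.Video(),
    outputs=gr.Text(label="Deepfake Detection Result"),
)

iface.launch()

Sampling every Nth frame keeps inference time bounded for long clips; lowering frame_step trades speed for a more stable vote.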