Arrcttacsrks committed on
Commit f1611e7 · verified · 1 Parent(s): 95cff25

Update app.py

Files changed (1)
  1. app.py +168 -242
app.py CHANGED
@@ -1,38 +1,37 @@
- # -*- coding:UTF-8 -*-
#!/usr/bin/env python
import numpy as np
import gradio as gr
- import roop.globals
- from roop.core import (
    start,
    decode_execution_providers,
    suggest_max_memory,
    suggest_execution_threads,
)
from roop.processors.frame.core import get_frame_processors_modules
- from roop.utilities import normalize_output_path
- import os
- from PIL import Image
- from datetime import datetime
- from huggingface_hub import HfApi, login
- from datasets import load_dataset, Dataset
- import json
- import shutil
- from dotenv import load_dotenv
- import cv2
from insightface.app import FaceAnalysis

# Load environment variables
load_dotenv()

- # Cosine similarity helper for comparing how similar two faces are
def cosine_similarity(a, b):
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-6)

- # Original FaceIntegrDataset class (for image swap; nothing special needed for video)
class FaceIntegrDataset:
    def __init__(self, repo_id="Arrcttacsrks/face_integrData"):
-         self.token = os.getenv('hf_token')
        if not self.token:
            raise ValueError("HF_TOKEN environment variable is not set")
        self.repo_id = repo_id
@@ -53,7 +52,7 @@ class FaceIntegrDataset:
            "source_image": source_path,
            "target_image": target_path,
            "output_image": output_path,
-             "date_created": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        }
        return metadata
@@ -63,46 +62,41 @@ class FaceIntegrDataset:
                folder_path=local_folder,
                repo_id=self.repo_id,
                repo_type="dataset",
-                 path_in_repo=date_folder
            )
            return True
        except Exception as e:
            print(f"Error uploading to Hugging Face: {str(e)}")
            return False

- # Original swap_face function for still-image face swapping
def swap_face(source_file, target_file, doFaceEnhancer):
-     folder_path = None
    try:
-         dataset_handler = FaceIntegrDataset()
-         folder_path, date_folder = dataset_handler.create_date_folder()
-         timestamp = datetime.now().strftime("%S-%M-%H-%d-%m-%Y")
        source_path = os.path.join(folder_path, f"source_{timestamp}.jpg")
        target_path = os.path.join(folder_path, f"target_{timestamp}.jpg")
-         output_path = os.path.join(folder_path, f"OutputImage{timestamp}.jpg")

        if source_file is None or target_file is None:
            raise ValueError("Source and target images are required")
-
        Image.fromarray(source_file).save(source_path)
        Image.fromarray(target_file).save(target_path)
-
-         print("source_path: ", source_path)
-         print("target_path: ", target_path)
-
        roop.globals.source_path = source_path
        roop.globals.target_path = target_path
        roop.globals.output_path = normalize_output_path(
-             roop.globals.source_path,
-             roop.globals.target_path,
-             output_path
        )
-
-         if doFaceEnhancer:
-             roop.globals.frame_processors = ["face_swapper", "face_enhancer"]
-         else:
-             roop.globals.frame_processors = ["face_swapper"]
-
        roop.globals.headless = True
        roop.globals.keep_fps = True
        roop.globals.keep_audio = True
@@ -113,162 +107,140 @@ def swap_face(source_file, target_file, doFaceEnhancer):
        roop.globals.max_memory = suggest_max_memory()
        roop.globals.execution_providers = decode_execution_providers(["cuda"])
        roop.globals.execution_threads = suggest_execution_threads()
-
-         print(
-             "start process",
-             roop.globals.source_path,
-             roop.globals.target_path,
-             roop.globals.output_path,
-         )
-
        for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
            if not frame_processor.pre_check():
-                 return None
-
        start()
-
        metadata = dataset_handler.save_metadata(
            f"source_{timestamp}.jpg",
            f"target_{timestamp}.jpg",
-             f"OutputImage{timestamp}.jpg",
-             timestamp
        )
-
        metadata_path = os.path.join(folder_path, f"metadata_{timestamp}.json")
-         with open(metadata_path, 'w') as f:
            json.dump(metadata, f, indent=4)
-
        upload_success = dataset_handler.upload_to_hf(folder_path, date_folder)
-
-         if upload_success:
-             print(f"Successfully uploaded files to dataset {dataset_handler.repo_id}")
-         else:
            print("Failed to upload files to Hugging Face dataset")
-
        if os.path.exists(output_path):
            output_image = Image.open(output_path)
-             output_array = np.array(output_image)
-             shutil.rmtree(folder_path)
-             return output_array
        else:
-             print("Output image not found")
-             if folder_path and os.path.exists(folder_path):
-                 shutil.rmtree(folder_path)
-             return None
-
    except Exception as e:
        print(f"Error in face swap process: {str(e)}")
        if folder_path and os.path.exists(folder_path):
            shutil.rmtree(folder_path)
-         raise gr.Error(f"Face swap failed: {str(e)}")

- # Swap the face in a single video frame by reusing roop's pipeline
def swap_face_frame(frame_bgr, replacement_face_rgb, doFaceEnhancer):
-     # Convert the frame from BGR to RGB because PIL works with RGB
    frame_rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
    temp_dir = "temp_faceswap_frame"
    os.makedirs(temp_dir, exist_ok=True)
    timestamp = datetime.now().strftime("%S-%M-%H-%d-%m-%Y")
-     source_path = os.path.join(temp_dir, f"source_{timestamp}.jpg")
-     target_path = os.path.join(temp_dir, f"target_{timestamp}.jpg")
-     output_path = os.path.join(temp_dir, f"OutputImage_{timestamp}.jpg")
-     Image.fromarray(frame_rgb).save(source_path)
-     Image.fromarray(replacement_face_rgb).save(target_path)
-
-     roop.globals.source_path = source_path
-     roop.globals.target_path = target_path
-     roop.globals.output_path = normalize_output_path(source_path, target_path, output_path)
-
-     if doFaceEnhancer:
-         roop.globals.frame_processors = ["face_swapper", "face_enhancer"]
-     else:
-         roop.globals.frame_processors = ["face_swapper"]
-
-     roop.globals.headless = True
-     roop.globals.keep_fps = True
-     roop.globals.keep_audio = True
-     roop.globals.keep_frames = False
-     roop.globals.many_faces = False
-     roop.globals.video_encoder = "libx264"
-     roop.globals.video_quality = 18
-     roop.globals.max_memory = suggest_max_memory()
-     roop.globals.execution_providers = decode_execution_providers(["cuda"])
-     roop.globals.execution_threads = suggest_execution_threads()
-
-     start()
-
-     if os.path.exists(output_path):
-         swapped_img = np.array(Image.open(output_path))
-     else:
-         swapped_img = frame_rgb
-     shutil.rmtree(temp_dir)
-     return swapped_img
-
- # Swap faces in a video frame by frame, using insightface to compare faces
- def swap_face_video(reference_face, replacement_face, video_input, similarity_threshold, doFaceEnhancer):
-     """
-     reference_face: Reference image (RGB) used to lock onto the target face
-     replacement_face: Replacement face image (RGB)
-     video_input: Path to the input video file
-     similarity_threshold: Threshold (0.0 - 1.0) for the similarity score
-     doFaceEnhancer: Boolean, whether to apply quality enhancement
-     """
    try:
-         # Prepare insightface
-         fa = FaceAnalysis()
-         # Removed nms=0.4 because prepare() does not support this argument
-         fa.prepare(ctx_id=0)
-
-         # Get the embedding of the reference face
-         ref_detections = fa.get(reference_face)
-         if not ref_detections:
-             raise gr.Error("No face detected in the reference image!")
-         ref_embedding = ref_detections[0].embedding
-
-         # Open the input video
-         cap = cv2.VideoCapture(video_input)
-         if not cap.isOpened():
-             raise gr.Error("Could not open the input video!")
-         fps = cap.get(cv2.CAP_PROP_FPS)
-         width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-         height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-
-         output_video_path = "temp_faceswap_video.mp4"
-         fourcc = cv2.VideoWriter_fourcc(*'mp4v')
-         out = cv2.VideoWriter(output_video_path, fourcc, fps, (width, height))
-
-         frame_index = 0
-         while True:
-             ret, frame = cap.read()
-             if not ret:
-                 break
-             # Convert the frame to RGB for insightface
-             frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-             detections = fa.get(frame_rgb)
-             swap_this_frame = False
-             for det in detections:
-                 sim = cosine_similarity(det.embedding, ref_embedding)
-                 if sim >= similarity_threshold:
-                     swap_this_frame = True
-                     break
-             if swap_this_frame:
-                 # Swap the face from replacement_face into the frame
-                 swapped_frame_rgb = swap_face_frame(frame, replacement_face, doFaceEnhancer)
-                 # Convert back to BGR before writing to the video
-                 swapped_frame = cv2.cvtColor(swapped_frame_rgb, cv2.COLOR_RGB2BGR)
-             else:
-                 swapped_frame = frame
-             out.write(swapped_frame)
-             frame_index += 1
-             print(f"Processed frame {frame_index}")
-         cap.release()
-         out.release()
-         return output_video_path
-     except Exception as e:
-         print(f"Error while processing the video: {str(e)}")
-         raise gr.Error(f"Face swap video failed: {str(e)}")

- # Gradio interface built with two tabs: Image and Video
def create_interface():
    custom_css = """
    .container {
@@ -284,107 +256,61 @@ def create_interface():
    }
    """
    title = "Face - Integrator"
-     description = r"""
-     Upload source and target images to perform face swap.
-     """
-     article = r"""
    <div style="text-align: center; max-width: 650px; margin: 40px auto;">
-         <p>
-         This tool performs face swapping with optional enhancement.
-         </p>
    </div>
    """
    with gr.Blocks(title=title, css=custom_css) as app:
        gr.Markdown(f"<h1 style='text-align: center;'>{title}</h1>")
        gr.Markdown(description)
        with gr.Tabs():
            with gr.TabItem("FaceSwap Image"):
                with gr.Row():
-                     with gr.Column(scale=1):
-                         source_image = gr.Image(
-                             label="Source Image",
-                             type="numpy",
-                             sources=["upload"]
-                         )
-                     with gr.Column(scale=1):
-                         target_image = gr.Image(
-                             label="Target Image",
-                             type="numpy",
-                             sources=["upload"]
-                         )
-                     with gr.Column(scale=1):
-                         output_image = gr.Image(
-                             label="Output Image",
-                             type="numpy",
-                             interactive=False,
-                             elem_classes="output-image"
-                         )
-                 with gr.Row():
-                     enhance_checkbox = gr.Checkbox(
-                         label="Apply the algorithm?",
-                         info="Image Quality Improvement",
-                         value=False
-                     )
-                 with gr.Row():
-                     process_btn = gr.Button(
-                         "Process Face Swap",
-                         variant="primary",
-                         size="lg"
-                     )
                process_btn.click(
                    fn=swap_face,
                    inputs=[source_image, target_image, enhance_checkbox],
                    outputs=output_image,
-                     api_name="swap_face"
                )

            with gr.TabItem("FaceSwap Video"):
                gr.Markdown("<h2 style='text-align:center;'>FaceSwap Video</h2>")
                with gr.Row():
-                     ref_image = gr.Image(
-                         label="Reference face image (face lock)",
-                         type="numpy",
-                         sources=["upload"]
-                     )
-                     swap_image = gr.Image(
-                         label="Replacement face image",
-                         type="numpy",
-                         sources=["upload"]
-                     )
-                     video_input = gr.Video(
-                         label="Input video"
-                     )
-                     similarity_threshold = gr.Slider(
-                         minimum=0.0,
-                         maximum=1.0,
-                         step=0.01,
-                         value=0.7,
-                         label="Similarity threshold"
-                     )
-                     enhance_checkbox_video = gr.Checkbox(
-                         label="Apply image quality enhancement",
-                         info="Enhancement option",
-                         value=False
-                     )
-                     process_video_btn = gr.Button(
-                         "Process FaceSwap Video",
-                         variant="primary",
-                         size="lg"
-                     )
-                     video_output = gr.Video(
-                         label="Output video"
-                     )
                process_video_btn.click(
                    fn=swap_face_video,
                    inputs=[ref_image, swap_image, video_input, similarity_threshold, enhance_checkbox_video],
                    outputs=video_output,
-                     api_name="swap_face_video"
                )

        gr.Markdown(article)

    return app

def main():
    app = create_interface()
    app.launch(share=False)

if __name__ == "__main__":
-     main()
 
+ # -*- coding: UTF-8 -*-
#!/usr/bin/env python
+
+ import os
+ import json
+ import shutil
+ from datetime import datetime
+ from dotenv import load_dotenv
import numpy as np
+ import cv2
+ from PIL import Image
import gradio as gr
+ from huggingface_hub import HfApi, login
+ import roop.globals
+ from roop.core import (
    start,
    decode_execution_providers,
    suggest_max_memory,
    suggest_execution_threads,
)
+ from roop.utilities import normalize_output_path
from roop.processors.frame.core import get_frame_processors_modules
from insightface.app import FaceAnalysis

# Load environment variables
load_dotenv()

+ # Cosine similarity function
def cosine_similarity(a, b):
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-6)

+ # Dataset handler class
class FaceIntegrDataset:
    def __init__(self, repo_id="Arrcttacsrks/face_integrData"):
+         self.token = os.getenv("hf_token")
        if not self.token:
            raise ValueError("HF_TOKEN environment variable is not set")
        self.repo_id = repo_id

            "source_image": source_path,
            "target_image": target_path,
            "output_image": output_path,
+             "date_created": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        }
        return metadata

                folder_path=local_folder,
                repo_id=self.repo_id,
                repo_type="dataset",
+                 path_in_repo=date_folder,
            )
            return True
        except Exception as e:
            print(f"Error uploading to Hugging Face: {str(e)}")
            return False

+ # Image face swap function
def swap_face(source_file, target_file, doFaceEnhancer):
+     dataset_handler = FaceIntegrDataset()
+     folder_path, date_folder = dataset_handler.create_date_folder()
+     timestamp = datetime.now().strftime("%S-%M-%H-%d-%m-%Y")
+
    try:
+         # Save source and target images
        source_path = os.path.join(folder_path, f"source_{timestamp}.jpg")
        target_path = os.path.join(folder_path, f"target_{timestamp}.jpg")
+         output_path = os.path.join(folder_path, f"OutputImage_{timestamp}.jpg")

        if source_file is None or target_file is None:
            raise ValueError("Source and target images are required")
+
        Image.fromarray(source_file).save(source_path)
        Image.fromarray(target_file).save(target_path)
+
+         # Configure Roop globals
        roop.globals.source_path = source_path
        roop.globals.target_path = target_path
        roop.globals.output_path = normalize_output_path(
+             roop.globals.source_path, roop.globals.target_path, output_path
+         )
+
+         roop.globals.frame_processors = (
+             ["face_swapper", "face_enhancer"] if doFaceEnhancer else ["face_swapper"]
        )
        roop.globals.headless = True
        roop.globals.keep_fps = True
        roop.globals.keep_audio = True

        roop.globals.max_memory = suggest_max_memory()
        roop.globals.execution_providers = decode_execution_providers(["cuda"])
        roop.globals.execution_threads = suggest_execution_threads()
+
+         # Pre-check frame processors
        for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
            if not frame_processor.pre_check():
+                 raise RuntimeError("Frame processor pre-check failed")
+
+         # Start face swap process
        start()
+
+         # Save metadata
        metadata = dataset_handler.save_metadata(
            f"source_{timestamp}.jpg",
            f"target_{timestamp}.jpg",
+             f"OutputImage_{timestamp}.jpg",
+             timestamp,
        )
        metadata_path = os.path.join(folder_path, f"metadata_{timestamp}.json")
+         with open(metadata_path, "w") as f:
            json.dump(metadata, f, indent=4)
+
+         # Upload to Hugging Face
        upload_success = dataset_handler.upload_to_hf(folder_path, date_folder)
+         if not upload_success:
            print("Failed to upload files to Hugging Face dataset")
+
+         # Return output image
        if os.path.exists(output_path):
            output_image = Image.open(output_path)
+             return np.array(output_image)
        else:
+             raise FileNotFoundError("Output image not found")
+
    except Exception as e:
        print(f"Error in face swap process: {str(e)}")
+         raise gr.Error(f"Face swap failed: {str(e)}")
+     finally:
        if folder_path and os.path.exists(folder_path):
            shutil.rmtree(folder_path)

+ # Video face swap helper function
def swap_face_frame(frame_bgr, replacement_face_rgb, doFaceEnhancer):
    frame_rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
    temp_dir = "temp_faceswap_frame"
    os.makedirs(temp_dir, exist_ok=True)
    timestamp = datetime.now().strftime("%S-%M-%H-%d-%m-%Y")
+
    try:
+         source_path = os.path.join(temp_dir, f"source_{timestamp}.jpg")
+         target_path = os.path.join(temp_dir, f"target_{timestamp}.jpg")
+         output_path = os.path.join(temp_dir, f"OutputImage_{timestamp}.jpg")
+
+         Image.fromarray(frame_rgb).save(source_path)
+         Image.fromarray(replacement_face_rgb).save(target_path)
+
+         # Configure Roop globals
+         roop.globals.source_path = source_path
+         roop.globals.target_path = target_path
+         roop.globals.output_path = normalize_output_path(
+             source_path, target_path, output_path
+         )
+         roop.globals.frame_processors = (
+             ["face_swapper", "face_enhancer"] if doFaceEnhancer else ["face_swapper"]
+         )
+         roop.globals.headless = True
+         roop.globals.keep_fps = True
+         roop.globals.keep_audio = True
+         roop.globals.keep_frames = False
+         roop.globals.many_faces = False
+         roop.globals.video_encoder = "libx264"
+         roop.globals.video_quality = 18
+         roop.globals.max_memory = suggest_max_memory()
+         roop.globals.execution_providers = decode_execution_providers(["cuda"])
+         roop.globals.execution_threads = suggest_execution_threads()
+
+         start()
+
+         # Return swapped frame
+         if os.path.exists(output_path):
+             return np.array(Image.open(output_path))
+         else:
+             return frame_rgb
+     finally:
+         shutil.rmtree(temp_dir)
+
+ # Video face swap function
+ def swap_face_video(reference_face, replacement_face, video_input, similarity_threshold, doFaceEnhancer):
+     fa = FaceAnalysis()
+     fa.prepare(ctx_id=0)
+
+     ref_detections = fa.get(reference_face)
+     if not ref_detections:
+         raise gr.Error("No face detected in the reference image!")
+     ref_embedding = ref_detections[0].embedding
+
+     cap = cv2.VideoCapture(video_input)
+     if not cap.isOpened():
+         raise gr.Error("Failed to open input video!")
+
+     fps = cap.get(cv2.CAP_PROP_FPS)
+     width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+     height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+     output_video_path = "temp_faceswap_video.mp4"
+     fourcc = cv2.VideoWriter_fourcc(*"mp4v")
+     out = cv2.VideoWriter(output_video_path, fourcc, fps, (width, height))
+
+     frame_index = 0
+     while True:
+         ret, frame = cap.read()
+         if not ret:
+             break

+         frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+         detections = fa.get(frame_rgb)
+         swap_this_frame = any(
+             cosine_similarity(det.embedding, ref_embedding) >= similarity_threshold
+             for det in detections
+         )
+
+         if swap_this_frame:
+             swapped_frame_rgb = swap_face_frame(frame, replacement_face, doFaceEnhancer)
+             swapped_frame = cv2.cvtColor(swapped_frame_rgb, cv2.COLOR_RGB2BGR)
+         else:
+             swapped_frame = frame
+
+         out.write(swapped_frame)
+         frame_index += 1
+         print(f"Processed frame {frame_index}")
+
+     cap.release()
+     out.release()
+     return output_video_path
+
+ # Gradio interface
def create_interface():
    custom_css = """
    .container {

    }
    """
    title = "Face - Integrator"
+     description = "Upload source and target images to perform face swap."
+     article = """
    <div style="text-align: center; max-width: 650px; margin: 40px auto;">
+         <p>This tool performs face swapping with optional enhancement.</p>
    </div>
    """
+
    with gr.Blocks(title=title, css=custom_css) as app:
        gr.Markdown(f"<h1 style='text-align: center;'>{title}</h1>")
        gr.Markdown(description)
+
        with gr.Tabs():
            with gr.TabItem("FaceSwap Image"):
                with gr.Row():
+                     source_image = gr.Image(label="Source Image", type="numpy", sources=["upload"])
+                     target_image = gr.Image(label="Target Image", type="numpy", sources=["upload"])
+                     output_image = gr.Image(label="Output Image", type="numpy", interactive=False, elem_classes="output-image")
+
+                 enhance_checkbox = gr.Checkbox(label="Apply Enhancement?", info="Improve image quality", value=False)
+                 process_btn = gr.Button("Process Face Swap", variant="primary", size="lg")
+
                process_btn.click(
                    fn=swap_face,
                    inputs=[source_image, target_image, enhance_checkbox],
                    outputs=output_image,
+                     api_name="swap_face",
                )
+
            with gr.TabItem("FaceSwap Video"):
                gr.Markdown("<h2 style='text-align:center;'>FaceSwap Video</h2>")
                with gr.Row():
+                     ref_image = gr.Image(label="Reference Face", type="numpy", sources=["upload"])
+                     swap_image = gr.Image(label="Replacement Face", type="numpy", sources=["upload"])
+                     video_input = gr.Video(label="Input Video")
+                     similarity_threshold = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.7, label="Similarity Threshold")
+                     enhance_checkbox_video = gr.Checkbox(label="Apply Enhancement?", info="Improve image quality", value=False)
+                     video_output = gr.Video(label="Output Video")
+
+                 process_video_btn = gr.Button("Process FaceSwap Video", variant="primary", size="lg")
+
                process_video_btn.click(
                    fn=swap_face_video,
                    inputs=[ref_image, swap_image, video_input, similarity_threshold, enhance_checkbox_video],
                    outputs=video_output,
+                     api_name="swap_face_video",
                )
+
        gr.Markdown(article)
+
    return app

+ # Main function
def main():
    app = create_interface()
    app.launch(share=False)

if __name__ == "__main__":
+     main()
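
For reference, the updated interface still registers the two named API endpoints wired above (api_name="swap_face" and api_name="swap_face_video"). A minimal sketch of calling the image endpoint from Python with a recent gradio_client release; the Space id and local file names below are hypothetical placeholders, not part of this commit:

from gradio_client import Client, handle_file

# Hypothetical Space id; replace with the actual deployment of this app.
client = Client("Arrcttacsrks/face-integrator")
# Arguments follow the click() wiring: source image, target image, enhancement flag.
result = client.predict(
    handle_file("source.jpg"),
    handle_file("target.jpg"),
    False,
    api_name="/swap_face",
)
print(result)  # local path of the swapped output image returned by Gradio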