import os
import subprocess
import sys

import pkg_resources
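# Note: pkg_resources ships with setuptools and is deprecated in recent
# setuptools releases; importlib.metadata is the long-term replacement, but
# pkg_resources is kept here for compatibility with older environments.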


def install_package(package, version=None):
    """Install a package with pip, optionally pinned to an exact version."""
    package_spec = f"{package}=={version}" if version else package
    print(f"Installing {package_spec}...")
    try:
        # --no-cache-dir avoids reusing stale cached wheels when re-pinning.
        subprocess.check_call(
            [sys.executable, "-m", "pip", "install", "--no-cache-dir", package_spec]
        )
    except subprocess.CalledProcessError as e:
        print(f"Failed to install {package_spec}: {e}")
        raise
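# For example, install_package("tensorflow", "2.10.0") runs the equivalent of:
#   python -m pip install --no-cache-dir tensorflow==2.10.0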


def ensure_package(package, version=None):
    """Install `package` if it is missing or does not satisfy the version pin."""
    try:
        # pkg_resources matches the pip distribution name, which also covers
        # packages whose import name differs from their pip name
        # (e.g. opencv-python-headless, which is imported as cv2).
        pkg_resources.require(f"{package}=={version}" if version else package)
        print(f"{package} is already installed with a suitable version.")
    except (pkg_resources.VersionConflict, pkg_resources.DistributionNotFound) as e:
        print(f"Package requirement failed: {e}")
        install_package(package, version)


# Container and Kaggle environments usually come pre-provisioned, so the setup
# message is only shown elsewhere; the checks below are cheap no-ops whenever
# the pinned packages are already present.
if not os.path.exists("/.dockerenv") and not os.path.exists("/kaggle"):
    print("Setting up environment...")

# numpy and protobuf are pinned to versions known to work with TensorFlow 2.10.
ensure_package("numpy", "1.23.5")
ensure_package("protobuf", "3.20.3")
ensure_package("tensorflow", "2.10.0")

for pkg in ["gradio", "opencv-python-headless", "matplotlib", "pillow", "pandas"]:
    ensure_package(pkg)

ensure_package("deepface")


import shutil
import tempfile

import cv2
import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image

from deepface import DeepFace
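# DeepFace downloads model weights to ~/.deepface/weights on first use, so the
# first verification or analysis call can take noticeably longer.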


def verify_faces(img1, img2, threshold=0.70, model="VGG-Face"):
    """Compare two face images and report whether they show the same person."""
    temp_dir = tempfile.mkdtemp()
    img1_path = os.path.join(temp_dir, "image1.jpg")
    img2_path = os.path.join(temp_dir, "image2.jpg")

    # Gradio may deliver a numpy array or a PIL image; convert to RGB before
    # saving, since JPEG cannot store an alpha channel.
    for img, path in ((img1, img1_path), (img2, img2_path)):
        if isinstance(img, np.ndarray):
            img = Image.fromarray(img)
        img.convert("RGB").save(path)

    try:
        result = DeepFace.verify(
            img1_path=img1_path,
            img2_path=img2_path,
            model_name=model,
            distance_metric="cosine",
            threshold=threshold,
        )

        fig, ax = plt.subplots(1, 2, figsize=(10, 5))

        # OpenCV loads images as BGR; convert to RGB for matplotlib.
        img1_display = cv2.cvtColor(cv2.imread(img1_path), cv2.COLOR_BGR2RGB)
        img2_display = cv2.cvtColor(cv2.imread(img2_path), cv2.COLOR_BGR2RGB)

        ax[0].imshow(img1_display)
        ax[0].set_title("Image 1")
        ax[0].axis("off")

        ax[1].imshow(img2_display)
        ax[1].set_title("Image 2")
        ax[1].axis("off")

        verification_result = "✅ FACE MATCHED" if result["verified"] else "❌ FACE NOT MATCHED"
        # Map the cosine distance (0 = identical) onto a rough percentage score.
        confidence = round((1 - result["distance"]) * 100, 2)

        plt.suptitle(
            f"{verification_result}\nConfidence: {confidence}%\nDistance: {result['distance']:.4f}",
            fontsize=16, fontweight="bold",
            color="green" if result["verified"] else "red",
        )
        plt.tight_layout()

        # gr.JSON renders the result dict directly.
        return fig, result
    except Exception as e:
        error_msg = f"Error: {e}"
        if "could not be detected" in str(e) or "No face detected" in str(e):
            error_msg = "No face detected in one or both images. Please try different images."
        # Wrap the message in a dict so the gr.JSON output stays serializable.
        return None, {"error": error_msg}

    finally:
        # Clean up the temp directory whether verification succeeded or not.
        shutil.rmtree(temp_dir, ignore_errors=True)
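# For reference, DeepFace.verify returns a plain dict; recent releases include
# keys along the lines of "verified", "distance", "threshold", "model" and
# "similarity_metric" (the exact set varies by deepface version).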


def dominant_confidence(scores, dominant):
    """Return the confidence of the dominant label as a percentage string."""
    if isinstance(scores, dict):
        for label, conf in scores.items():
            if label.lower() == str(dominant).lower():
                return f"{conf:.1f}%"
    return "N/A"


def analyze_face(img, actions=("age", "gender", "race", "emotion")):
    """Run DeepFace attribute analysis and render the results as a figure."""
    if not actions:
        return None, {"error": "Select at least one attribute to analyze."}

    temp_dir = tempfile.mkdtemp()
    img_path = os.path.join(temp_dir, "analyze.jpg")

    # Convert to RGB before saving, since JPEG cannot store an alpha channel.
    if isinstance(img, np.ndarray):
        img = Image.fromarray(img)
    img.convert("RGB").save(img_path)

    try:
        results = DeepFace.analyze(
            img_path=img_path,
            actions=list(actions),
            enforce_detection=True,
            detector_backend="opencv",
        )

        # Some deepface versions return a single dict for one face; normalize
        # to a list so the code below handles both shapes.
        if not isinstance(results, list):
            results = [results]
        num_faces = len(results)

        fig = plt.figure(figsize=(14, 7))

        # OpenCV loads images as BGR; convert to RGB for matplotlib.
        img_display = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)

        # Left half of a 2x4 grid shows the image; the right half holds up to
        # four per-face summary panels.
        main_ax = plt.subplot2grid((2, 4), (0, 0), colspan=2, rowspan=2)
        main_ax.imshow(img_display)
        main_ax.set_title(f"Analyzed Image ({num_faces} face{'s' if num_faces > 1 else ''} detected)")
        main_ax.axis("off")

        for i, face_result in enumerate(results[:4]):  # the grid fits four panels
            age = face_result.get("age", "N/A")
            gender = face_result.get("dominant_gender", "N/A")
            race = face_result.get("dominant_race", "N/A")
            emotion = face_result.get("dominant_emotion", "N/A")

            gender_conf = dominant_confidence(face_result.get("gender"), gender)
            race_conf = dominant_confidence(face_result.get("race"), race)
            emotion_conf = dominant_confidence(face_result.get("emotion"), emotion)

            # Faces 1-2 go in grid row 0, faces 3-4 in row 1.
            ax = plt.subplot2grid((2, 4), (0 if i < 2 else 1, 2 + (i % 2)))
            text = (
                f"Face #{i + 1}\n\n"
                f"Age: {age}\n\n"
                f"Gender: {gender} ({gender_conf})\n\n"
                f"Race: {race} ({race_conf})\n\n"
                f"Emotion: {emotion} ({emotion_conf})"
            )
            ax.text(0.5, 0.5, text, ha="center", va="center", fontsize=11)
            ax.axis("off")
        plt.tight_layout()

        # JSON summary; unlike the plot, this covers up to eight faces.
        formatted_results = []
        for i, res in enumerate(results[:8]):
            formatted_results.append({
                "face_number": i + 1,
                "age": res.get("age", "N/A"),
                "gender": {
                    "dominant": res.get("dominant_gender", "N/A"),
                    "confidence": res.get("gender", {}),
                },
                "race": {
                    "dominant": res.get("dominant_race", "N/A"),
                    "confidence": res.get("race", {}),
                },
                "emotion": {
                    "dominant": res.get("dominant_emotion", "N/A"),
                    "confidence": res.get("emotion", {}),
                },
            })

        return fig, formatted_results
    except Exception as e:
        error_msg = f"Error: {e}"
        if "could not be detected" in str(e) or "No face detected" in str(e):
            error_msg = "No face detected in the image. Please try a different image."
        return None, {"error": error_msg}

    finally:
        shutil.rmtree(temp_dir, ignore_errors=True)
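# For reference, each element of DeepFace.analyze's result is a dict with a
# face "region" plus, for every requested action, a score dict and a dominant
# label (e.g. "emotion" and "dominant_emotion"); the exact fields vary by
# deepface version.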


with gr.Blocks(title="Face Recognition Tool", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # 🔍 Face Recognition Tool

    This tool provides two main features:

    - **Verify Faces**: Compare two images to check whether they show the same person
    - **Analyze Face**: Estimate age, gender, race, and emotion from a facial image
    """)

    with gr.Tabs():
        with gr.TabItem("Verify Faces"):
            with gr.Row():
                img1_input = gr.Image(label="First Image", type="pil")
                img2_input = gr.Image(label="Second Image", type="pil")

            with gr.Row():
                verify_threshold = gr.Slider(
                    minimum=0.1, maximum=0.9, value=0.6, step=0.05,
                    label="Distance Threshold (lower = stricter matching)",
                )
                verify_model = gr.Dropdown(
                    choices=["VGG-Face", "Facenet", "OpenFace", "DeepFace", "ArcFace"],
                    value="VGG-Face",
                    label="Face Recognition Model",
                )

            verify_button = gr.Button("Verify Faces", variant="primary")

            verify_result_plot = gr.Plot(label="Verification Result")
            verify_json = gr.JSON(label="Technical Details")

            verify_button.click(
                verify_faces,
                inputs=[img1_input, img2_input, verify_threshold, verify_model],
                outputs=[verify_result_plot, verify_json],
            )

        with gr.TabItem("Analyze Face"):
            analyze_img = gr.Image(label="Upload Image for Analysis", type="pil")
            actions_checkboxes = gr.CheckboxGroup(
                choices=["age", "gender", "race", "emotion"],
                value=["age", "gender", "race", "emotion"],
                label="Select Attributes to Analyze",
            )

            analyze_button = gr.Button("Analyze Face", variant="primary")

            analyze_result_plot = gr.Plot(label="Analysis Results")
            analyze_json = gr.JSON(label="Detailed Analysis")

            analyze_button.click(
                analyze_face,
                inputs=[analyze_img, actions_checkboxes],
                outputs=[analyze_result_plot, analyze_json],
            )


demo.launch()
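# demo.launch(share=True) would additionally expose a temporary public URL,
# which is useful when running inside a notebook or on a remote machine.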