import subprocess
import sys
import pkg_resources
import warnings

warnings.filterwarnings("ignore")
def install_package(package, version=None):
    """Install a package via pip, pinning the version when one is given."""
    package_spec = f"{package}=={version}" if version else package
    print(f"Installing {package_spec}...")
    try:
        subprocess.check_call([sys.executable, "-m", "pip", "install",
                               "--no-cache-dir", package_spec])
    except subprocess.CalledProcessError as e:
        print(f"Failed to install {package_spec}: {e}")
        raise
# Required packages (installed at runtime if missing)
required_packages = {
    "mediapipe": None,
    "tensorflow": None,
    "opencv-python-headless": None,
    "gradio": None,
    "Pillow": None,
    "numpy": None
}

# pkg_resources normalizes distribution keys to lowercase, so compare the
# lowercased name (otherwise "Pillow" would be reinstalled on every run).
installed_packages = {pkg.key for pkg in pkg_resources.working_set}
for package, version in required_packages.items():
    if package.lower() not in installed_packages:
        install_package(package, version)
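
# Optional sanity check -- a minimal sketch, not part of the original app:
# print the version that actually resolved for each dependency so mismatches
# are visible in the Space's build logs. importlib.metadata is stdlib (3.8+).
import importlib.metadata

for pkg in required_packages:
    try:
        print(f"{pkg}: {importlib.metadata.version(pkg)}")
    except importlib.metadata.PackageNotFoundError:
        print(f"{pkg}: not installed")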
import numpy as np
import tensorflow as tf
import cv2
import mediapipe as mp
import gradio as gr
from PIL import Image
# Hand Tracker class - using the provided MediaPipe Hands implementation
class handTracker():
    def __init__(self, mode=False, maxHands=2, modelComplexity=1,
                 detectionConfidence=0.5, trackConfidence=0.5):
        self.mode = mode
        self.maxHands = maxHands
        self.modelComplexity = modelComplexity
        self.detectionConfidence = detectionConfidence
        self.trackConfidence = trackConfidence
        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands(
            static_image_mode=self.mode,
            max_num_hands=self.maxHands,
            model_complexity=self.modelComplexity,
            min_detection_confidence=self.detectionConfidence,
            min_tracking_confidence=self.trackConfidence)
        self.mpDraw = mp.solutions.drawing_utils
        self.mpDrawStyles = mp.solutions.drawing_styles
    def findAndDrawHands(self, frame):
        """Run MediaPipe hand detection and draw landmarks onto the frame."""
        RGBimage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        self.results = self.hands.process(RGBimage)
        if self.results.multi_hand_landmarks:
            for handLms in self.results.multi_hand_landmarks:
                self.mpDraw.draw_landmarks(
                    frame,
                    handLms,
                    self.mpHands.HAND_CONNECTIONS,
                    self.mpDrawStyles.get_default_hand_landmarks_style(),
                    self.mpDrawStyles.get_default_hand_connections_style())
        return frame
    def findLandmarks(self, frame, handNo=0):
        """Return the pixel-space landmark list and a padded, squared
        bounding box for the requested hand. Assumes findAndDrawHands
        (which populates self.results) has already been called."""
        landmarkList = []
        x_list = []
        y_list = []
        bbox = []
        if self.results.multi_hand_landmarks:
            if handNo < len(self.results.multi_hand_landmarks):
                myHand = self.results.multi_hand_landmarks[handNo]
                h, w, _ = frame.shape
                for id, lm in enumerate(myHand.landmark):
                    # Landmarks are normalized; convert to pixel coordinates.
                    cx, cy = int(lm.x * w), int(lm.y * h)
                    x_list.append(cx)
                    y_list.append(cy)
                    landmarkList.append([id, cx, cy])
                if x_list and y_list:
                    xmin, xmax = min(x_list), max(x_list)
                    ymin, ymax = min(y_list), max(y_list)
                    padding = 20
                    xmin = max(0, xmin - padding)
                    ymin = max(0, ymin - padding)
                    boxW = min(w - xmin, xmax - xmin + 2 * padding)
                    boxH = min(h - ymin, ymax - ymin + 2 * padding)
                    # Expand the shorter side so the crop is roughly square,
                    # clamped to the frame borders.
                    if boxW > boxH:
                        diff = boxW - boxH
                        ymin = max(0, ymin - diff // 2)
                        boxH = min(h - ymin, boxW)
                    elif boxH > boxW:
                        diff = boxH - boxW
                        xmin = max(0, xmin - diff // 2)
                        boxW = min(w - xmin, boxH)
                    bbox = [xmin, ymin, boxW, boxH]
        return landmarkList, bbox
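
# A minimal standalone usage sketch (illustrative only; "hand.jpg" is a
# hypothetical local file, not shipped with this Space). The Gradio handler
# below follows the same detect -> landmarks -> crop sequence.
#
#   tracker = handTracker(detectionConfidence=0.7)
#   img = cv2.imread("hand.jpg")  # BGR frame, as OpenCV loads it
#   annotated = tracker.findAndDrawHands(img.copy())
#   landmarks, bbox = tracker.findLandmarks(img)
#   print(f"{len(landmarks)} landmarks, bbox={bbox}")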
# Model loading with compatibility handling
def load_model_with_compatibility(model_path):
    """Load the Keras model; fall back to a compatibility shim, then to a
    freshly built (untrained) model if loading fails entirely."""
    try:
        model = tf.keras.models.load_model(model_path)
        print("✓ Model loaded successfully")
        return model
    except Exception as e:
        print(f"Standard loading failed: {e}")
        try:
            # Older Keras exports (e.g. Teachable Machine .h5 files) may
            # carry a 'groups' kwarg that newer DepthwiseConv2D versions
            # reject; strip it before delegating to the real layer.
            class CustomDepthwiseConv2D(tf.keras.layers.DepthwiseConv2D):
                def __init__(self, **kwargs):
                    kwargs.pop('groups', None)
                    super().__init__(**kwargs)

            custom_objects = {'DepthwiseConv2D': CustomDepthwiseConv2D}
            model = tf.keras.models.load_model(
                model_path,
                custom_objects=custom_objects,
                compile=False
            )
            print("✓ Model loaded in compatibility mode")
            return model
        except Exception as e2:
            print(f"Compatibility loading failed: {e2}")
            return create_simple_asl_model()
def create_simple_asl_model():
    """Build a small CNN as a last-resort fallback. It is untrained, so its
    predictions are effectively random until it is fitted on data."""
    labels = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I',
              'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S',
              'T', 'U', 'V', 'W', 'X', 'Y']
    print("Creating a new compatible model...")
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(224, 224, 3)),
        tf.keras.layers.MaxPooling2D((2, 2)),
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D((2, 2)),
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(len(labels), activation='softmax')
    ])
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model
model_path = "keras_model.h5"
model = load_model_with_compatibility(model_path)
model_input_shape = (224, 224, 3)

# Static ASL alphabet; J and Z are excluded because they require motion.
labels = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I',
          'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S',
          'T', 'U', 'V', 'W', 'X', 'Y']
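
# A minimal sketch (an assumption, not part of the original app): instead of
# hardcoding model_input_shape, it could be derived from the loaded model,
# which also covers a replacement model that expects a different size.
# Keras exposes this as model.input_shape == (None, H, W, C):
#
#   if model.input_shape and len(model.input_shape) == 4:
#       model_input_shape = tuple(model.input_shape[1:])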
def preprocess_hand_roi(hand_roi, target_shape):
    """Resize and normalize the hand crop to the model's input shape.
    Returns (batched model input, resized image)."""
    if target_shape[2] == 3:
        # Model expects 3 channels. The crop comes from an OpenCV (BGR)
        # frame, so convert to RGB rather than passing BGR through.
        if len(hand_roi.shape) == 2 or hand_roi.shape[2] == 1:
            hand_roi_rgb = cv2.cvtColor(hand_roi, cv2.COLOR_GRAY2RGB)
        else:
            hand_roi_rgb = cv2.cvtColor(hand_roi, cv2.COLOR_BGR2RGB)
        # cv2.resize expects (width, height)
        resized = cv2.resize(hand_roi_rgb, (target_shape[1], target_shape[0]))
        normalized = resized.astype('float32') / 255.0
    else:
        # Model expects a single channel
        if len(hand_roi.shape) > 2 and hand_roi.shape[2] > 1:
            hand_roi_gray = cv2.cvtColor(hand_roi, cv2.COLOR_BGR2GRAY)
        else:
            hand_roi_gray = hand_roi
        resized = cv2.resize(hand_roi_gray, (target_shape[1], target_shape[0]))
        normalized = resized.astype('float32') / 255.0
        if len(normalized.shape) == 2:
            normalized = normalized[..., np.newaxis]
    return np.expand_dims(normalized, axis=0), resized
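
# Quick shape check -- a minimal sketch using a synthetic crop, not part of
# the original app. Handy for verifying the preprocessing contract:
#
#   batch, _ = preprocess_hand_roi(
#       np.zeros((120, 90, 3), dtype=np.uint8), model_input_shape)
#   assert batch.shape == (1, 224, 224, 3)
#   assert batch.dtype == np.float32 and batch.max() <= 1.0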
def process_image(input_image):
    """Gradio handler: detect a hand, crop it, classify, and annotate."""
    # Gradio delivers a PIL (RGB) image; OpenCV works in BGR.
    frame = cv2.cvtColor(np.array(input_image), cv2.COLOR_RGB2BGR)
    tracker = handTracker(detectionConfidence=0.7)
    frame_with_hands = tracker.findAndDrawHands(frame.copy())
    landmarks, bbox = tracker.findLandmarks(frame)
    if not bbox:
        return "No hand detected", None
    x, y, w, h = bbox
    hand_roi = frame[y:y + h, x:x + w]
    if hand_roi.size == 0:
        return "No hand detected", None
    cv2.rectangle(frame_with_hands, (x, y), (x + w, y + h), (0, 255, 0), 2)
    model_input, _ = preprocess_hand_roi(hand_roi, model_input_shape)
    try:
        prediction = model.predict(model_input, verbose=0)[0]
        predicted_class = np.argmax(prediction)
        confidence = np.max(prediction)
        letter = labels[predicted_class] if predicted_class < len(labels) else "Unknown"
    except Exception as e:
        return f"Prediction error: {e}", None
    result_text = f"Prediction: {letter} (Confidence: {confidence:.2f})"
    cv2.putText(frame_with_hands, result_text, (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
    output_image = cv2.cvtColor(frame_with_hands, cv2.COLOR_BGR2RGB)
    return result_text, Image.fromarray(output_image)
# Gradio interface
interface = gr.Interface(
    fn=process_image,
    inputs=gr.Image(label="Upload Hand Sign Image", type="pil"),
    outputs=[
        gr.Text(label="Prediction Result"),
        gr.Image(label="Processed Image")
    ],
    title="ASL Sign Language Recognition",
    description="Upload an image of a hand sign to recognize the ASL letter."
)

if __name__ == "__main__":
    interface.launch(share=True)
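
# Deployment note (an assumption about the target environment): when this
# file runs as a Hugging Face Space, the app is served directly and
# share=True is unnecessary; interface.launch() alone suffices there.
# share=True mainly matters for local runs where a public tunnel URL is
# wanted.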