from ultralytics import YOLO
from PIL import Image
import gradio as gr
from huggingface_hub import snapshot_download
import os


# Load the YOLO detection model from a Hugging Face Hub repository
def load_model(repo_id):
    download_dir = snapshot_download(repo_id)
    print(download_dir)
    model_path = os.path.join(download_dir, "best_int8_openvino_model")
    print(model_path)
    detection_model = YOLO(model_path, task='detect')
    return detection_model


# Run object detection on a single PIL image and return the annotated result
def predict(pil_img, conf_thresh, iou_thresh):
    source = pil_img
    results = detection_model.predict(source, conf=conf_thresh, iou=iou_thresh)
    annotated_img = results[0].plot()
    return Image.fromarray(annotated_img[..., ::-1])  # Convert BGR to RGB


REPO_ID = "qiqiyuan/glasses_and_mouth"
detection_model = load_model(REPO_ID)


# Build the Gradio interface
def iface():
    interface = gr.Interface(
        fn=predict,  # Function called on each submission
        inputs=[  # List of input components
            gr.Image(type="pil", label="Input Image"),
            gr.Slider(minimum=0.0, maximum=1.0, step=0.05, value=0.5, label="Confidence Threshold"),
            gr.Slider(minimum=0.0, maximum=1.0, step=0.05, value=0.5, label="IoU Threshold"),
        ],
        outputs=gr.Image(type="pil", label="Output Image"),  # Annotated detection result
        title="Object Detection for Glasses and Mouth (Human)",
        description="Upload an image to detect glasses and mouth (Human) using a pre-trained YOLO model",
        theme="huggingface",
    )
    return interface


# Launch the Gradio app
app_iface = iface()
app_iface.launch(share=True)
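
# Optional sanity check: a minimal sketch of calling predict() directly, outside of
# Gradio. It assumes a local image named "sample.jpg" exists (a hypothetical file name,
# not part of the original app). Uncomment and place it above the launch() call if you
# want it to run before the server starts blocking.
# test_img = Image.open("sample.jpg")
# annotated = predict(test_img, conf_thresh=0.5, iou_thresh=0.5)
# annotated.save("sample_annotated.jpg")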