import os

import torch
import gradio as gr
from huggingface_hub import login
from transformers import AutoModelForCausalLM, AutoProcessor
from PIL import Image

# 🔐 Hugging Face authentication (token read from the environment)
hf_token = os.environ.get("HUGGINGFACE_API_KEY")
if hf_token:
    login(hf_token)

model_id = "ContactDoctor/Bio-Medical-MultiModal-Llama-3-8B-V1"
# ✅ Load the model
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    trust_remote_code=True,
    torch_dtype=torch.float16,
    device_map="auto",
)

# ✅ Load the processor
processor = AutoProcessor.from_pretrained(
    model_id,
    trust_remote_code=True,
)
# 🧠 Main inference function
def generate_answer(image, question):
    print("📥 Question received:", question)
    print("🖼️ Image provided?", image is not None)

    # Fall back to a generic prompt when no question is given
    if not question or question.strip() == "":
        question = "Please describe this medical image."

    prompt = f"### User: {question}\n### Assistant:"

    try:
        if image is None:
            # Text-only request
            inputs = processor(prompt, return_tensors="pt").to(model.device)
        else:
            print("✅ Image received, processing...")
            inputs = processor(prompt, images=image, return_tensors="pt").to(model.device)

        print("🚀 Generating...")
        outputs = model.generate(**inputs, max_new_tokens=256)
        decoded = processor.tokenizer.decode(outputs[0], skip_special_tokens=True)
        print("✅ Answer generated.")

        # Strip the echoed prompt so only the assistant's answer is returned
        return decoded[len(prompt):].strip()
    except Exception as e:
        print("❌ Exception caught:", str(e))
        return f"⚠️ Internal Error: {str(e)}"
# 🎛️ Gradio interface
demo = gr.Interface(
    fn=generate_answer,
    inputs=[
        gr.Image(type="pil", label="🩻 Medical image (optional)"),
        gr.Textbox(label="❓ Your medical question"),
    ],
    outputs="text",
    title="🧠 ContactDoctor - Biomedical LLM",
    description="Multimodal medical assistant. Ask a question or upload an image.",
)

# 🚀 Launch
demo.launch()
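
# ----------------------------------------------------------------------
# Optional usage sketch: calling this interface programmatically once the
# app is running. This is a hedged example, assuming a recent
# `gradio_client`, the default local URL, and the default "/predict"
# endpoint of a single gr.Interface; the image path is purely illustrative.
#
# from gradio_client import Client, handle_file
#
# client = Client("http://127.0.0.1:7860/")
# answer = client.predict(
#     handle_file("example_xray.png"),       # 🩻 medical image
#     "What abnormality is visible here?",   # ❓ question
#     api_name="/predict",
# )
# print(answer)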