import streamlit as st
import tensorflow as tf
from PIL import Image
import numpy as np

from gtts import gTTS
from transformers import BioGptTokenizer, AutoModelForCausalLM, pipeline
from deep_translator import GoogleTranslator, MyMemoryTranslator
import tempfile
import os
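

# The app is organised into four tabs: image classification, a chatbot that
# describes the predicted condition, translation to German with a summary, and
# audio playback of the chatbot response.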
st.set_page_config(page_title="Medical Image Classifier & Chatbot", layout="wide")
st.title("Medical Image Classifier & Chatbot")
st.sidebar.header("Medical Analysis")

# Class indices produced by the classifier, mapped to human-readable labels.
keys = {'0': 'Cyst', '1': 'Normal', '2': 'Stone', '3': 'Tumor'}
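

# Fallback translation helper: tries each backend in turn and returns the first
# successful result. It is not wired into the UI below and is kept as a utility.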
def translate_text(text, source='auto', target='es'):
    translators = [GoogleTranslator, MyMemoryTranslator]

    for translator in translators:
        try:
            translation = translator(source=source, target=target).translate(text)
            print(f"{translator.__name__}: {translation}")
            return translation
        except Exception as e:
            print(f"{translator.__name__} failed: {e}")

    print("All translators failed. No translation found.")
    return None
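

# Display the uploaded scan, preprocess it (resize to 150x150, scale to [0, 1],
# force three channels), and return the predicted class label.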
def Analysing_image(st, model, image_file):
    try:
        image = Image.open(image_file)
        st.image(image, caption="Uploaded Image", use_container_width=True)

        # Match the classifier's expected input size and scale pixels to [0, 1].
        img_resized = image.resize((150, 150))
        img_array = np.array(img_resized).astype('float32') / 255.0

        # Ensure a three-channel image whatever the uploaded format.
        if img_array.ndim == 2:
            # Grayscale: replicate the single channel.
            img_array = np.stack((img_array,) * 3, axis=-1)
        elif img_array.shape[2] == 1:
            img_array = np.concatenate([img_array] * 3, axis=-1)
        elif img_array.shape[2] == 4:
            # RGBA (e.g. a PNG with transparency): drop the alpha channel.
            img_array = img_array[:, :, :3]

        # Add a batch dimension before prediction.
        img_batch = np.expand_dims(img_array, axis=0)

        predictions = model.predict(img_batch)
        st.write("Prediction probabilities:", predictions)

        predicted_class = np.argmax(predictions, axis=1)
        st.write("Prediction:", keys[str(predicted_class[0])])
        return keys[str(predicted_class[0])]
    except Exception as e:
        st.error(f"Could not analyse the image: {e}")
        return None
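

# Convert text to speech with gTTS, saving it to a temporary MP3 file whose path
# is returned to the caller.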
def text_to_speech(text):
    tts = gTTS(text)
    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
    tts.save(temp_file.name)
    return temp_file.name
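

# Heavier Hugging Face models are wrapped in st.cache_resource so they are only
# loaded once per session.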
@st.cache_resource
def load_generator():
    # BioGPT causal language model (not currently used by the UI below).
    tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
    model = AutoModelForCausalLM.from_pretrained("microsoft/biogpt")
    return model, tokenizer


@st.cache_resource
def load_summarizer():
    summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
    return summarizer
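

# EleutherAI/gpt-neo-125M is a decoder-only model, so it is served through the
# "text-generation" task rather than "text2text-generation".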
generator = pipeline("text-generation", model="EleutherAI/gpt-neo-125M")
summarizer = load_summarizer()
translator = pipeline("translation_en_to_de", model="Helsinki-NLP/opus-mt-en-de")
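

# Sidebar image uploader and the four main tabs; predict_class carries the
# classification result from the first tab into the chatbot tab.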
image_file = st.sidebar.file_uploader("Upload an Image (.jpg, .jpeg, .png)", type=["jpg", "jpeg", "png"])

tab1, tab2, tab3, tab4 = st.tabs(["Classification", "Chatbot", "Translation & Summary", "Audio"])
predict_class = None
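

# Tab 1: load a Keras classifier (uploaded or bundled) and classify the image.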
with tab1:
    model_file = st.sidebar.file_uploader("Upload your Keras model (.h5 file)", type=["h5"])

    if model_file is not None:
        # Persist the upload to disk so Keras can load it from a file path.
        with open("uploaded_model.h5", "wb") as f:
            f.write(model_file.getbuffer())
        st.sidebar.success("Model uploaded successfully!")

        try:
            model = tf.keras.models.load_model("uploaded_model.h5")
            st.sidebar.info("Model loaded successfully!")
        except Exception as e:
            st.sidebar.error("Error loading model: " + str(e))
            st.stop()
    else:
        st.info("Using the pretrained model.")
        model = tf.keras.models.load_model("./models/medical_classifier/medical_classifier.h5")

    if image_file is not None:
        predict_class = Analysing_image(st, model, image_file)
    else:
        st.info("Please upload an image to classify.")
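

# Tab 2: generate a short description of the predicted condition with GPT-Neo.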
with tab2:
    if predict_class is not None:
        # Build a default prompt from the predicted class.
        if predict_class == 'Normal':
            prompt = "What does it mean when the doctor says my MRI is normal?"
        elif predict_class == "Cyst":
            prompt = "What is a brain cystic lesion?"
        else:
            prompt = f"What is a brain {predict_class}?"

        user_input = st.text_area("Enter your prompt:", prompt)
        max_length = st.slider("Max length of generated text", 50, 500, 100)

        if st.button("Generate Text"):
            with st.spinner("Generating text..."):
                output = generator(user_input, max_length=max_length, num_return_sequences=1)
                generated_text = output[0]['generated_text']

            st.subheader("Description:")
            st.write(generated_text)

            # Keep the response so the translation, summary, and audio tabs can reuse it.
            st.session_state.chat_response = generated_text
    else:
        st.info("Classify an image in the first tab to start the chatbot.")
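

# Tab 3: translate the chatbot response to German and optionally summarize it.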
with tab3:
    st.header("Translation to German & Summary")
    if 'chat_response' in st.session_state and st.session_state.chat_response:
        medical_terms = st.session_state.chat_response

        translation = translator(medical_terms)
        outputs = 'Unable to translate, please retry ...'
        if translation:
            outputs = translation[0]['translation_text']
        else:
            st.info("Unable to translate the text. Please try to refresh.")
            if st.button('Refresh'):
                translation = translator(medical_terms)
                outputs = translation[0]['translation_text']

        st.write("German Translation:", outputs)

        if st.button("Generate Summary"):
            if outputs != 'Unable to translate, please retry ...':
                with st.spinner("Generating summary..."):
                    # Summarize the German translation with BART.
                    summary = summarizer(outputs, max_length=85, min_length=60, do_sample=False)

                st.subheader("Generated Summary:")
                st.write(summary[0]['summary_text'])
            else:
                st.warning("Translation is not available yet; please refresh before summarizing.")
    else:
        st.info("No chatbot response available for translation and summary.")
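

# Tab 4: read the chatbot response aloud with gTTS and offer it as a download.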
with tab4:
    st.header("Audio Output")
    if 'chat_response' in st.session_state and st.session_state.chat_response:
        audio_file = text_to_speech(st.session_state.chat_response)
        st.audio(audio_file)

        # Offer the generated speech as a download, then remove the temporary file.
        with open(audio_file, "rb") as file:
            st.download_button(
                label="Download audio",
                data=file,
                file_name="chat_response.mp3",
                mime="audio/mpeg"
            )
        os.unlink(audio_file)
    else:
        st.info("No chatbot response available for audio conversion.")
print("Streamlit app updated with translation, summarization, and audio features.") |