# app_arabic.py: Streamlit app for improving Arabic sentences and chatting about them
import streamlit as st
from transformers import pipeline
# Cache the model to load it only once
@st.cache_resource
def load_generator():
    # device=-1 forces CPU inference in the transformers pipeline
    return pipeline("text-generation", model="aubmindlab/aragpt2-base", device=-1)
# Load the text generation pipeline
generator = load_generator()
# App title
st.title("Arabic Sentence Improver & Chat App")
# Sentence Correction Section
st.subheader("Improve an Arabic Sentence")
user_input = st.text_input("Enter an Arabic sentence to improve:", "أنا ذهبت الحديقة")
if st.button("Improve Sentence"):
    if user_input:
        # Prompt the model to correct the sentence
        prompt = f"Correct this Arabic sentence: '{user_input}' to"
        try:
            # temperature only takes effect when sampling is enabled
            output = generator(prompt, max_new_tokens=50, temperature=0.7, do_sample=True)
            response = output[0]["generated_text"]
            # The pipeline echoes the prompt; keep only the generated continuation
            corrected_sentence = response[len(prompt):].strip() if response.startswith(prompt) else response
            st.session_state.corrected_sentence = corrected_sentence
            st.success(f"Improved Sentence: {corrected_sentence}")
        except Exception as e:
            st.error(f"Error: {e}")
    else:
        st.warning("Please enter a sentence first!")
# Chat Section
st.subheader("Chat About the Corrected Sentence")
if "corrected_sentence" in st.session_state:
    chat_input = st.text_input("Ask something about the corrected sentence:", key="chat_input")
    if st.button("Send"):
        if chat_input:
            # Prompt the model with context for chatting
            prompt = f"The corrected sentence is: '{st.session_state.corrected_sentence}'. User asks: '{chat_input}'"
            try:
                output = generator(prompt, max_new_tokens=100, temperature=0.7, do_sample=True)
                response = output[0]["generated_text"]
                # Drop the echoed prompt so only the model's reply is displayed
                reply = response[len(prompt):].strip() if response.startswith(prompt) else response
                st.write(f"**You:** {chat_input}")
                st.write(f"**LLM:** {reply}")
            except Exception as e:
                st.error(f"Error in chat: {e}")
        else:
            st.warning("Please enter a question!")
else:
    st.write("Improve a sentence first to start chatting!")