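# Streamlit chat demo for PersianMind-v1.0 (universitytehran/PersianMind-v1.0).
# Launch with `streamlit run <script>.py`, substituting the actual filename of this script.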
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Load the model and tokenizer once and cache them so Streamlit reruns reuse them
@st.cache_resource
def load_model_and_tokenizer():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = AutoModelForCausalLM.from_pretrained(
        "universitytehran/PersianMind-v1.0",
        torch_dtype=torch.bfloat16,
        low_cpu_mem_usage=True,
        device_map={"": device},
    )
    tokenizer = AutoTokenizer.from_pretrained("universitytehran/PersianMind-v1.0")
    return model, tokenizer, device


model, tokenizer, device = load_model_and_tokenizer()

# Conversation template and system context (prompt format used on the PersianMind model card)
TEMPLATE = "{context}\nYou: {prompt}\nPersianMind: "
CONTEXT = "This is a conversation with PersianMind. It is an artificial intelligence model designed by a team of " \
    "NLP experts at the University of Tehran to help you with various tasks such as answering questions, " \
    "providing recommendations, and helping with decision making. You can ask it anything you want and " \
    "it will do its best to give you accurate and relevant information."

# Streamlit app
st.title("PersianMind Chat")
st.markdown("Chat with **PersianMind**, an AI model by the University of Tehran.")

# User input
prompt = st.text_input("Enter your question (in Persian):")

if st.button("Get Response"):
    if prompt.strip():
        with st.spinner("Generating response..."):
            model_input = TEMPLATE.format(context=CONTEXT, prompt=prompt)
            input_tokens = tokenizer(model_input, return_tensors="pt").to(device)
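            # Greedy decoding (do_sample=False) with a mild repetition penalty, up to 512 new tokens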
            generate_ids = model.generate(**input_tokens, max_new_tokens=512, do_sample=False, repetition_penalty=1.1)
            model_output = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
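            # The decoded output echoes the prompt, so slice it off to keep only the model's reply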
            response = model_output[len(model_input):]
        
        st.text_area("PersianMind's Response:", response, height=200)
    else:
        st.warning("Please enter a question.")