import streamlit as st
import random
import time
import json

from llm_loader import get_llm_response
from langchain.prompts import PromptTemplate
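# NOTE: llm_loader is a local helper module whose source is not shown here.
# From the call sites below, it is assumed to expose roughly:
#   get_llm_response(query: str, template_type: str) -> str
# where template_type is 'lesson' or 'user'. PromptTemplate is imported but
# appears unused in this file.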
# Student profile (hard-coded for now)
age = 15
name = 'Fidva'
grade = 10

# Prompt-template placeholders; unused in this file (presumably consumed by llm_loader)
context = '{context}'
question = '{question}'

# Paths to the FAISS vector store and the chapter list for Geography, grade 10
DB_FAISS_PATH = 'refbooks-vectorstore/geo-10-1'
JSON_FILE_PATH = 'data/geo-10/geo-chapter-list.json'

# Load the chapter list and take the subtopics of the first chapter
with open(JSON_FILE_PATH, 'r') as file:
    book_data = json.load(file)
chapter_data = book_data[0]
subtopics = chapter_data['subtopics']
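# Assumed layout of geo-chapter-list.json, inferred from the lookups above
# (the 'subtopics' key is confirmed by the code; other keys are hypothetical):
# [
#   {"chapter": "...", "subtopics": ["<subtopic 1>", "<subtopic 2>", ...]},
#   ...
# ]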
######## Streamlit App ########
st.title("Learn.AI")

# Add custom CSS properties
st.markdown(
    """
    <style>
    @import url('https://fonts.googleapis.com/css2?family=Sometype+Mono:wght@400;500;600;700&display=swap');
    :root {
        --color-bg: #03045E;
        --color-footer: #0077B6;
        --color-component: #00B4D8;
        --font-family: 'Sometype Mono', sans-serif;
        --font-color: #FAFAFA;
    }
    body, .stTextArea textarea {
        font-family: var(--font-family) !important;
        background-color: var(--color-bg);
        color: var(--font-color) !important;
    }
    h1, p, li {
        font-family: var(--font-family);
        color: var(--font-color);
    }
    textarea {
        font-family: var(--font-family);
        color: var(--font-color);
        background-color: #FAFAFA;
    }
    .stApp {
        background-color: var(--color-bg);
    }
    .stChatMessage:nth-child(even) .stMarkdown {
        background-color: var(--color-bg);
    }
    .stChatFloatingInputContainer {
        background-color: var(--color-bg);
        color: var(--font-color);
    }
    /* Streamlit-generated class names; these change between Streamlit versions */
    .st-emotion-cache-1avcm0n.ezrtsby2 {
        background-color: #050647;
    }
    .st-emotion-cache-10trblm {
        font-family: var(--font-family);
    }
    .st-emotion-cache-nahz7x {
        font-family: var(--font-family);
    }
    </style>
    """,
    unsafe_allow_html=True
)
# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Track which subtopic the lesson is up to
if "lesson_count" not in st.session_state:
    st.session_state.lesson_count = 0
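# Streamlit reruns this entire script on every user interaction, so any state
# that must survive between turns (chat history, lesson progress) has to live
# in st.session_state rather than in ordinary module-level variables.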
# GreetingMessage = f'Hi there, {name}! Let\'s start the lesson! Type \'start\' when you\'re ready to begin!'
# st.session_state.messages.append({"role": "assistant", "content": GreetingMessage})
# Accept user input
if prompt := st.chat_input(f"Hi there, {name}! Let's start the lesson! Type 'start' when you're ready to begin!"):
    print("USER:", prompt)
    # Lesson keywords advance to the next subtopic; anything else is treated
    # as a question. The bounds check prevents an IndexError once the final
    # subtopic has been taught (later keywords fall through to the Q&A branch).
    if prompt.lower() in ('start', 'continue', 'next') and st.session_state.lesson_count < len(subtopics):
        print("TOPIC NAME:", subtopics[st.session_state.lesson_count])
        # Display assistant response in chat message container
        # (the lesson keyword itself is not echoed back into the chat history)
        with st.chat_message("assistant"):
            placeholder_list = ['Writing notes...', 'Revising topic...', 'Clearing blackboard...', 'Formulating Lesson Plan...', 'Getting ready for doubts...']
            placeholder_text = random.choice(placeholder_list)
            with st.spinner(placeholder_text):
                assistant_response = get_llm_response(subtopics[st.session_state.lesson_count], template_type='lesson')
                st.session_state.lesson_count += 1
            message_placeholder = st.empty()
            full_response = ""
            # Simulate a streamed response, one word at a time
            # (newer Streamlit releases also offer st.write_stream for this pattern)
            print(assistant_response)
            for chunk in assistant_response.split():
                full_response += chunk + " "
                time.sleep(0.05)
                # Add a blinking cursor to simulate typing
                message_placeholder.markdown(full_response + "▌")
            message_placeholder.markdown(assistant_response)
        st.session_state.messages.append({"role": "assistant", "content": assistant_response})
    else:
        # Add user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})
        # Display user message in chat message container
        with st.chat_message("user"):
            st.markdown(prompt)
        # Display assistant response in chat message container
        with st.chat_message("assistant"):
            placeholder_list = ['Thinking...', 'Reading Textbook...', 'Clearing blackboard...', 'Revising Topics...', 'Refilling pen...']
            placeholder_text = random.choice(placeholder_list)
            with st.spinner(placeholder_text):
                assistant_response = get_llm_response(prompt, template_type='user')
            message_placeholder = st.empty()
            full_response = ""
            # Simulate a streamed response, one word at a time
            print(assistant_response)
            for chunk in assistant_response.split():
                full_response += chunk + " "
                time.sleep(0.05)
                # Add a blinking cursor to simulate typing
                message_placeholder.markdown(full_response + "▌")
            message_placeholder.markdown(assistant_response)
        # Add assistant response to chat history
        st.session_state.messages.append({"role": "assistant", "content": assistant_response})
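# To try the app locally (assuming this file is saved as app.py):
#   streamlit run app.py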