import streamlit as st
import random
import time
import json

from llm_loader import get_llm_response

# Learner profile and prompt placeholders (unused in this file; presumably
# consumed by the prompt templates inside llm_loader)
age = 15
name = 'Fidva'
grade = 10
context = '{context}'
question = '{question}'

# Vector store path (not referenced directly here; presumably used by the
# retrieval backend) and the chapter metadata file for this subject
DB_FAISS_PATH = 'refbooks-vectorstore/geo-10-1'
JSON_FILE_PATH = 'data/geo-10/geo-chapter-list.json'

# Load chapter metadata; the first entry is expected to carry a 'subtopics'
# list, e.g. [{"chapter": "...", "subtopics": ["...", "..."]}]
with open(JSON_FILE_PATH, 'r') as file:
    book_data = json.load(file)

chapter_data = book_data[0]
subtopics = chapter_data['subtopics']


def stream_response(text: str) -> None:
    """Simulate a typing stream: reveal `text` word by word with a cursor."""
    message_placeholder = st.empty()
    full_response = ""
    for chunk in text.split():
        full_response += chunk + " "
        time.sleep(0.05)
        # Add a blinking cursor to simulate typing
        message_placeholder.markdown(full_response + "▌")
    message_placeholder.markdown(text)


######## Streamlit App ########
st.title("Learn.AI")

# Add custom CSS properties (placeholder; the style block is currently empty)
st.markdown(
    """
    """,
    unsafe_allow_html=True,
)

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if "lesson_count" not in st.session_state:
    st.session_state.lesson_count = 0

# GreetingMessage = f"Hi there, {name}! Let's start the lesson! Type 'start' when you're ready to begin!"
# st.session_state.messages.append({"role": "assistant", "content": GreetingMessage})

# Accept user input
if prompt := st.chat_input(f"Hi there, {name}! Let's start the lesson! Type 'start' when you're ready to begin!"):
    print("USER:", prompt)
    if prompt.lower() in ('start', 'continue', 'next'):
        # Lesson mode: teach the next subtopic in sequence.
        if st.session_state.lesson_count >= len(subtopics):
            # Guard against indexing past the last subtopic (IndexError otherwise)
            with st.chat_message("assistant"):
                end_message = "That was the last subtopic in this chapter. Ask me anything to revise!"
                st.markdown(end_message)
            st.session_state.messages.append({"role": "assistant", "content": end_message})
        else:
            topic = subtopics[st.session_state.lesson_count]
            print("TOPIC NAME:", topic)
            # Display assistant response in chat message container
            with st.chat_message("assistant"):
                placeholder_list = ['Writing notes...', 'Revising topic...', 'Clearing blackboard...',
                                    'Formulating Lesson Plan...', 'Getting ready for doubts...']
                placeholder_text = random.choice(placeholder_list)
                with st.spinner(placeholder_text):
                    assistant_response = get_llm_response(topic, template_type='lesson')
                    st.session_state.lesson_count += 1
                print(assistant_response)
                # Simulate stream of response with a small delay between words
                stream_response(assistant_response)
            st.session_state.messages.append({"role": "assistant", "content": assistant_response})
    else:
        # Q&A mode: treat the input as a doubt/question for the tutor.
        # Add user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})
        # Display user message in chat message container
        with st.chat_message("user"):
            st.markdown(prompt)
        # Display assistant response in chat message container
        with st.chat_message("assistant"):
            placeholder_list = ['Thinking...', 'Reading Textbook...', 'Clearing blackboard...',
                                'Revising Topics...', 'Refilling pen...']
            placeholder_text = random.choice(placeholder_list)
            with st.spinner(placeholder_text):
                assistant_response = get_llm_response(prompt, template_type='user')
            print(assistant_response)
            # Simulate stream of response with a small delay between words
            stream_response(assistant_response)
        # Add assistant response to chat history
        st.session_state.messages.append({"role": "assistant", "content": assistant_response})
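

# ---------------------------------------------------------------------------
# Reference sketch (hypothetical): the interface this app assumes from
# llm_loader.get_llm_response. The real implementation lives in llm_loader.py
# and is not shown in this file; the stub below only documents the expected
# signature (a text argument plus template_type of 'lesson' or 'user') and can
# be swapped in at the import site to exercise the UI without an LLM backend.
def _get_llm_response_stub(text: str, template_type: str = 'user') -> str:
    """Return a canned reply shaped like get_llm_response(text, template_type)."""
    if template_type == 'lesson':
        # 'lesson' mode receives a subtopic name and returns a mini-lesson.
        return f"Here is a short lesson on '{text}'. (stub output)"
    # 'user' mode receives a free-form question and returns an answer.
    return f"Good question! Regarding '{text}': (stub output)"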