# NOTE(review): the lines previously here ("Spaces: Sleeping", file size,
# commit hash, and a 1..148 line-number gutter) were HuggingFace Spaces page
# scrape artifacts, not source code; converted to this comment so the file
# parses as Python.
import streamlit as st
import random
import time
import json
from llm_loader import get_llm_response
from langchain.prompts import PromptTemplate
# Learner profile used to personalise the chat greeting/prompts.
age = 15
name = 'Fidva'
grade = 10

# Raw LangChain prompt placeholders (substituted by the prompt template at query time).
context = '{context}'
question = '{question}'

# Location of the FAISS vector store and the chapter/subtopic manifest
# for the grade-10 Geography book.
DB_FAISS_PATH = 'refbooks-vectorstore/geo-10-1'
JSON_FILE_PATH = 'data/geo-10/geo-chapter-list.json'

# Load the chapter manifest. Explicit encoding avoids platform-dependent
# default-codec decoding of the JSON file (e.g. cp1252 on Windows).
with open(JSON_FILE_PATH, 'r', encoding='utf-8') as file:
    book_data = json.load(file)

chapter_data = book_data[0]  # first chapter only — TODO confirm multi-chapter support
subtopics = chapter_data['subtopics']  # ordered lesson subtopics for that chapter
######## Streamlit App ########
st.title("Learn.AI")

# Inject custom CSS (Sometype Mono font + dark colour theme).
# `unsafe_allow_html=True` is required for the raw <style> tag to take effect.
st.markdown(
    """
    <style>
    @import url('https://fonts.googleapis.com/css2?family=Sometype+Mono:wght@400;500;600;700&display=swap');
    :root {
        --color-bg: #03045E;
        --color-footer: #0077B6;
        --color-component: #00B4D8;
        --font-family: 'Sometype Mono', sans-serif;
        --font-color: #FAFAFA;
    }
    body, .stTextArea textarea {
        /* fixed: '!important' belongs before the semicolon; the previous
           'var(--font-family); !important;' was invalid CSS and the flag
           was silently ignored by the browser */
        font-family: var(--font-family) !important;
        background-color: var(--color-bg);
        color: var(--font-color) !important;
    }
    h1, p, li {
        font-family: var(--font-family);
        color: var(--font-color);
    }
    textarea {
        font-family: var(--font-family);
        color: var(--font-color);
        background-color: #FAFAFA;
    }
    .stApp {
        background-color: var(--color-bg);
    }
    .stChatMessage:nth-child(even) .stMarkdown {
        background-color: var(--color-bg);
    }
    .stChatFloatingInputContainer {
        background-color: var(--color-bg);
        color: var(--font-color);
    }
    /* NOTE(review): 'ezrtsby2' has no leading dot, so it matches an element
       named <ezrtsby2>, not a class — probably should be '.ezrtsby2';
       confirm against the rendered Streamlit DOM before changing. */
    .st-emotion-cache-1avcm0n ezrtsby2 {
        background-color: #050647;
    }
    /* NOTE(review): the two rules below only redefine the --font-family
       variable without applying it; they have no visible effect unless a
       descendant reads the variable — verify intent. */
    .st-emotion-cache-10trblm {
        --font-family: 'Sometype Mono', sans-serif;
    }
    .st-emotion-cache-nahz7x {
        --font-family: 'Sometype Mono', sans-serif;
    }
    </style>
    """,
    unsafe_allow_html=True
)
# Seed per-session state exactly once: the chat transcript and the index of
# the next lesson subtopic. (Streamlit reruns the whole script per event.)
for _key, _default in (("messages", []), ("lesson_count", 0)):
    if _key not in st.session_state:
        st.session_state[_key] = _default

# Replay the stored conversation so the transcript survives app reruns.
for _msg in st.session_state.messages:
    with st.chat_message(_msg["role"]):
        st.markdown(_msg["content"])

# Disabled greeting (kept from the original for reference):
# GreetingMessage = f'Hi there, {name}! Let\'s start the lesson! Type \'start\' when you\'re ready to begin!'
# st.session_state.messages.append({"role": "assistant", "content": GreetingMessage})
# Accept user input
def _stream_response(placeholder_texts, fetch_response):
    """Fetch an LLM reply under a random spinner caption and stream it word-by-word.

    Args:
        placeholder_texts: candidate spinner captions; one is chosen at random.
        fetch_response: zero-arg callable returning the assistant's reply text.

    Returns:
        The full assistant reply string.
    """
    with st.spinner(random.choice(placeholder_texts)):
        assistant_response = fetch_response()
    message_placeholder = st.empty()
    full_response = ""
    print(assistant_response)
    # Simulate a typing stream with a short delay per word.
    for chunk in assistant_response.split():
        full_response += chunk + " "
        time.sleep(0.05)
        # Add a blinking cursor to simulate typing
        message_placeholder.markdown(full_response + "β")
    message_placeholder.markdown(assistant_response)
    return assistant_response


if prompt := st.chat_input(f'Hi there, {name}! Let\'s start the lesson! Type \'start\' when you\'re ready to begin!'):
    # Clamp the lesson index: the counter is incremented on every lesson
    # request, so without the clamp exhausting `subtopics` raised IndexError.
    lesson_idx = min(st.session_state.lesson_count, len(subtopics) - 1)
    print("TOPIC NAME:", subtopics[lesson_idx])
    print("USER:", prompt)
    if prompt.lower() in ('start', 'continue', 'next'):
        # Lesson-advance command: deliver the next subtopic's lesson.
        print("Topic Name:", subtopics[lesson_idx])
        with st.chat_message("assistant"):
            assistant_response = _stream_response(
                ['Writing notes...', 'Revising topic...', 'Clearing blackboard...', 'Formulating Lesson Plan...', 'Getting ready for doubts...'],
                lambda: get_llm_response(subtopics[lesson_idx], template_type='lesson'),
            )
            st.session_state.lesson_count += 1
            st.session_state.messages.append({"role": "assistant", "content": assistant_response})
    else:
        # Free-form question: record and display it, then answer it.
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)
        with st.chat_message("assistant"):
            assistant_response = _stream_response(
                ['Thinking...', 'Reading Textbook...', 'Clearing blackboard...', 'Revising Topics...', 'Refilling pen...'],
                lambda: get_llm_response(prompt, template_type='user'),
            )
            # Add assistant response to chat history
            st.session_state.messages.append({"role": "assistant", "content": assistant_response})