Commit 8fd8ae7
Parent(s): 5e15d66

add database memory

Files changed:
- bot/panda_bot.py (+39 -9)
- database/__init__.py (+40 -0)
- database/database_util.py (+13 -0)
- requirements.txt (+0 -0)
- server.py (+15 -4)
bot/panda_bot.py CHANGED

@@ -24,9 +24,14 @@ Patient Care Focus: As an assistant, Panda should regularly check in on the pati
 Patient Information:
 
 - Patient Name: '{username}'
-- Hemodialysis Schedule
+- Hemodialysis Schedule 1: {schedule_one}
+- Hemodialysis Schedule 2: {schedule_two}
+- Liquid Intake: {liquid_intake}
+- temperature: {temperature}
+- blood_pressure: {blood_pressure}
+- food_intake: {food_intake}
 
-Panda should always remember this schedule and provide reminders or updates when relevant.
+Panda should always remember this schedule and provide reminders or updates when relevant. Your current time is {current_time}.
 
 ---
 
@@ -71,6 +76,8 @@ Goal: Your goal is to ensure the patient feels well-informed, safe, and supporte
 
 ---
 
+
+
 Begin!
 
 Previous conversation history:
@@ -106,6 +113,13 @@ class CustomPromptTemplate(StringPromptTemplate):
 
     tools_getter: List[StructuredTool]
     username: str
+    schedule_one: str
+    schedule_two: str
+    liquid_intake: str
+    temperature: str
+    blood_pressure: str
+    food_intake: str
+    current_time: str
 
     def format(self, **kwargs) -> str:
         # Get the intermediate steps (AgentAction, Observation tuples)
@@ -125,6 +139,13 @@ class CustomPromptTemplate(StringPromptTemplate):
         # Create a list of tool names for the tools provided
         kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools_getter])
         kwargs["username"] = self.username
+        kwargs["schedule_one"] = self.schedule_one
+        kwargs["schedule_two"] = self.schedule_two
+        kwargs["liquid_intake"] = self.liquid_intake
+        kwargs["temperature"] = self.temperature
+        kwargs["blood_pressure"] = self.blood_pressure
+        kwargs["food_intake"] = self.food_intake
+        kwargs["current_time"] = self.current_time
 
         return self.template.format(**kwargs)
 
@@ -163,26 +184,35 @@ class CustomOutputParser(AgentOutputParser):
 class PandaBot:
     def __init__(self, memory=None):
         # Initialize memory if provided, otherwise create a new one
-        self.memory =
+        self.memory = None
         # Initialize LLM, prompt, output parser, and tool list
         self.llm = Mistral()
         self.tools_list = [send_emergency_message_to_medic, search_information_for_question]
         self.tool_names = [i.name for i in self.tools_list]
         self.output_parser = CustomOutputParser()
         os.environ["SERPER_API_KEY"] = 'f90fe84e78ef9d2d8e377ab5c6fe3a4a25f42ef0'
-        os.environ["LANGCHAIN_TRACING_V2"] = 'true'
-        os.environ["LANGCHAIN_ENDPOINT"] = 'https://api.smith.langchain.com'
-        os.environ["LANGCHAIN_API_KEY"] = 'ls__413a7563fa034592be6c6a241176932a'
-        os.environ["LANGCHAIN_PROJECT"] = 'LLM Patient Monitoring'
+        # os.environ["LANGCHAIN_TRACING_V2"] = 'true'
+        # os.environ["LANGCHAIN_ENDPOINT"] = 'https://api.smith.langchain.com'
+        # os.environ["LANGCHAIN_API_KEY"] = 'ls__413a7563fa034592be6c6a241176932a'
+        # os.environ["LANGCHAIN_PROJECT"] = 'LLM Patient Monitoring'
 
 
-    def query(self, input_text: str, username: str) -> str:
+    def query(self, input_text: str, username: str, memory: any, schedule_one: any, schedule_two: any, liquid_intake: any, temperature: any, blood_pressure: any, food_intake: any, current_time: any) -> str:
         prompt = CustomPromptTemplate(
             input_variables=["input", "intermediate_steps", "chat_history"],
             template=template,
             validate_template=False,
             tools_getter=self.tools_list,
-            username=username
+            username=username,
+            schedule_one=schedule_one,
+            schedule_two=schedule_two,
+            liquid_intake=liquid_intake,
+            temperature=temperature,
+            blood_pressure=blood_pressure,
+            food_intake=food_intake,
+            current_time=current_time)
+
+        self.memory = memory
 
         self.llm_chains = LLMChain(llm=self.llm, prompt=prompt)
 
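The pattern in the CustomPromptTemplate hunks — extra instance fields injected as template variables at format time — looks like this in isolation. A minimal sketch, assuming a classic LangChain release where `StringPromptTemplate` is importable from `langchain.prompts`; the class name, template text, and values are illustrative, and only three of the diff's seven new fields are shown:

```python
from langchain.prompts import StringPromptTemplate

class VitalsPromptTemplate(StringPromptTemplate):
    template: str         # raw prompt text with {placeholders}
    username: str         # extra per-patient fields, mirroring the diff
    temperature: str
    blood_pressure: str

    def format(self, **kwargs) -> str:
        # Inject the instance fields so the template can reference them
        kwargs["username"] = self.username
        kwargs["temperature"] = self.temperature
        kwargs["blood_pressure"] = self.blood_pressure
        return self.template.format(**kwargs)

prompt = VitalsPromptTemplate(
    template="Patient {username}: temp {temperature}, BP {blood_pressure}.\nQ: {input}",
    input_variables=["input"],
    username="alice",
    temperature="36.8",
    blood_pressure="120/80",
)
print(prompt.format(input="How am I feeling today?"))
```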
database/__init__.py ADDED

@@ -0,0 +1,40 @@
+from supabase import create_client
+
+# Supabase Database URL & API Key (from Supabase dashboard)
+SUPABASE_URL = "https://ixugilhodhqmdedbzoaq.supabase.co"
+SUPABASE_KEY = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Iml4dWdpbGhvZGhxbWRlZGJ6b2FxIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NDIyMTg0MzIsImV4cCI6MjA1Nzc5NDQzMn0.YHD5VBaUKLHieIG7KsI3ZHBCeSBli27F2lOdNrQCfck"
+
+# Connect to Supabase
+supabase = create_client(SUPABASE_URL, SUPABASE_KEY)
+
+# PostgreSQL connection string for LangChain
+DB_URI = f"postgresql://postgres.ixugilhodhqmdedbzoaq:[email protected]:5432/postgres"
+
+
+# # Function to get user-specific chat memory stored in Supabase
+# def get_user_memory(user_id):
+#     return ConversationBufferMemory(
+#         chat_memory=SQLChatMessageHistory(
+#             connection_string=DB_URI,
+#             session_id=user_id
+#         ),
+#         return_messages=True  # FIX: Prevent duplicate storage
+#     )
+
+# # Example Usage
+# if __name__ == "__main__":
+#     user_id = "user_meh12312313123"
+
+#     # Create new user memory if not exists
+
+#     memory = ConversationBufferMemory(chat_memory=SQLChatMessageHistory(
+#         connection_string=DB_URI,
+#         session_id=user_id
+#     ),
+#     return_messages=False)
+
+#     memory.save_context({"input": "Hello, Supabase!"}, {"output": "Hello! How can I assist you?"})
+
+#     memory = get_user_memory('user_meh12312313123')
+
+#     print(memory.load_memory_variables({}))
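The connection settings above are hard-coded string literals. A minimal sketch of the same setup reading them from the environment instead, assuming only the `supabase` client package; the environment variable names are illustrative:

```python
import os
from supabase import create_client

# Illustrative: pull the same settings from the environment instead of source
SUPABASE_URL = os.environ["SUPABASE_URL"]
SUPABASE_KEY = os.environ["SUPABASE_KEY"]
DB_URI = os.environ["DB_URI"]  # e.g. postgresql://user:pass@host:5432/postgres

supabase = create_client(SUPABASE_URL, SUPABASE_KEY)
```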
database/database_util.py ADDED

@@ -0,0 +1,13 @@
+from langchain.memory import ConversationBufferMemory
+from langchain.memory.chat_message_histories import SQLChatMessageHistory
+from database import DB_URI
+
+def get_user_memory(user_id):
+    # Check if the user exists in the database, if not, create new memory
+    return ConversationBufferMemory(
+        chat_memory=SQLChatMessageHistory(
+            connection_string=DB_URI,
+            session_id=user_id,
+        ), memory_key="chat_history",
+        return_messages=True  # FIX: Prevent duplicate storage
+    )
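Mirroring the commented-out example left in database/__init__.py, the new helper can be exercised roughly like this; the session id comes from that example, and the calls assume the LangChain version pinned in requirements.txt still exposes ConversationBufferMemory and SQLChatMessageHistory:

```python
from database.database_util import get_user_memory

# Per-user, SQL-backed buffer: messages persist in Supabase Postgres keyed by session_id
memory = get_user_memory("user_meh12312313123")
memory.save_context({"input": "Hello, Supabase!"}, {"output": "Hello! How can I assist you?"})

# Returns {"chat_history": [...]} since memory_key="chat_history" and return_messages=True
print(memory.load_memory_variables({}))
```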
requirements.txt CHANGED

Binary files a/requirements.txt and b/requirements.txt differ
server.py CHANGED

@@ -4,7 +4,8 @@ from flask import Flask, request, jsonify
 # from langchain_core.messages import HumanMessage, AIMessage
 # from langchain.prompts import ChatPromptTemplate
 from deep_translator import GoogleTranslator
-
+from database.database_util import get_user_memory
+from datetime import datetime
 
 app = Flask(__name__)
 
@@ -42,26 +43,36 @@ Output:
 # llm.memory.chat_memory.add_ai_message(AIMessage(response))
 # return jsonify({"response": translate})
 
-def simulate_llm_query(user_input, username):
+def simulate_llm_query(user_input, username, schedule_one, schedule_two, liquid_intake, temperature, blood_pressure, food_intake):
     """
     Simulates querying a language model.
     Replace this function's logic with actual LLM querying.
     """
     # Placeholder response logic, replace with actual LLM integration
-
+    memory = get_user_memory(username)
+
+    now = datetime.now()
+    current_time = now.strftime("%A, %I:%M %p")
+    return llm.query(user_input, username, memory, schedule_one, schedule_two, liquid_intake, temperature, blood_pressure, food_intake, current_time)
 
 @app.route('/query', methods=['POST'])
 def query_llm():
     data = request.json
     user_input = data.get('input')
     username = data.get('username')
+    schedule_one = str(data.get('schedule_one'))
+    schedule_two = str(data.get('schedule_two'))
+    liquid_intake = str(data.get('liquid_intake'))
+    temperature = str(data.get('temperature'))
+    blood_pressure = str(data.get('blood_pressure'))
+    food_intake = str(data.get('food_intake'))
 
     input_translate = GoogleTranslator(source='id', target='en').translate(user_input)
 
     if not user_input:
         return jsonify({"error": "No input provided"}), 400
 
-    response = simulate_llm_query(input_translate, username)
+    response = simulate_llm_query(input_translate, username, schedule_one, schedule_two, liquid_intake, temperature, blood_pressure, food_intake)
 
     output_translate = GoogleTranslator(source='en', target='id').translate(response)
 
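Putting the server.py changes together: /query now expects the patient's vitals alongside the input, stringifies them, and simulate_llm_query fetches the user's persisted memory and stamps the prompt with the current time (now.strftime("%A, %I:%M %p") yields e.g. "Monday, 08:30 AM"). A sketch of a client call under those assumptions; `requests` is not necessarily pinned in requirements.txt, and the host, port, and field values are illustrative:

```python
import requests

payload = {
    "input": "Apakah jadwal cuci darah saya hari ini?",  # Indonesian; server translates id -> en
    "username": "user_meh12312313123",
    "schedule_one": "Monday 08:00",
    "schedule_two": "Thursday 08:00",
    "liquid_intake": "600 ml",
    "temperature": "36.8",
    "blood_pressure": "120/80",
    "food_intake": "low-sodium breakfast",
}

# Flask's default development port; adjust for the deployed Space
resp = requests.post("http://localhost:5000/query", json=payload)
print(resp.json())  # response text is translated back en -> id before being returned
```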