Spaces:
Running
Running
ahmadgenus
committed on
Commit
·
e552ac2
1
Parent(s):
8721562
new_chatbot
Browse files — chatbot.py +7 -4
chatbot.py
CHANGED
@@ -73,6 +73,8 @@ def keyword_in_post_or_comments(post, keyword):
|
|
73 |
|
74 |
# Fetch and process Reddit data
|
75 |
def fetch_reddit_data(keyword, days=7, limit=None):
|
|
|
|
|
76 |
end_time = datetime.utcnow()
|
77 |
start_time = end_time - timedelta(days=days)
|
78 |
subreddit = reddit.subreddit("all")
|
@@ -105,7 +107,8 @@ def fetch_reddit_data(keyword, days=7, limit=None):
|
|
105 |
"embedding": embedding,
|
106 |
"metadata": metadata
|
107 |
})
|
108 |
-
|
|
|
109 |
|
110 |
# Save data into SQLite
|
111 |
def save_to_db(posts):
|
@@ -177,7 +180,7 @@ Chat History:
|
|
177 |
Context from Reddit and User Question:
|
178 |
{input}
|
179 |
|
180 |
-
Act as
|
181 |
""")
|
182 |
|
183 |
chat_chain = LLMChain(
|
@@ -194,5 +197,5 @@ def get_chatbot_response(question, keyword, reddit_id=None):
|
|
194 |
if len(context) > 3000:
|
195 |
context = summarize_chain.run({"context": context})
|
196 |
combined_input = f"Context:\n{context}\n\nUser Question: {question}"
|
197 |
-
response = chat_chain.
|
198 |
-
return response, context_posts
|
|
|
73 |
|
74 |
# Fetch and process Reddit data
|
75 |
def fetch_reddit_data(keyword, days=7, limit=None):
|
76 |
+
print("Reddit Keys Check:", os.getenv("REDDIT_CLIENT_ID"), os.getenv("REDDIT_CLIENT_SECRET"))
|
77 |
+
|
78 |
end_time = datetime.utcnow()
|
79 |
start_time = end_time - timedelta(days=days)
|
80 |
subreddit = reddit.subreddit("all")
|
|
|
107 |
"embedding": embedding,
|
108 |
"metadata": metadata
|
109 |
})
|
110 |
+
if data:
|
111 |
+
save_to_db(data)
|
112 |
|
113 |
# Save data into SQLite
|
114 |
def save_to_db(posts):
|
|
|
180 |
Context from Reddit and User Question:
|
181 |
{input}
|
182 |
|
183 |
+
Act as a Professional Assistant as an incremental chat agent. Provide reasoning and answer clearly based on the context and chat history. Your response should be valid, concise, and relevant.
|
184 |
""")
|
185 |
|
186 |
chat_chain = LLMChain(
|
|
|
197 |
if len(context) > 3000:
|
198 |
context = summarize_chain.run({"context": context})
|
199 |
combined_input = f"Context:\n{context}\n\nUser Question: {question}"
|
200 |
+
response = chat_chain.invoke({"input": combined_input})
|
201 |
+
return response, context_posts
|