Ritvik committed on
Commit
add1d5a
·
1 Parent(s): 4f05f34

Updated app 5

Browse files
Files changed (1) hide show
  1. app.py +202 -67
app.py CHANGED
@@ -5,6 +5,9 @@ from duckduckgo_search import DDGS
5
  import os
6
  import traceback
7
  import json
 
 
 
8
 
9
  # Load .env environment variables
10
  load_dotenv()
@@ -12,39 +15,91 @@ api_key = os.getenv("GROQ_API_KEY")
12
  client = Groq(api_key=api_key)
13
  MODEL_NAME = "llama-3.3-70b-versatile"
14
 
15
- # Tool: DuckDuckGo web search
16
- def web_search_duckduckgo(query: str, max_results: int = 5):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
  results = []
18
- try:
19
- with DDGS() as ddgs:
20
- for r in ddgs.text(query, region="in-en", safesearch="Moderate", max_results=max_results):
21
- results.append(f"- {r['title']}\n {r['href']}")
22
- return "\n\n".join(results)
23
- except Exception as e:
24
- return f"⚠️ Web search failed: {str(e)}"
 
 
 
 
 
 
25
 
26
  # ReAct agent response with thought process
27
- def respond(message, history, system_message, max_tokens, temperature, top_p):
28
  try:
29
  # Initialize messages with ReAct system prompt
30
  react_prompt = (
31
  f"{system_message}\n\n"
32
  "You are using the ReAct framework. For each user query, follow these steps:\n"
33
- "1. **Thought**: Reason about the query and decide the next step. For location-specific queries (e.g., finding garages or repair shops) or real-time data (e.g., pricing, availability), prioritize performing a web search using the provided tool unless you have sufficient context to answer directly.\n"
34
- "2. **Observation**: Note relevant information (e.g., user input, tool results, or context).\n"
35
- "3. **Action**: Choose an action: 'search' (to perform a web search), 'respond' (to provide a final answer), or 'clarify' (to ask for more details).\n"
36
- "Format your response as a valid JSON object with 'thought', 'observation', 'action', and optionally 'search_query' (for search actions) or 'response' (for final answers). Example:\n"
37
  "{\n"
38
  " \"thought\": \"User asks for garages in Dehradun, need to search.\",\n"
39
  " \"observation\": \"Location: Dehradun\",\n"
40
  " \"action\": \"search\",\n"
41
  " \"search_query\": \"car repair shops Dehradun\"\n"
42
  "}\n"
43
- "Use the search tool for queries involving locations, prices, or real-time data unless explicitly instructed otherwise. Ensure your response is valid JSON."
 
44
  )
45
  messages = [{"role": "system", "content": react_prompt}]
46
 
47
- # Add history (handle OpenAI-style messages format)
48
  for msg in history:
49
  role = msg.get("role")
50
  content = msg.get("content")
@@ -69,60 +124,101 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
69
  "ac not cooling", "car breakdown", "pickup and drop car service"
70
  ]
71
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
  # Check for trigger keywords to directly perform search
73
  if any(keyword in message.lower() for keyword in trigger_keywords):
74
  print(f"Trigger keyword detected in query: {message}")
75
  search_results = web_search_duckduckgo(message)
76
  print(f"Search Results:\n{search_results}")
77
- final_response = f"🔍 Here are some results I found:\n\n{search_results}"
78
- for i in range(0, len(final_response), 10): # Simulate streaming
79
  yield final_response[:i + 10]
80
  return
81
 
82
  # ReAct loop (up to 3 iterations)
83
  max_iterations = 3
84
- max_json_retries = 2 # Retry for invalid JSON
85
  current_response = ""
86
  for iteration in range(max_iterations):
87
  print(f"\n--- ReAct Iteration {iteration + 1} ---")
88
 
89
  # Call LLM with current messages
90
  for retry in range(max_json_retries):
91
- completion = client.chat.completions.create(
92
- model=MODEL_NAME,
93
- messages=messages,
94
- temperature=temperature,
95
- max_completion_tokens=max_tokens,
96
- top_p=top_p,
97
- stream=False,
98
- )
99
- raw_response = completion.choices[0].message.content
100
-
101
- # Parse LLM response
102
  try:
103
- react_step = json.loads(raw_response)
104
- thought = react_step.get("thought", "")
105
- observation = react_step.get("observation", "")
106
- action = react_step.get("action", "")
 
 
 
 
 
107
 
108
- # Log to console
109
- print("Thought:", thought)
110
- print("Observation:", observation)
111
- print("Action:", action)
112
- break # Successful JSON parse
113
- except json.JSONDecodeError:
114
- print(f"Error: LLM response is not valid JSON (attempt {retry + 1}/{max_json_retries}).")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
115
  if retry + 1 == max_json_retries:
116
- print("Max retries reached. Treating as direct response.")
117
- react_step = {"response": raw_response, "action": "respond"}
118
- thought = "N/A (Invalid JSON)"
119
- observation = "N/A (Invalid JSON)"
120
  action = "respond"
121
  else:
122
- messages.append({
123
- "role": "system",
124
- "content": "Previous response was not valid JSON. Please provide a valid JSON object with 'thought', 'observation', 'action', and optionally 'search_query' or 'response'."
125
- })
126
 
127
  # Handle action
128
  if action == "search":
@@ -138,7 +234,7 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
138
 
139
  elif action == "respond":
140
  final_response = react_step.get("response", raw_response)
141
- current_response = final_response
142
  print(f"Final Response:\n{current_response}")
143
  break
144
  elif action == "clarify":
@@ -146,6 +242,22 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
146
  messages.append({"role": "assistant", "content": raw_response})
147
  current_response = clarification
148
  print(f"Clarification Request:\n{current_response}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
149
  else:
150
  print("Unknown action, continuing to next iteration.")
151
  messages.append({"role": "assistant", "content": raw_response})
@@ -159,23 +271,46 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
159
  print(error_msg)
160
  yield error_msg
161
 
162
- # Gradio chat interface
163
- demo = gr.ChatInterface(
164
- fn=respond,
165
- additional_inputs=[
166
- gr.Textbox(value=(
167
- "You are CarMaa, a highly intelligent and trusted AI Car Doctor trained on comprehensive automobile data, diagnostics, "
168
- "and service records with specialized knowledge of Indian vehicles, road conditions, and market pricing. Your role is to "
169
- "guide car owners with accurate insights, including service intervals, symptoms, estimated repair costs, garage locations, "
170
- "climate effects, and fuel-efficiency tips. Personalize answers by vehicle details and city."
171
- ), label="System message"),
172
- gr.Slider(minimum=1, maximum=4096, value=1024, step=1, label="Max new tokens"),
173
- gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
174
- gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
175
- ],
176
- title="CarMaa - India's AI Car Doctor",
177
- type="messages" # Use modern messages format
178
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
179
 
180
  if __name__ == "__main__":
181
  demo.launch()
 
5
  import os
6
  import traceback
7
  import json
8
+ import time
9
+ from collections import defaultdict
10
+ import requests
11
 
12
  # Load .env environment variables
13
  load_dotenv()
 
15
  client = Groq(api_key=api_key)
16
  MODEL_NAME = "llama-3.3-70b-versatile"
17
 
18
# In-memory cache for search results, keyed by lowercased query.
# Values are (timestamp, formatted_results) tuples. A plain dict is used:
# callers always guard lookups with `in` before indexing, and the previous
# defaultdict(str) default ("") could never be unpacked as a tuple anyway.
search_cache = {}
cache_timeout = 3600  # seconds a cached search result stays fresh (1 hour)

# In-memory Q&A store for community simulation (process-local, not persisted)
community_qa = []
24
+
25
# Diagnostics knowledge base (simplified).
# Maps vehicle make/model -> symptom phrase -> details. `respond` matches the
# symptom phrase as a substring of the user message and answers directly from
# this table, skipping the LLM/web search. Cost figures are INR ranges.
diagnostics_db = {
    "Maruti Alto": {
        "check engine light": {
            "causes": ["Faulty oxygen sensor", "Loose fuel cap", "Spark plug issues"],
            "solutions": ["Run OBD-II scan (₹500-₹1500)", "Tighten/replace fuel cap (₹100-₹500)", "Replace spark plugs (₹1000-₹2000)"],
            "severity": "Moderate"
        },
        "poor fuel efficiency": {
            "causes": ["Clogged air filter", "Tire underinflation", "Fuel injector issues"],
            "solutions": ["Replace air filter (₹300-₹800)", "Check tire pressure (free)", "Clean/replace injectors (₹2000-₹5000)"],
            "severity": "Low"
        }
    },
    "Hyundai i20": {
        "ac not cooling": {
            "causes": ["Low refrigerant", "Faulty compressor", "Clogged condenser"],
            "solutions": ["Refill refrigerant (₹1500-₹3000)", "Repair/replace compressor (₹5000-₹15000)", "Clean condenser (₹1000-₹2000)"],
            "severity": "High"
        }
    }
}
47
+
48
# Maintenance tips appended to chat responses.
# One tip is chosen deterministically per message via
# maintenance_tips[hash(message) % len(maintenance_tips)].
maintenance_tips = [
    "Check tire pressure monthly to improve fuel efficiency.",
    "Change engine oil every 10,000 km or 6 months for Indian road conditions.",
    "Inspect brakes regularly, especially during monsoon seasons.",
    "Keep your car clean to prevent rust in humid climates."
]
55
+
56
# Tool: DuckDuckGo web search with retry, caching, and structured output
def web_search_duckduckgo(query: str, max_results: int = 5, max_retries: int = 2):
    """Search DuckDuckGo for `query` and return a newline-formatted result list.

    Results are cached in-process in `search_cache` (keyed by the lowercased
    query) for `cache_timeout` seconds. Transient failures are retried up to
    `max_retries` times with a 1-second pause between attempts; if every
    attempt fails, a user-visible warning string is returned instead.

    Returns:
        str: "- title\\n url" entries joined by blank lines, or a "⚠️ ..."
        message on total failure.
    """
    cache_key = query.lower()
    if cache_key in search_cache:
        cached_time, cached_results = search_cache[cache_key]
        if time.time() - cached_time < cache_timeout:
            print(f"Using cached results for: {query}")
            return cached_results

    for attempt in range(max_retries):
        # Start from a clean slate on every attempt: previously `results`
        # was created once outside the loop, so a failure mid-iteration left
        # partial entries behind and duplicated them on the retry.
        results = []
        try:
            with DDGS() as ddgs:
                for r in ddgs.text(query, region="in-en", safesearch="Moderate", max_results=max_results):
                    results.append({"title": r['title'], "url": r['href']})
            formatted_results = "\n\n".join(f"- {r['title']}\n {r['url']}" for r in results)
            search_cache[cache_key] = (time.time(), formatted_results)
            return formatted_results
        except Exception as e:
            print(f"Search attempt {attempt + 1} failed: {str(e)}")
            if attempt + 1 == max_retries:
                return f"⚠️ Web search failed after {max_retries} attempts: {str(e)}"
            time.sleep(1)
 
80
  # ReAct agent response with thought process
81
+ def respond(message, history, system_message, max_tokens, temperature, top_p, vehicle_profile):
82
  try:
83
  # Initialize messages with ReAct system prompt
84
  react_prompt = (
85
  f"{system_message}\n\n"
86
  "You are using the ReAct framework. For each user query, follow these steps:\n"
87
+ "1. **Thought**: Reason about the query and decide the next step. Check the diagnostics database first for known issues. For location-specific queries (e.g., garages, repair shops) or real-time data (e.g., pricing, availability), prioritize web search. For community questions, check the Q&A store.\n"
88
+ "2. **Observation**: Note relevant information (e.g., user input, vehicle profile, tool results, or context).\n"
89
+ "3. **Action**: Choose an action: 'search' (web search), 'respond' (final answer), 'clarify' (ask for details), 'add_qa' (add to Q&A store), or 'get_qa' (retrieve Q&A).\n"
90
+ "Format your response as a valid JSON object with 'thought', 'observation', 'action', and optionally 'search_query', 'response', or 'qa_content'. Example:\n"
91
  "{\n"
92
  " \"thought\": \"User asks for garages in Dehradun, need to search.\",\n"
93
  " \"observation\": \"Location: Dehradun\",\n"
94
  " \"action\": \"search\",\n"
95
  " \"search_query\": \"car repair shops Dehradun\"\n"
96
  "}\n"
97
+ f"User vehicle profile: {json.dumps(vehicle_profile)}\n"
98
+ "Use the search tool for locations, prices, or real-time data. Ensure valid JSON."
99
  )
100
  messages = [{"role": "system", "content": react_prompt}]
101
 
102
+ # Add history
103
  for msg in history:
104
  role = msg.get("role")
105
  content = msg.get("content")
 
124
  "ac not cooling", "car breakdown", "pickup and drop car service"
125
  ]
126
 
127
+ # Check diagnostics database
128
+ if vehicle_profile.get("make_model") and any(kw in message.lower() for kw in diagnostics_db.get(vehicle_profile["make_model"], {})):
129
+ for issue, details in diagnostics_db[vehicle_profile["make_model"]].items():
130
+ if issue in message.lower():
131
+ response = (
132
+ f"**Diagnosed Issue**: {issue}\n"
133
+ f"- **Possible Causes**: {', '.join(details['causes'])}\n"
134
+ f"- **Solutions**: {', '.join(details['solutions'])}\n"
135
+ f"- **Severity**: {details['severity']}\n"
136
+ f"Would you like to search for garages to address this issue or learn more?"
137
+ )
138
+ yield response
139
+ return
140
+
141
+ # Check for community Q&A keywords
142
+ if any(kw in message.lower() for kw in ["community", "forum", "discussion", "share advice", "ask community"]):
143
+ if "post" in message.lower() or "share" in message.lower():
144
+ community_qa.append({"question": message, "answers": []})
145
+ yield "Your question has been posted to the community! Check back for answers."
146
+ return
147
+ elif "view" in message.lower() or "see" in message.lower():
148
+ if community_qa:
149
+ response = "Community Q&A:\n" + "\n".join(
150
+ f"Q: {qa['question']}\nA: {', '.join(qa['answers']) or 'No answers yet'}"
151
+ for qa in community_qa
152
+ )
153
+ else:
154
+ response = "No community questions yet. Post one with 'share' or 'post'!"
155
+ yield response
156
+ return
157
+
158
  # Check for trigger keywords to directly perform search
159
  if any(keyword in message.lower() for keyword in trigger_keywords):
160
  print(f"Trigger keyword detected in query: {message}")
161
  search_results = web_search_duckduckgo(message)
162
  print(f"Search Results:\n{search_results}")
163
+ final_response = f"🔍 Here are some results I found:\n\n{search_results}\n\n**Tip**: {maintenance_tips[hash(message) % len(maintenance_tips)]}"
164
+ for i in range(0, len(final_response), 10):
165
  yield final_response[:i + 10]
166
  return
167
 
168
  # ReAct loop (up to 3 iterations)
169
  max_iterations = 3
170
+ max_json_retries = 2
171
  current_response = ""
172
  for iteration in range(max_iterations):
173
  print(f"\n--- ReAct Iteration {iteration + 1} ---")
174
 
175
  # Call LLM with current messages
176
  for retry in range(max_json_retries):
 
 
 
 
 
 
 
 
 
 
 
177
  try:
178
+ completion = client.chat.completions.create(
179
+ model=MODEL_NAME,
180
+ messages=messages,
181
+ temperature=temperature,
182
+ max_completion_tokens=max_tokens,
183
+ top_p=top_p,
184
+ stream=False,
185
+ )
186
+ raw_response = completion.choices[0].message.content
187
 
188
+ # Parse LLM response
189
+ try:
190
+ react_step = json.loads(raw_response)
191
+ thought = react_step.get("thought", "")
192
+ observation = react_step.get("observation", "")
193
+ action = react_step.get("action", "")
194
+
195
+ # Log to console
196
+ print("Thought:", thought)
197
+ print("Observation:", observation)
198
+ print("Action:", action)
199
+ break
200
+ except json.JSONDecodeError:
201
+ print(f"Error: LLM response is not valid JSON (attempt {retry + 1}/{max_json_retries}).")
202
+ if retry + 1 == max_json_retries:
203
+ print("Max retries reached. Treating as direct response.")
204
+ react_step = {"response": raw_response, "action": "respond"}
205
+ thought = "N/A (Invalid JSON)"
206
+ observation = "N/A (Invalid JSON)"
207
+ action = "respond"
208
+ else:
209
+ messages.append({
210
+ "role": "system",
211
+ "content": "Previous response was not valid JSON. Please provide a valid JSON object with 'thought', 'observation', 'action', and optionally 'search_query', 'response', or 'qa_content'."
212
+ })
213
+ except Exception as e:
214
+ print(f"LLM call failed (attempt {retry + 1}/{max_json_retries}): {str(e)}")
215
  if retry + 1 == max_json_retries:
216
+ react_step = {"response": f"⚠️ Failed to process query: {str(e)}", "action": "respond"}
217
+ thought = "N/A (LLM error)"
218
+ observation = "N/A (LLM error)"
 
219
  action = "respond"
220
  else:
221
+ time.sleep(1)
 
 
 
222
 
223
  # Handle action
224
  if action == "search":
 
234
 
235
  elif action == "respond":
236
  final_response = react_step.get("response", raw_response)
237
+ current_response = f"{final_response}\n\n**Tip**: {maintenance_tips[hash(message) % len(maintenance_tips)]}"
238
  print(f"Final Response:\n{current_response}")
239
  break
240
  elif action == "clarify":
 
242
  messages.append({"role": "assistant", "content": raw_response})
243
  current_response = clarification
244
  print(f"Clarification Request:\n{current_response}")
245
+ elif action == "add_qa":
246
+ qa_content = react_step.get("qa_content", message)
247
+ community_qa.append({"question": qa_content, "answers": []})
248
+ current_response = "Your question has been posted to the community! Check back for answers."
249
+ print(f"Community Q&A Added:\n{qa_content}")
250
+ break
251
+ elif action == "get_qa":
252
+ if community_qa:
253
+ current_response = "Community Q&A:\n" + "\n".join(
254
+ f"Q: {qa['question']}\nA: {', '.join(qa['answers']) or 'No answers yet'}"
255
+ for qa in community_qa
256
+ )
257
+ else:
258
+ current_response = "No community questions yet. Post one with 'share' or 'post'!"
259
+ print(f"Community Q&A Retrieved:\n{current_response}")
260
+ break
261
  else:
262
  print("Unknown action, continuing to next iteration.")
263
  messages.append({"role": "assistant", "content": raw_response})
 
271
  print(error_msg)
272
  yield error_msg
273
 
274
# Gradio interface with vehicle profile
with gr.Blocks(title="CarMaa - India's AI Car Doctor") as demo:
    gr.Markdown("# CarMaa - India's AI Car Doctor")
    gr.Markdown("Your trusted AI for car diagnostics, garage searches, and community advice.")

    # Vehicle profile inputs
    with gr.Row():
        make_model = gr.Textbox(label="Vehicle Make and Model (e.g., Maruti Alto)", placeholder="Enter your car's make and model")
        year = gr.Textbox(label="Year", placeholder="Enter the year of manufacture")
        city = gr.Textbox(label="City", placeholder="Enter your city")
    # Per-session profile dict forwarded to `respond` as its last input.
    # NOTE(review): placed at Blocks level, outside the Row — the diff strips
    # indentation, so confirm this matches the original layout.
    vehicle_profile = gr.State(value={"make_model": "", "year": "", "city": ""})

    # Update vehicle profile
    def update_vehicle_profile(make_model, year, city):
        # Rebuild the profile dict from the three textboxes on button click.
        return {"make_model": make_model, "year": year, "city": city}

    gr.Button("Save Vehicle Profile").click(
        fn=update_vehicle_profile,
        inputs=[make_model, year, city],
        outputs=vehicle_profile
    )

    # Chat interface: streams from `respond`; sliders map to the LLM call's
    # max_completion_tokens / temperature / top_p parameters.
    chatbot = gr.ChatInterface(
        fn=respond,
        additional_inputs=[
            gr.Textbox(value=(
                "You are CarMaa, a highly intelligent and trusted AI Car Doctor trained on comprehensive automobile data, diagnostics, "
                "and service records with specialized knowledge of Indian vehicles, road conditions, and market pricing. Your role is to "
                "guide car owners with accurate insights, including service intervals, symptoms, estimated repair costs, garage locations, "
                "climate effects, and fuel-efficiency tips. Personalize answers by vehicle details and city. Engage users as a community by "
                "allowing Q&A posts and sharing maintenance tips."
            ), label="System message"),
            gr.Slider(minimum=1, maximum=4096, value=1024, step=1, label="Max new tokens"),
            gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
            gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
            vehicle_profile
        ],
        type="messages"
    )
314
 
315
# Launch the Gradio app only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()