# CarMaa / app.py
import gradio as gr
from groq import Groq
from dotenv import load_dotenv
from duckduckgo_search import DDGS
import os
import traceback
import json
import time
# Load .env environment variables
load_dotenv()
api_key = os.getenv("GROQ_API_KEY")
client = Groq(api_key=api_key)
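# GROQ_API_KEY must be available via the environment or a local .env file for the client to work.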
MODEL_NAME = "llama-3.3-70b-versatile"
# In-memory cache for search results: {query: (timestamp, formatted_results)}
search_cache = {}
cache_timeout = 3600  # seconds (1 hour)
# In-memory Q&A store for community simulation
community_qa = []
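# Each entry is {"question": str, "answers": list[str]}; in-memory only, so it is cleared on restart.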
# Diagnostics knowledge base (simplified)
diagnostics_db = {
"Maruti Alto": {
"check engine light": {
"causes": ["Faulty oxygen sensor", "Loose fuel cap", "Spark plug issues"],
"solutions": ["Run OBD-II scan (₹500-₹1500)", "Tighten/replace fuel cap (₹100-₹500)", "Replace spark plugs (₹1000-₹2000)"],
"severity": "Moderate"
},
"poor fuel efficiency": {
"causes": ["Clogged air filter", "Tire underinflation", "Fuel injector issues"],
"solutions": ["Replace air filter (₹300-₹800)", "Check tire pressure (free)", "Clean/replace injectors (₹2000-₹5000)"],
"severity": "Low"
}
},
"Hyundai i20": {
"ac not cooling": {
"causes": ["Low refrigerant", "Faulty compressor", "Clogged condenser"],
"solutions": ["Refill refrigerant (₹1500-₹3000)", "Repair/replace compressor (₹5000-₹15000)", "Clean condenser (₹1000-₹2000)"],
"severity": "High"
}
}
}
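# Keys must match the vehicle profile's make_model exactly; issue names are matched
# as substrings of the user's message (see the diagnostics check in respond()).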
# Maintenance tips
maintenance_tips = [
"Check tire pressure monthly to improve fuel efficiency.",
"Change engine oil every 10,000 km or 6 months for Indian road conditions.",
"Inspect brakes regularly, especially during the monsoon season.",
"Keep your car clean to prevent rust in humid climates."
]
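# One tip is appended to each final answer, picked via hash(message) % len(maintenance_tips).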
# Tool: DuckDuckGo web search with retry and structured output
def web_search_duckduckgo(query: str, max_results: int = 5, max_retries: int = 2):
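    """Search DuckDuckGo (India region) for the query, with a 1-hour in-memory cache.

    Retries up to max_retries times and returns a formatted string of result titles
    and URLs, or a warning message if every attempt fails.
    """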
cache_key = query.lower()
if cache_key in search_cache:
cached_time, cached_results = search_cache[cache_key]
if time.time() - cached_time < cache_timeout:
print(f"Using cached results for: {query}")
return cached_results
results = []
for attempt in range(max_retries):
try:
with DDGS() as ddgs:
for r in ddgs.text(query, region="in-en", safesearch="Moderate", max_results=max_results):
results.append({"title": r['title'], "url": r['href']})
formatted_results = "\n\n".join(f"- {r['title']}\n {r['url']}" for r in results)
search_cache[cache_key] = (time.time(), formatted_results)
return formatted_results
except Exception as e:
print(f"Search attempt {attempt + 1} failed: {str(e)}")
if attempt + 1 == max_retries:
return f"⚠️ Web search failed after {max_retries} attempts: {str(e)}"
time.sleep(1)
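# Example (manual check; requires network access):
#   print(web_search_duckduckgo("car repair shops Dehradun", max_results=3))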
# ReAct agent response with thought process
def respond(message, history, system_message, max_tokens, temperature, top_p, vehicle_profile):
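    """Generator used by gr.ChatInterface: yields the reply incrementally.

    Order of handling: known issues from diagnostics_db, community Q&A keywords,
    garage/service trigger keywords (direct web search), then the ReAct loop.
    """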
try:
# Initialize messages with ReAct system prompt
react_prompt = (
f"{system_message}\n\n"
"You are using the ReAct framework. For each user query, follow these steps:\n"
"1. **Thought**: Reason about the query and decide the next step. Check the diagnostics database first for known issues. For location-specific queries (e.g., garages, repair shops) or real-time data (e.g., pricing, availability), prioritize web search. For community questions, check the Q&A store.\n"
"2. **Observation**: Note relevant information (e.g., user input, vehicle profile, tool results, or context).\n"
"3. **Action**: Choose an action: 'search' (web search), 'respond' (final answer), 'clarify' (ask for details), 'add_qa' (add to Q&A store), or 'get_qa' (retrieve Q&A).\n"
"Format your response as a valid JSON object with 'thought', 'observation', 'action', and optionally 'search_query', 'response', or 'qa_content'. Example:\n"
"{\n"
" \"thought\": \"User asks for garages in Dehradun, need to search.\",\n"
" \"observation\": \"Location: Dehradun\",\n"
" \"action\": \"search\",\n"
" \"search_query\": \"car repair shops Dehradun\"\n"
"}\n"
f"User vehicle profile: {json.dumps(vehicle_profile)}\n"
"Use the search tool for locations, prices, or real-time data. Ensure valid JSON."
)
messages = [{"role": "system", "content": react_prompt}]
# Add history
for msg in history:
role = msg.get("role")
content = msg.get("content")
if role in ["user", "assistant"] and content:
messages.append({"role": role, "content": content})
messages.append({"role": "user", "content": message})
# Trigger keywords for garage search
trigger_keywords = [
"garage near", "car service near", "repair shop in", "mechanic in", "car workshop near",
"tyre change near", "puncture repair near", "engine repair near", "car wash near",
"car ac repair", "suspension work", "car battery replacement", "headlight change",
"oil change near", "nearby service center", "wheel alignment near", "wheel balancing",
"car painting service", "denting and painting", "car insurance repair", "maruti workshop",
"hyundai service", "honda repair center", "toyota garage", "tata motors service",
"mahindra car repair", "nexa service center", "kia workshop", "ev charging near",
"ev repair", "gearbox repair", "clutch repair", "brake pad replacement",
"windshield repair", "car glass replacement", "coolant top up", "engine tuning",
"car noise issue", "check engine light", "dashboard warning light", "local garage",
"trusted mechanic", "authorized service center", "car towing service near me",
"car not starting", "flat battery", "jump start service", "roadside assistance",
"ac not cooling", "car breakdown", "pickup and drop car service"
]
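        # A match on any of these phrases (checked below, after the diagnostics and
        # community steps) short-circuits the ReAct loop and runs a direct web search.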
# Check diagnostics database
if vehicle_profile.get("make_model") and any(kw in message.lower() for kw in diagnostics_db.get(vehicle_profile["make_model"], {})):
for issue, details in diagnostics_db[vehicle_profile["make_model"]].items():
if issue in message.lower():
response = (
f"**Diagnosed Issue**: {issue}\n"
f"- **Possible Causes**: {', '.join(details['causes'])}\n"
f"- **Solutions**: {', '.join(details['solutions'])}\n"
f"- **Severity**: {details['severity']}\n"
f"Would you like to search for garages to address this issue or learn more?"
)
yield response
return
# Check for community Q&A keywords
if any(kw in message.lower() for kw in ["community", "forum", "discussion", "share advice", "ask community"]):
if "post" in message.lower() or "share" in message.lower():
community_qa.append({"question": message, "answers": []})
yield "Your question has been posted to the community! Check back for answers."
return
elif "view" in message.lower() or "see" in message.lower():
if community_qa:
response = "Community Q&A:\n" + "\n".join(
f"Q: {qa['question']}\nA: {', '.join(qa['answers']) or 'No answers yet'}"
for qa in community_qa
)
else:
response = "No community questions yet. Post one with 'share' or 'post'!"
yield response
return
# Check for trigger keywords to directly perform search
if any(keyword in message.lower() for keyword in trigger_keywords):
print(f"Trigger keyword detected in query: {message}")
search_results = web_search_duckduckgo(message)
print(f"Search Results:\n{search_results}")
final_response = f"🔍 Here are some results I found:\n\n{search_results}\n\n**Tip**: {maintenance_tips[hash(message) % len(maintenance_tips)]}"
for i in range(0, len(final_response), 10):
yield final_response[:i + 10]
return
        # ReAct loop (up to 3 iterations)
        max_iterations = 3
        max_json_retries = 2
        current_response = ""
        raw_response = ""  # fallback so .get(..., raw_response) below cannot raise NameError if every LLM call fails
for iteration in range(max_iterations):
print(f"\n--- ReAct Iteration {iteration + 1} ---")
# Call LLM with current messages
for retry in range(max_json_retries):
try:
completion = client.chat.completions.create(
model=MODEL_NAME,
messages=messages,
temperature=temperature,
max_completion_tokens=max_tokens,
top_p=top_p,
stream=False,
)
raw_response = completion.choices[0].message.content
# Parse LLM response
try:
react_step = json.loads(raw_response)
thought = react_step.get("thought", "")
observation = react_step.get("observation", "")
action = react_step.get("action", "")
# Log to console
print("Thought:", thought)
print("Observation:", observation)
print("Action:", action)
break
except json.JSONDecodeError:
print(f"Error: LLM response is not valid JSON (attempt {retry + 1}/{max_json_retries}).")
if retry + 1 == max_json_retries:
print("Max retries reached. Treating as direct response.")
react_step = {"response": raw_response, "action": "respond"}
thought = "N/A (Invalid JSON)"
observation = "N/A (Invalid JSON)"
action = "respond"
else:
messages.append({
"role": "system",
"content": "Previous response was not valid JSON. Please provide a valid JSON object with 'thought', 'observation', 'action', and optionally 'search_query', 'response', or 'qa_content'."
})
except Exception as e:
print(f"LLM call failed (attempt {retry + 1}/{max_json_retries}): {str(e)}")
if retry + 1 == max_json_retries:
react_step = {"response": f"⚠️ Failed to process query: {str(e)}", "action": "respond"}
thought = "N/A (LLM error)"
observation = "N/A (LLM error)"
action = "respond"
else:
time.sleep(1)
# Handle action
if action == "search":
search_query = react_step.get("search_query", message)
print(f"Performing web search for: {search_query}")
search_results = web_search_duckduckgo(search_query)
messages.append({"role": "assistant", "content": raw_response})
messages.append({
"role": "system",
"content": f"Search results for '{search_query}':\n{search_results}"
})
print(f"Search Results:\n{search_results}")
elif action == "respond":
final_response = react_step.get("response", raw_response)
current_response = f"{final_response}\n\n**Tip**: {maintenance_tips[hash(message) % len(maintenance_tips)]}"
print(f"Final Response:\n{current_response}")
break
elif action == "clarify":
clarification = react_step.get("response", "Please provide more details.")
messages.append({"role": "assistant", "content": raw_response})
current_response = clarification
print(f"Clarification Request:\n{current_response}")
elif action == "add_qa":
qa_content = react_step.get("qa_content", message)
community_qa.append({"question": qa_content, "answers": []})
current_response = "Your question has been posted to the community! Check back for answers."
print(f"Community Q&A Added:\n{qa_content}")
break
elif action == "get_qa":
if community_qa:
current_response = "Community Q&A:\n" + "\n".join(
f"Q: {qa['question']}\nA: {', '.join(qa['answers']) or 'No answers yet'}"
for qa in community_qa
)
else:
current_response = "No community questions yet. Post one with 'share' or 'post'!"
print(f"Community Q&A Retrieved:\n{current_response}")
break
else:
print("Unknown action, continuing to next iteration.")
messages.append({"role": "assistant", "content": raw_response})
# Stream final response to Gradio
for i in range(0, len(current_response), 10):
yield current_response[:i + 10]
except Exception as e:
error_msg = f"❌ Error: {str(e)}\n{traceback.format_exc()}"
print(error_msg)
yield error_msg
# Gradio interface with vehicle profile
with gr.Blocks(title="CarMaa - India's AI Car Doctor") as demo:
gr.Markdown("# CarMaa - India's AI Car Doctor")
gr.Markdown("Your trusted AI for car diagnostics, garage searches, and community advice.")
# Vehicle profile inputs
with gr.Row():
make_model = gr.Textbox(label="Vehicle Make and Model (e.g., Maruti Alto)", placeholder="Enter your car's make and model")
year = gr.Textbox(label="Year", placeholder="Enter the year of manufacture")
city = gr.Textbox(label="City", placeholder="Enter your city")
vehicle_profile = gr.State(value={"make_model": "", "year": "", "city": ""})
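    # Per-session profile state; passed to respond() via the chat interface's additional_inputs.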
# Update vehicle profile
def update_vehicle_profile(make_model, year, city):
return {"make_model": make_model, "year": year, "city": city}
gr.Button("Save Vehicle Profile").click(
fn=update_vehicle_profile,
inputs=[make_model, year, city],
outputs=vehicle_profile
)
# Chat interface
chatbot = gr.ChatInterface(
fn=respond,
additional_inputs=[
gr.Textbox(value=(
"You are CarMaa, a highly intelligent and trusted AI Car Doctor trained on comprehensive automobile data, diagnostics, "
"and service records with specialized knowledge of Indian vehicles, road conditions, and market pricing. Your role is to "
"guide car owners with accurate insights, including service intervals, symptoms, estimated repair costs, garage locations, "
"climate effects, and fuel-efficiency tips. Personalize answers by vehicle details and city. Engage users as a community by "
"allowing Q&A posts and sharing maintenance tips."
), label="System message"),
gr.Slider(minimum=1, maximum=4096, value=1024, step=1, label="Max new tokens"),
gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
vehicle_profile
],
type="messages"
)
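    # Note: additional_inputs must stay in the same order as respond()'s parameters
    # after (message, history): system_message, max_tokens, temperature, top_p, vehicle_profile.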
if __name__ == "__main__":
demo.launch()