akhaliq (HF Staff) committed
Commit 9658f75 · Parent: 8f1230c
Files changed (2)
  1. app.py +38 -2
  2. requirements.txt +2 -1
app.py CHANGED
@@ -2,27 +2,63 @@ import os
 import gradio as gr
 from huggingface_hub import InferenceClient
 from dotenv import load_dotenv
+from tavily import TavilyClient
 
 # Load environment variables
 load_dotenv()
 
-# Initialize Hugging Face client
+# Initialize clients
 client = InferenceClient(
     provider="novita",
     api_key=os.getenv("HF_TOKEN"),
     bill_to="huggingface"
 )
 
+tavily_client = TavilyClient(api_key=os.getenv("TAVILY_API_KEY"))
+
+def get_web_context(query):
+    """
+    Get relevant web search results using Tavily
+    """
+    try:
+        response = tavily_client.search(
+            query=query,
+            search_depth="advanced",
+            max_results=3
+        )
+
+        # Format the search results
+        context = "Web Search Results:\n\n"
+        for result in response['results']:
+            context += f"Title: {result['title']}\n"
+            context += f"URL: {result['url']}\n"
+            context += f"Content: {result['content']}\n\n"
+
+        return context
+    except Exception as e:
+        return f"Error getting web context: {str(e)}"
+
 def chat(message, history):
     """
-    Process chat messages using Hugging Face's Inference Provider
+    Process chat messages using Hugging Face's Inference Provider with web context
     """
     try:
+        # Get web context
+        web_context = get_web_context(message)
+
         # Format the conversation history
         messages = []
         for human, assistant in history:
             messages.append({"role": "user", "content": human})
             messages.append({"role": "assistant", "content": assistant})
+
+        # Add system message with web context
+        messages.append({
+            "role": "system",
+            "content": f"You are a helpful AI assistant. Use the following web search results to inform your response:\n\n{web_context}"
+        })
+
+        # Add user message
         messages.append({"role": "user", "content": message})
 
         # Get response from the model
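The hunk is truncated at the "# Get response from the model" comment, so the actual model call is not part of this commit view. A minimal sketch of how the assembled messages list is typically sent through huggingface_hub's InferenceClient is shown below; the model ID and max_tokens value are assumptions for illustration, not code from this commit.

# Sketch only, not from this commit: one common way the truncated
# "# Get response from the model" step is implemented with huggingface_hub.
# The model ID and max_tokens below are assumed values for illustration.
response = client.chat_completion(
    messages=messages,
    model="meta-llama/Llama-3.1-8B-Instruct",  # hypothetical model choice
    max_tokens=1024,
)
answer = response.choices[0].message.content

In a Gradio app of this shape, chat(message, history) is typically wired up with gr.ChatInterface(chat), which matches the (user, assistant) tuple history the function iterates over.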
requirements.txt CHANGED
@@ -1,4 +1,5 @@
 gradio>=4.0.0
 huggingface-hub>=0.20.0
 python-dotenv>=1.0.0
-requests>=2.31.0
+requests>=2.31.0
+tavily-python>=0.2.0
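The new tavily-python dependency expects a TAVILY_API_KEY in the environment (or .env), mirroring how HF_TOKEN is read in app.py. A quick standalone check of the same search call used by get_web_context, with an arbitrary example query, might look like this sketch:

# Standalone sanity check for the new tavily-python dependency
# (sketch; the query string is an arbitrary example, not from this commit).
import os
from dotenv import load_dotenv
from tavily import TavilyClient

load_dotenv()
tavily = TavilyClient(api_key=os.getenv("TAVILY_API_KEY"))
results = tavily.search(query="Gradio chatbot web search", search_depth="advanced", max_results=3)
for r in results["results"]:
    print(r["title"], "-", r["url"])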