aidevhund committed
Commit c7f8a5d · verified · 1 Parent(s): 661e3f4

Update app.py

Files changed (1)
  1. app.py +151 -76
app.py CHANGED
@@ -1,106 +1,68 @@
 import gradio as gr
-import requests
 from openai import OpenAI
 import os
 
-# Load your API keys from environment variables
 ACCESS_TOKEN = os.getenv("HF_TOKEN")
-TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
 print("Access token loaded.")
+
 client = OpenAI(
     base_url="https://api-inference.huggingface.co/v1/",
     api_key=ACCESS_TOKEN,
 )
 print("OpenAI client initialized.")
 
-# Define a comprehensive system prompt (global)
-SYSTEM_PROMPT = """
-You are a highly knowledgeable and reliable Crypto Trading Advisor and Analyzer.
-Your goal is to assist users in understanding, analyzing, and making informed decisions about cryptocurrency trading.
-"""
-
-# Binance API - Fetch Market Data
-def get_binance_data(symbol: str):
-    # Base URL for Binance API
-    url = f'https://api.binance.com/api/v3/ticker/24hr?symbol={symbol.upper()}'
-
-    try:
-        # Send GET request to Binance API
-        response = requests.get(url)
-        data = response.json()
-
-        if response.status_code != 200:
-            return {"error": "Error fetching data from Binance"}
-
-        # Extract relevant information from the API response
-        price = float(data['lastPrice'])
-        volume = float(data['volume'])
-        market_cap = float(data['quoteVolume'])  # Binance doesn't provide market cap directly, so we use quote volume as a proxy
-        change_24h = float(data['priceChangePercent'])
-
-        return {
-            'price': price,
-            'volume': volume,
-            'market_cap': market_cap,
-            'change_24h': change_24h
-        }
-    except Exception as e:
-        return {"error": f"An error occurred: {str(e)}"}
-
-# Function to handle chatbot responses
+
 def respond(
     message,
-    history: list[tuple[str, str]]
+    history: list[tuple[str, str]],
+    system_message,
+    max_tokens,
+    temperature,
+    top_p,
+    frequency_penalty,
+    seed,
+    custom_model
 ):
+
     print(f"Received message: {message}")
     print(f"History: {history}")
+    print(f"System message: {system_message}")
+    print(f"Max tokens: {max_tokens}, Temperature: {temperature}, Top-P: {top_p}")
+    print(f"Frequency Penalty: {frequency_penalty}, Seed: {seed}")
+    print(f"Selected model (custom_model): {custom_model}")
 
-    # Default values for the parameters
-    max_tokens = 512
-    temperature = 0.7
-    top_p = 0.95
-    frequency_penalty = 0.0
-    seed = None
-
-    # Check for cryptocurrency symbol in the message
-    if "crypto" in message.lower():
-        # Extract the cryptocurrency symbol from the message
-        crypto_symbol = message.split()[0].upper() + "USDT"  # Example: "Bitcoin" -> "BTCUSDT"
-        market_data = get_binance_data(crypto_symbol)
-
-        if 'error' in market_data:
-            response = "Error fetching data for this cryptocurrency."
-            yield response
-            return
-
-        # Update the system prompt with real-time data
-        updated_prompt = SYSTEM_PROMPT + f"""
-        Current Data for {crypto_symbol}:
-        - Price: ${market_data['price']}
-        - 24h Change: {market_data['change_24h']}%
-        - Volume: {market_data['volume']}
-        - Market Cap (proxy via quote volume): ${market_data['market_cap']}
-        """
-    else:
-        updated_prompt = SYSTEM_PROMPT  # Keep the original system prompt if no crypto-related message
-
-    # Prepare messages for the assistant
-    messages = [{"role": "system", "content": updated_prompt}]
+    # Convert seed to None if -1 (meaning random)
+    if seed == -1:
+        seed = None
+
+    messages = [{"role": "system", "content": system_message}]
+    print("Initial messages array constructed.")
 
+    # Add conversation history to the context
     for val in history:
         user_part = val[0]
         assistant_part = val[1]
         if user_part:
             messages.append({"role": "user", "content": user_part})
+            print(f"Added user message to context: {user_part}")
         if assistant_part:
             messages.append({"role": "assistant", "content": assistant_part})
+            print(f"Added assistant message to context: {assistant_part}")
 
-    # Add the latest user message
+    # Append the latest user message
     messages.append({"role": "user", "content": message})
+    print("Latest user message appended.")
 
-    # Start response generation
+    # If user provided a model, use that; otherwise, fall back to a default model
+    model_to_use = custom_model.strip() if custom_model.strip() != "" else "meta-llama/Llama-3.3-70B-Instruct"
+    print(f"Model selected for inference: {model_to_use}")
+
+    # Start with an empty string to build the response as tokens stream in
     response = ""
+    print("Sending request to OpenAI API.")
+
     for message_chunk in client.chat.completions.create(
-        model="meta-llama/Llama-3.3-70B-Instruct",
+        model=model_to_use,
         max_tokens=max_tokens,
         stream=True,
         temperature=temperature,
@@ -110,19 +72,132 @@ def respond(
         messages=messages,
     ):
         token_text = message_chunk.choices[0].delta.content
+        print(f"Received token: {token_text}")
        response += token_text
         yield response
 
     print("Completed response generation.")
 
-# Gradio UI
-chatbot = gr.Chatbot(height=600, show_copy_button=True, placeholder="Ask about crypto trading or analysis.", likeable=True)
+# GRADIO UI
+
+chatbot = gr.Chatbot(height=600, show_copy_button=True, placeholder="Select a model and begin chatting", likeable=True, layout="panel")
+print("Chatbot interface created.")
+
+system_message_box = gr.Textbox(value="", placeholder="You are a helpful assistant.", label="System Prompt")
+
+max_tokens_slider = gr.Slider(
+    minimum=1,
+    maximum=4096,
+    value=512,
+    step=1,
+    label="Max new tokens"
+)
+temperature_slider = gr.Slider(
+    minimum=0.1,
+    maximum=4.0,
+    value=0.7,
+    step=0.1,
+    label="Temperature"
+)
+top_p_slider = gr.Slider(
+    minimum=0.1,
+    maximum=1.0,
+    value=0.95,
+    step=0.05,
+    label="Top-P"
+)
+frequency_penalty_slider = gr.Slider(
+    minimum=-2.0,
+    maximum=2.0,
+    value=0.0,
+    step=0.1,
+    label="Frequency Penalty"
+)
+seed_slider = gr.Slider(
+    minimum=-1,
+    maximum=65535,
+    value=-1,
+    step=1,
+    label="Seed (-1 for random)"
+)
+
+# The custom_model_box is what the respond function sees as "custom_model"
+custom_model_box = gr.Textbox(
+    value="",
+    label="Custom Model",
+    info="(Optional) Provide a custom Hugging Face model path. Overrides any selected featured model.",
+    placeholder="meta-llama/Llama-3.3-70B-Instruct"
+)
+
+def set_custom_model_from_radio(selected):
+    """
+    This function will get triggered whenever someone picks a model from the 'Featured Models' radio.
+    We will update the Custom Model text box with that selection automatically.
+    """
+    print(f"Featured model selected: {selected}")
+    return selected
 
 demo = gr.ChatInterface(
     fn=respond,
+    additional_inputs=[
+        system_message_box,
+        max_tokens_slider,
+        temperature_slider,
+        top_p_slider,
+        frequency_penalty_slider,
+        seed_slider,
+        custom_model_box,
+    ],
     fill_height=True,
     chatbot=chatbot,
+    theme="Nymbo/Nymbo_Theme",
 )
+print("ChatInterface object created.")
+
+with demo:
+    with gr.Accordion("Model Selection", open=False):
+        model_search_box = gr.Textbox(
+            label="Filter Models",
+            placeholder="Search for a featured model...",
+            lines=1
+        )
+        print("Model search box created.")
+
+        models_list = [
+            "meta-llama/Llama-3.3-70B-Instruct"
+        ]
+        print("Models list initialized.")
+
+        featured_model_radio = gr.Radio(
+            label="Select a model below",
+            choices=models_list,
+            value="meta-llama/Llama-3.3-70B-Instruct",
+            interactive=True
+        )
+        print("Featured models radio button created.")
+
+        def filter_models(search_term):
+            print(f"Filtering models with search term: {search_term}")
+            filtered = [m for m in models_list if search_term.lower() in m.lower()]
+            print(f"Filtered models: {filtered}")
+            return gr.update(choices=filtered)
+
+        model_search_box.change(
+            fn=filter_models,
+            inputs=model_search_box,
+            outputs=featured_model_radio
+        )
+        print("Model search box change event linked.")
+
+        featured_model_radio.change(
+            fn=set_custom_model_from_radio,
+            inputs=featured_model_radio,
+            outputs=custom_model_box
+        )
+        print("Featured model radio button change event linked.")
+
+    print("Gradio interface initialized.")
 
 if __name__ == "__main__":
+    print("Launching the demo application.")
     demo.launch()
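
One robustness note on the streaming loop kept by this commit: with OpenAI-compatible streaming, `message_chunk.choices[0].delta.content` can be `None` (the role-priming and final chunks carry no text), in which case `response += token_text` raises a `TypeError`. A minimal hardening sketch, reusing the identifiers from the new app.py above; this guard is an illustration and is not part of the commit:

```python
# Sketch (not in this commit): skip stream chunks whose delta carries no text.
for message_chunk in client.chat.completions.create(
    model=model_to_use,
    max_tokens=max_tokens,
    stream=True,
    temperature=temperature,
    messages=messages,
):
    token_text = message_chunk.choices[0].delta.content
    if token_text is None:  # e.g., the final chunk only reports a finish_reason
        continue
    response += token_text
    yield response
```

Also useful when reading the diff: `gr.ChatInterface` passes the values of `additional_inputs` to `fn` positionally after `message` and `history`, so the order of the list (system_message_box, max_tokens_slider, temperature_slider, top_p_slider, frequency_penalty_slider, seed_slider, custom_model_box) must match the extra parameters in the signature of `respond`.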