Update backend/query_llm.py
backend/query_llm.py  +19 -3

@@ -5,7 +5,7 @@ import gradio as gr
 
 from os import getenv
 from typing import Any, Dict, Generator, List
-
+from gradio_client import Client
 from huggingface_hub import InferenceClient
 from transformers import AutoTokenizer
 
@@ -83,8 +83,24 @@ def generate_hf(prompt: str, history: str, temperature: float = 0.5, max_new_tok
     formatted_prompt = format_prompt(prompt, "hf")
 
     try:
-        stream = hf_client.text_generation(formatted_prompt, **generate_kwargs,
-                                           stream=True, details=True, return_full_text=False)
+        # stream = hf_client.text_generation(formatted_prompt, **generate_kwargs,
+        #                                    stream=True, details=True, return_full_text=False)
+        client_Qwen = Client("Qwen/Qwen1.5-110B-Chat-demo")
+        response = client_Qwen.predict(
+            query=prompt,
+            history=[],
+            system="You are a helpful assistant.",
+            api_name="/model_chat"
+        )
+        # Extract the API output text
+        api_output = response[1] if response and len(response) > 1 else "No output received from the API."
+
+        #chatbot_responses.append((txt, api_output[0][1]))
+        print(response)
+
+        # Print the generated code
+        print(api_output[0][1])
+        stream=api_output[0][1]
         output = ""
         for response in stream:
             output += response.token.text