Update backend/query_llm.py
backend/query_llm.py (+5 -5)
@@ -18,10 +18,10 @@ repetition_penalty = 1.2
 OPENAI_KEY = getenv("OPENAI_API_KEY")
 HF_TOKEN = getenv("HUGGING_FACE_HUB_TOKEN")
 
-hf_client = InferenceClient(
-    "mistralai/Mistral-7B-Instruct-v0.1",
-    token=HF_TOKEN
-)
+# hf_client = InferenceClient(
+#     "mistralai/Mistral-7B-Instruct-v0.1",
+#     token=HF_TOKEN
+# )
 
 
 # hf_client = InferenceClient(
@@ -87,7 +87,7 @@ def generate_hf(prompt: str, history: str, temperature: float = 0.5, max_new_tok
     #                stream=True, details=True, return_full_text=False)
     client_Qwen = Client("Qwen/Qwen1.5-110B-Chat-demo")
     response = client_Qwen.predict(
-        query=
+        query=prompt,
         history=[],
         system="You are a helpful assistant.",
         api_name="/model_chat"
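The second hunk supplies the missing query argument to client_Qwen.predict (the diff shows the keyword with no value before the patch, which would not parse). As a sanity check, here is a minimal, self-contained sketch of the same call path using gradio_client; the driver function and the response unpacking are assumptions for illustration, not part of the commit. In particular, the (textbox, chat_history, system) return shape is an assumption about the Space's /model_chat endpoint, not something shown in the diff.

# Minimal sketch (assumption: illustrative driver, not part of the commit)
# of the fixed generate_hf call path against the Qwen demo Space.
from gradio_client import Client

client_Qwen = Client("Qwen/Qwen1.5-110B-Chat-demo")

def ask_qwen(prompt: str) -> str:
    # The fix: `query` now receives the user prompt.
    response = client_Qwen.predict(
        query=prompt,
        history=[],
        system="You are a helpful assistant.",
        api_name="/model_chat",
    )
    # Assumption about the Space's return shape: (textbox, chat_history,
    # system), where chat_history is a list of [user, assistant] pairs.
    _, chat_history, _ = response
    return chat_history[-1][1]

print(ask_qwen("What does this backend do?"))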
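The first hunk comments out the direct Mistral InferenceClient in favor of the Qwen call. For reference, a hedged sketch of what that path would look like if re-enabled, assuming huggingface_hub's InferenceClient.text_generation with the streaming kwargs that appear commented at old line 87; the prompt and max_new_tokens value here are illustrative, not taken from the file.

# Sketch (assumption: reconstructs the disabled path, not the live code) of
# the Mistral InferenceClient that the commit comments out.
from os import getenv
from huggingface_hub import InferenceClient

HF_TOKEN = getenv("HUGGING_FACE_HUB_TOKEN")

hf_client = InferenceClient(
    "mistralai/Mistral-7B-Instruct-v0.1",
    token=HF_TOKEN,
)

# text_generation mirrors the kwargs commented at old line 87: stream=True
# yields tokens incrementally, details=True attaches per-token metadata,
# return_full_text=False omits the prompt from the output.
for chunk in hf_client.text_generation(
    "Explain retrieval-augmented generation in one sentence.",
    max_new_tokens=256,  # illustrative value, not from the diff
    stream=True,
    details=True,
    return_full_text=False,
):
    print(chunk.token.text, end="", flush=True)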