fikriazain commited on
Commit
0282d73
·
1 Parent(s): cfa4af6
Files changed (2) hide show
  1. server.py +8 -3
  2. tools/tools_llm.py +20 -5
server.py CHANGED
@@ -3,7 +3,7 @@ from bot import llm
3
  from flask import Flask, request, jsonify
4
  # from langchain_core.messages import HumanMessage, AIMessage
5
  # from langchain.prompts import ChatPromptTemplate
6
- # from deep_translator import GoogleTranslator
7
 
8
 
9
  app = Flask(__name__)
@@ -55,12 +55,17 @@ def query_llm():
55
  data = request.json
56
  user_input = data.get('input')
57
  username = data.get('username')
 
 
58
 
59
  if not user_input:
60
  return jsonify({"error": "No input provided"}), 400
61
 
62
- response = simulate_llm_query(user_input, username)
63
- return jsonify({"response": response})
 
 
 
64
 
65
  if __name__ == '__main__':
66
  app.run(host='0.0.0.0', port=7860)
 
3
  from flask import Flask, request, jsonify
4
  # from langchain_core.messages import HumanMessage, AIMessage
5
  # from langchain.prompts import ChatPromptTemplate
6
+ from deep_translator import GoogleTranslator
7
 
8
 
9
  app = Flask(__name__)
 
55
  data = request.json
56
  user_input = data.get('input')
57
  username = data.get('username')
58
+
59
+ input_translate = GoogleTranslator(source='id', target='en').translate(user_input)
60
 
61
  if not user_input:
62
  return jsonify({"error": "No input provided"}), 400
63
 
64
+ response = simulate_llm_query(input_translate, username)
65
+
66
+ output_translate = GoogleTranslator(source='en', target='id').translate(response)
67
+
68
+ return jsonify({"response": output_translate})
69
 
70
  if __name__ == '__main__':
71
  app.run(host='0.0.0.0', port=7860)
tools/tools_llm.py CHANGED
@@ -6,10 +6,10 @@ import json
6
  # from tools import db, reranker_model, model_llm_rag
7
  # from langchain.retrievers import ContextualCompressionRetriever
8
  # from langchain.retrievers.document_compressors import CrossEncoderReranker
9
- # from deep_translator import GoogleTranslator
10
  # from langchain.prompts import ChatPromptTemplate
11
 
12
- PROMPT_TEMPLATE = PROMPT_TEMPLATE = """### Instruction:
13
  Your job is to answer the question based on the given pieces of information. All you have to do is answer the question. Not all of the information provided may be relevant to the question. The answer you create must be logical. Each piece of information will be separated by '---'.
14
 
15
  ### Example:
@@ -83,12 +83,27 @@ def send_emergency_message_to_medic(query: str) -> str:
83
 
84
  @tool
85
  def search_information_for_question(query: str) -> str:
86
- """Function that searches for information based on the user query. You must use this function if there are questions related to medical topics.
87
- # The query is the message that the patient send to Panda, YOU MUST NOT CHANGE IT."""
 
88
  # compressor = CrossEncoderReranker(model=reranker_model, top_n=3)
89
  # compression_retriever = ContextualCompressionRetriever(
90
  # base_compressor=compressor, base_retriever=db
91
  # )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
92
 
93
  # query_translate = GoogleTranslator(source='english', target='id').translate(query)
94
  # results = compression_retriever.invoke(query)
@@ -99,7 +114,7 @@ def search_information_for_question(query: str) -> str:
99
  # print(target)
100
  # result = model_llm_rag.invoke(prompt)
101
  # return GoogleTranslator(source='id', target='english').translate(result)
102
- return GoogleSerperAPIWrapper().run(query)
103
 
104
  # @tool
105
  # def search_medic_info(query: str) -> str:
 
6
  # from tools import db, reranker_model, model_llm_rag
7
  # from langchain.retrievers import ContextualCompressionRetriever
8
  # from langchain.retrievers.document_compressors import CrossEncoderReranker
9
+ from deep_translator import GoogleTranslator
10
  # from langchain.prompts import ChatPromptTemplate
11
 
12
+ PROMPT_TEMPLATE = """### Instruction:
13
  Your job is to answer the question based on the given pieces of information. All you have to do is answer the question. Not all of the information provided may be relevant to the question. The answer you create must be logical. Each piece of information will be separated by '---'.
14
 
15
  ### Example:
 
83
 
84
  @tool
85
  def search_information_for_question(query: str) -> str:
86
+ """Function that searches for information based on the user query. You must use this function if a patient asks you a question related to hemodialysis or other medical topics.
87
+ You will get a list of 3 pieces of information that you can use to answer the patient's question. If the information is insufficient or irrelevant, you must tell the patient that you don't have enough information to answer the question.
88
+ Please make sure the information is relevant to the user query."""
89
  # compressor = CrossEncoderReranker(model=reranker_model, top_n=3)
90
  # compression_retriever = ContextualCompressionRetriever(
91
  # base_compressor=compressor, base_retriever=db
92
  # )
93
+
94
+ url = f"https://adhin-hemovdb.hf.space/search?query={query}"
95
+
96
+ response = requests.get(url)
97
+
98
+ data = response.json()
99
+
100
+ # Translate the data from Indonesian to English. Data is a list of 3 pieces of information.
101
+ translated_data = []
102
+ for i in range(3):
103
+ translated_data.append(GoogleTranslator(source='id', target='en').translate(data[i]))
104
+
105
+ return translated_data
106
+
107
 
108
  # query_translate = GoogleTranslator(source='english', target='id').translate(query)
109
  # results = compression_retriever.invoke(query)
 
114
  # print(target)
115
  # result = model_llm_rag.invoke(prompt)
116
  # return GoogleTranslator(source='id', target='english').translate(result)
117
+ # return GoogleSerperAPIWrapper().run(query)
118
 
119
  # @tool
120
  # def search_medic_info(query: str) -> str: