KrishP-12 committed
Commit 28b261d · verified · 1 Parent(s): e47c0cc

Create app.py

Files changed (1)
  1. app.py +294 -0
app.py ADDED
@@ -0,0 +1,294 @@
+ import os
+ import gradio as gr
+ from PIL import Image
+ import pytesseract
+ from pdf2image import convert_from_path
+ from langchain_community.embeddings import HuggingFaceEmbeddings
+ from langchain.prompts import PromptTemplate
+ from langchain.chains import RetrievalQA
+ from langchain.memory import ConversationBufferMemory
+ from langchain_groq import ChatGroq
+ from langchain_community.vectorstores import FAISS
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ import base64
+ from io import BytesIO
+
+ # Set up Groq API Key and LLM
+ os.environ["GROQ_API_KEY"] = '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'
+ llm = ChatGroq(
+     model='llama3-70b-8192',
+     temperature=0.5,
+     max_tokens=None,
+     timeout=None,
+     max_retries=2
+ )
+
+ # OCR Functions
+ def ocr_image(image_path, language='eng+guj'):
+     img = Image.open(image_path)
+     text = pytesseract.image_to_string(img, lang=language)
+     return text
+
+ def ocr_pdf(pdf_path, language='eng+guj'):
+     images = convert_from_path(pdf_path)
+     all_text = ""
+     for img in images:
+         text = pytesseract.image_to_string(img, lang=language)
+         all_text += text + "\n"
+     return all_text
+
+ def ocr_file(file_path):
+     file_extension = os.path.splitext(file_path)[1].lower()
+
+     if file_extension == ".pdf":
+         text_re = ocr_pdf(file_path, language='guj+eng')
+     elif file_extension in [".jpg", ".jpeg", ".png", ".bmp"]:
+         text_re = ocr_image(file_path, language='guj+eng')
+     else:
+         raise ValueError("Unsupported file format. Supported formats are PDF, JPG, JPEG, PNG, BMP.")
+
+     return text_re
+
+ def get_text_chunks(text):
+     text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
+     chunks = text_splitter.split_text(text)
+     return chunks
+
+ def get_vector_store(text_chunks):
+     embeddings = HuggingFaceEmbeddings(
+         model_name="sentence-transformers/all-MiniLM-L6-v2",
+         model_kwargs={'device': 'cpu'},
+         encode_kwargs={'normalize_embeddings': True}
+     )
+     vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
+
+     os.makedirs("faiss_index", exist_ok=True)
+     vector_store.save_local("faiss_index")
+
+     return vector_store
+
+ def process_ocr_and_pdf_files(file_paths):
+     raw_text = ""
+     for file_path in file_paths:
+         raw_text += ocr_file(file_path) + "\n"
+     text_chunks = get_text_chunks(raw_text)
+     return get_vector_store(text_chunks)
+
+ def get_conversational_chain():
+     template = """You are an intelligent educational assistant specialized in handling queries about documents. You have been provided with OCR-processed text from the uploaded files that contains important educational information.
+
+ Core Responsibilities:
+ 1. Language Processing:
+    - Identify the language of the user's query (English or Gujarati)
+    - Respond in the same language as the query
+    - If the query is in Gujarati, ensure the response maintains proper Gujarati grammar and terminology
+    - For technical terms, provide both English and Gujarati versions when relevant
+
+ 2. Document Understanding:
+    - Analyze the OCR-processed text from the uploaded files
+    - Account for potential OCR errors or misinterpretations
+    - Focus on extracting accurate information despite possible OCR imperfections
+
+ 3. Response Guidelines:
+    - Provide direct, clear answers based solely on the document content
+    - If information is unclear due to OCR quality, mention this limitation
+    - For numerical data (dates, percentages, marks), double-check accuracy before responding
+    - If information is not found in the documents, clearly state: "This information is not present in the uploaded documents"
+
+ 4. Educational Context:
+    - Maintain focus on educational queries related to the document content
+    - For admission-related queries, emphasize important deadlines and requirements
+    - For scholarship information, highlight eligibility criteria and application processes
+    - For course-related queries, provide detailed, accurate information from the documents
+
+ 5. Response Format:
+    - Structure responses clearly with relevant subpoints when necessary
+    - For complex information, break down the answer into digestible parts
+    - Include relevant reference points from the documents when applicable
+    - Format numerical data and dates clearly
+
+ 6. Quality Control:
+    - Verify that responses align with the document content
+    - Don't make assumptions beyond the provided information
+    - If multiple interpretations are possible due to OCR quality, mention all possibilities
+    - Maintain consistency in terminology throughout the conversation
+
+ Important Rules:
+ - Never make up information not present in the documents
+ - Don't combine information from previous conversations or external knowledge
+ - Always indicate if certain parts of the documents are unclear due to OCR quality
+ - Maintain a professional tone while being accessible to students and parents
+ - If the query is out of scope of the uploaded documents, politely redirect to relevant official sources
+
+ Context from uploaded documents:
+ {context}
+
+ Chat History:
+ {history}
+
+ Current Question: {question}
+ Assistant: Let me provide a clear and accurate response based on the uploaded documents...
+ """
+     # Use the same embedding model that built the index; a different model would give mismatched retrieval
+     embeddings = HuggingFaceEmbeddings(
+         model_name="sentence-transformers/all-MiniLM-L6-v2",
+         model_kwargs={'device': 'cpu'},
+         encode_kwargs={'normalize_embeddings': True}
+     )
+
+     new_vector_store = FAISS.load_local(
+         "faiss_index", embeddings, allow_dangerous_deserialization=True
+     )
+
+     QA_CHAIN_PROMPT = PromptTemplate(
+         input_variables=["history", "context", "question"],
+         template=template
+     )
+
+     qa_chain = RetrievalQA.from_chain_type(
+         llm,
+         retriever=new_vector_store.as_retriever(),
+         chain_type='stuff',
+         verbose=True,
+         chain_type_kwargs={
+             "verbose": True,
+             "prompt": QA_CHAIN_PROMPT,
+             "memory": ConversationBufferMemory(memory_key="history", input_key="question"),
+         }
+     )
+
+     return qa_chain
+ def process_files_and_query(files, query):
+     if len(files) > 5:
+         return "Error: You can upload a maximum of 5 files only."
+
+     # Ensure temp directory exists
+     os.makedirs("temp", exist_ok=True)
+
+     # Save uploaded files locally
+     file_paths = []
+     for file in files:
+         file_path = os.path.join("temp", os.path.basename(file))
+         with open(file, "rb") as src, open(file_path, "wb") as dst:
+             dst.write(src.read())
+         file_paths.append(file_path)
+
+     # OCR the files and (re)build the FAISS vector store
+     process_ocr_and_pdf_files(file_paths)
+
+     # Perform the query; RetrievalQA retrieves relevant chunks through its own retriever,
+     # so no separate similarity search is needed here and "query" is its only input key
+     chain = get_conversational_chain()
+     response = chain({"query": query}, return_only_outputs=True)
+     result = response.get("result", "No result found")
+
+     return result
+ def handle_uploaded_file(uploaded_files, show_in_sidebar=False):
+     sidebar_content = ""
+
+     if len(uploaded_files) > 5:
+         return "Error: You can upload a maximum of 5 files only."
+
+     # With type="filepath", Gradio passes each upload as a path to a temporary copy on disk
+     for file_path in uploaded_files:
+         file_extension = os.path.splitext(file_path)[1].lower()
+
+         if file_extension == ".pdf":
+             # Read and encode the PDF as base64 to embed it in the sidebar
+             with open(file_path, "rb") as pdf_file:
+                 pdf_data = pdf_file.read()
+             pdf_base64 = base64.b64encode(pdf_data).decode('utf-8')
+             sidebar_content += f'<iframe src="data:application/pdf;base64,{pdf_base64}" width="500" height="500"></iframe>'
+
+         elif file_extension in ['.jpg', '.jpeg', '.png', '.bmp']:
+             # Re-encode the image as PNG and display it in the sidebar
+             img = Image.open(file_path)
+             img_byte_array = BytesIO()
+             img.save(img_byte_array, format="PNG")
+             img_byte_array.seek(0)
+             sidebar_content += f'<img src="data:image/png;base64,{base64.b64encode(img_byte_array.getvalue()).decode()}" width="400" height="400"/>'
+
+         else:
+             # For text files, show the file content
+             with open(file_path, 'r', encoding='utf-8') as f:
+                 content = f.read()
+             sidebar_content += f"<pre>{content}</pre>"
+
+     return sidebar_content
+
+ # Gradio interface setup
+ def upload_and_display(files):
+     if len(files) > 5:
+         return "Error: You can upload a maximum of 5 files only."
+
+     sidebar_content = handle_uploaded_file(files, show_in_sidebar=True)
+     return sidebar_content
+
+ def launch_gradio_app():
+     with gr.Blocks() as demo:
+         gr.Markdown("# Document OCR and Q&A Assistant")
+
+         with gr.Row():
+             with gr.Column(scale=1):  # Main content area
+                 file_input = gr.File(
+                     file_count="multiple",
+                     type="filepath",  # Each upload is passed to callbacks as a file path
+                     file_types=[".pdf", ".jpg", ".jpeg", ".png", ".bmp"],
+                     label="Upload Documents (PDF/Images)"
+                 )
+
+                 query_input = gr.Textbox(
+                     label="Ask a Question about the Documents",
+                     lines=3
+                 )
+
+                 submit_btn = gr.Button("Process and Query")
+
+                 output = gr.Textbox(label="Answer", lines=5)
+
+                 submit_btn.click(
+                     fn=process_files_and_query,
+                     inputs=[file_input, query_input],
+                     outputs=[output]
+                 )
+
+             with gr.Column(scale=1):  # Sidebar
+                 gr.Markdown("## Sidebar")
+                 file_preview = gr.HTML(label="File Preview")  # File preview content is rendered here
+                 file_input.change(fn=upload_and_display, inputs=file_input, outputs=file_preview)
+
+     return demo
+
+ # Launch the Gradio app
+ if __name__ == "__main__":
+     app = launch_gradio_app()
+     app.launch(share=True)  # Set share=True to create a public link