"""Streamlit fitness Q&A app backed by a persisted LlamaIndex vector store.

Documents under ``./documents`` are embedded once into a vector index that is
persisted to ``./vectorstore``; later runs reload the saved index instead of
re-embedding. An OpenAI-backed agent answers user questions through a
query-engine tool over that index.
"""

import streamlit as st
import os
import os.path
from llama_index.core import (
    VectorStoreIndex,
    SimpleDirectoryReader,
    load_index_from_storage,
    ServiceContext,
    StorageContext,
)
from llama_index.readers.file import PDFReader  # NOTE(review): unused in this file — confirm before removing
from llama_index.llms.openai import OpenAI
from dotenv import load_dotenv
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.agent.openai import OpenAIAgent

# Load OPENAI_API_KEY (and any other secrets) from a local .env file.
load_dotenv()

storage_path = "./vectorstore"
documents_path = "./documents"

# Configure the LLM once and thread it through a ServiceContext so the index
# and query engine actually use it. (Previously this object was created but
# never passed anywhere, so the gpt-3.5-turbo setting had no effect.)
llm = OpenAI(model="gpt-3.5-turbo")
service_context = ServiceContext.from_defaults(llm=llm)


@st.cache_resource(show_spinner=False)
def initialize():
    """Build the vector index on first run, or reload it from disk.

    Returns:
        VectorStoreIndex: the (possibly freshly built) document index.
    """
    if not os.path.exists(storage_path):
        # First run: embed every document under documents_path, then persist
        # the index so subsequent runs can skip the (slow, paid) embedding.
        documents = SimpleDirectoryReader(documents_path).load_data()
        index = VectorStoreIndex.from_documents(
            documents, service_context=service_context
        )
        index.storage_context.persist(persist_dir=storage_path)
    else:
        # Reload the previously persisted index.
        storage_context = StorageContext.from_defaults(persist_dir=storage_path)
        index = load_index_from_storage(
            storage_context, service_context=service_context
        )
    return index


index = initialize()

# NOTE(review): `similarity_cutoff` is not a documented `as_query_engine`
# kwarg in all llama_index releases and may be silently ignored — verify
# against the installed version (a SimilarityPostprocessor is the usual way).
fitness_engine = index.as_query_engine(similarity_top_k=3, similarity_cutoff=0.5)

tools = [
    QueryEngineTool(
        query_engine=fitness_engine,
        metadata=ToolMetadata(
            name="fitnessdata",
            description="this gives information about diets , workoutplan and mental health",
        ),
    )
]

# Pass the configured llm explicitly so the agent uses gpt-3.5-turbo as well.
agent = OpenAIAgent.from_tools(tools, llm=llm, verbose=True)

# --- UI ---------------------------------------------------------------------
st.title("Fitness App")

input_text = st.text_input("Enter your question here:")

if st.button("Ask"):
    if input_text:
        results = agent.query(input_text)
        # agent.query returns a Response object; show its text payload.
        if results:
            response = results.response
        else:
            response = "Sorry, I couldn't find a relevant response."
        # Display inside this branch so `response` is always defined here.
        # (The original wrote `response` even when no question was entered,
        # which raised a NameError on an empty submit.)
        st.write("Response:", response)
    else:
        st.write("Response:", "Please enter a question first.")