## Conversational Q&A Chatbot
import os

import streamlit as st
from langchain.schema import HumanMessage, SystemMessage, AIMessage
from langchain_openai import AzureChatOpenAI

# import dotenv
# dotenv.load_dotenv()

AZURE_OPENAI_KEY = "7a8f58dd922e4c78b1de2b660ebe61d6"
AZURE_OPENAI_ENDPOINT = "https://mlsdaiinstance.openai.azure.com/"
AZURE_OPENAI_VERSION = "2024-05-01-preview"
EMBEDDING_MODEL = "text-embedding-ada-002"
CHAT_MODEL = "gpt-35-turbo"
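# NOTE: credentials are hard-coded here only for brevity; in practice they would
# typically be loaded from the environment (see the dotenv lines above), e.g.:
#   AZURE_OPENAI_KEY = os.getenv("AZURE_OPENAI_KEY", "")
#   AZURE_OPENAI_ENDPOINT = os.getenv("AZURE_OPENAI_ENDPOINT", "")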
# Initialize the Azure OpenAI chat model
llm = AzureChatOpenAI(
    openai_api_type="azure",
    openai_api_version=AZURE_OPENAI_VERSION,
    openai_api_key=AZURE_OPENAI_KEY,
    azure_endpoint=AZURE_OPENAI_ENDPOINT,
    deployment_name=CHAT_MODEL,
    temperature=0,
)
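# AzureChatOpenAI is a LangChain chat model: llm.invoke(<list of messages>)
# returns an AIMessage whose .content attribute holds the generated reply.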
## Streamlit UI
st.set_page_config(page_title="Conversational Q&A Chatbot")
st.header("Hey, Let's Chat")
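# Streamlit reruns this script on every interaction; st.session_state keeps the
# conversation history alive across those reruns for the current browser session.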
if 'flow_messages' not in st.session_state:
    st.session_state['flow_messages'] = [
        SystemMessage(content="You are an AI assistant who answers the questions asked truthfully!")
    ]
## Function to send the conversation to the chat model and return its response
def get_chatmodel_response(question):
    st.session_state['flow_messages'].append(HumanMessage(content=question))
    response = llm.invoke(st.session_state['flow_messages'])
    st.session_state['flow_messages'].append(AIMessage(content=response.content))
    return response.content
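# Each call appends to the history, so after two questions it looks like
# [SystemMessage, HumanMessage, AIMessage, HumanMessage, AIMessage] and the
# model always sees the full conversation.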
user_input = st.text_input("Input: ", key="input")
submit = st.button("Ask the question")

## Only query the model when the button is clicked and there is some input
if submit and user_input:
    response = get_chatmodel_response(user_input)
    st.subheader("The Response is")
    st.write(response)
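# To run locally (assuming this file is saved as app.py): streamlit run app.py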