Upload 5 files
- .env +6 -0
- Dockerfile +17 -0
- Web-Chatbot-Speech-En-app.py +70 -0
- requirements.txt +7 -0
- templates/index.html +154 -0
.env
ADDED
@@ -0,0 +1,6 @@
+AI_SERVICE_ENDPOINT=https://iti109-sectionb.cognitiveservices.azure.com/
+AI_SERVICE_KEY=2ou0CMAjUutj0D4In8U8AkxEIXtCrvYFOBMhqSW4rZ7x6yZ033GdJQQJ99ALACqBBLyXJ3w3AAAaACOGtVJj
+QA_PROJECT_NAME=ITI109-SectionB-FAQ
+QA_DEPLOYMENT_NAME=production
+SPEECH_KEY=BHMIZNZ8xH7JQHXaGAoaOlwdx3bjxvhyuLxpHumSiRPXxpo1Rpb5JQQJ99BAACqBBLyXJ3w3AAAYACOGvOBV
+SPEECH_REGION=southeastasia
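The app loads these six settings at startup with python-dotenv (see Web-Chatbot-Speech-En-app.py below). A minimal fail-fast check along these lines (a sketch, not part of the uploaded files) can surface a missing value before any Azure client is constructed:

# Sketch (not in the upload): fail fast if a .env setting is absent
import os
from dotenv import load_dotenv

load_dotenv()
REQUIRED = ["AI_SERVICE_ENDPOINT", "AI_SERVICE_KEY", "QA_PROJECT_NAME",
            "QA_DEPLOYMENT_NAME", "SPEECH_KEY", "SPEECH_REGION"]
missing = [name for name in REQUIRED if not os.getenv(name)]
if missing:
    raise RuntimeError("Missing .env settings: " + ", ".join(missing))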
Dockerfile
ADDED
@@ -0,0 +1,17 @@
+# Use an official Python runtime as the base image
+FROM python:3.9
+
+# Set the working directory in the container
+WORKDIR /app
+
+# Copy all project files to the container
+COPY . /app
+
+# Install dependencies
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Expose the port Flask will run on
+EXPOSE 7860
+
+# Command to start the application
+CMD ["gunicorn", "--bind", "0.0.0.0:7860", "Web-Chatbot-Speech-En-app:app"]
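The image serves the Flask app through gunicorn on port 7860. Assuming it is built and started with something like "docker build -t chatbot ." and "docker run -p 7860:7860 chatbot" (the image tag and host port here are illustrative, not taken from the upload), a quick smoke test from the host could look like this sketch:

# Sketch: smoke-test the running container from the host
# (assumes: docker run -p 7860:7860 chatbot)
import requests

resp = requests.get("http://localhost:7860/")
print(resp.status_code)  # expect 200, with the chat page HTML in resp.text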
Web-Chatbot-Speech-En-app.py
ADDED
@@ -0,0 +1,70 @@
+from flask import Flask, render_template, request, jsonify, send_file
+import requests
+from dotenv import load_dotenv
+import os
+
+# import namespaces
+from azure.core.credentials import AzureKeyCredential
+from azure.ai.language.questionanswering import QuestionAnsweringClient
+from azure.cognitiveservices.speech import SpeechConfig, SpeechSynthesizer
+from azure.cognitiveservices.speech.audio import AudioOutputConfig
+
+# Create a Flask app
+app = Flask(__name__)
+
+# Azure Bot Service configuration
+AZURE_BOT_ENDPOINT = "https://iti109-sectionb.cognitiveservices.azure.com/"
+AZURE_BOT_KEY = "2ou0CMAjUutj0D4In8U8AkxEIXtCrvYFOBMhqSW4rZ7x6yZ033GdJQQJ99ALACqBBLyXJ3w3AAAaACOGtVJj"
+
+# Get configuration settings from .env
+load_dotenv()
+ai_endpoint = os.getenv('AI_SERVICE_ENDPOINT')
+ai_key = os.getenv('AI_SERVICE_KEY')
+ai_project_name = os.getenv('QA_PROJECT_NAME')
+ai_deployment_name = os.getenv('QA_DEPLOYMENT_NAME')
+speech_key = os.getenv('SPEECH_KEY')
+speech_region = os.getenv('SPEECH_REGION')
+
+# Create the question-answering client using the endpoint and key
+credential = AzureKeyCredential(ai_key)
+ai_client = QuestionAnsweringClient(endpoint=ai_endpoint, credential=credential)
+
+# Web interface
+@app.route('/')
+def home():
+    return render_template('index.html')  # HTML file for the web interface
+
+@app.route('/ask', methods=['POST'])
+def ask_bot():
+    user_question = request.json.get("question", "")  # Get the question from the request
+
+    if not user_question:
+        return jsonify({"error": "No question provided"}), 400  # Return an error if no question is provided
+
+    try:
+        # Get the answer from the bot
+        response = ai_client.get_answers(question=user_question,
+                                         project_name=ai_project_name,
+                                         deployment_name=ai_deployment_name)
+
+        # Take the top answer from the response
+        bot_response = response.answers[0].answer if response.answers else "No response from bot"
+
+        # Text-to-speech: synthesize the answer into a WAV file
+        speech_config = SpeechConfig(subscription=speech_key, region=speech_region)
+        audio_config = AudioOutputConfig(filename="response.wav")  # Save the audio to a file
+        synthesizer = SpeechSynthesizer(speech_config=speech_config, audio_config=audio_config)
+        synthesizer.speak_text(bot_response)
+
+        # Return the answer and the audio URL
+        return jsonify({"answer": bot_response, "audio": "/response.wav"})
+    except Exception as e:
+        return jsonify({"error": str(e)}), 500
+
+# Return the audio file
+@app.route('/response.wav')
+def get_audio():
+    return send_file("response.wav", mimetype="audio/wav")
+
+if __name__ == '__main__':
+    app.run(debug=True)
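The /ask route takes a JSON body with a "question" field and returns the top answer together with a URL for the synthesized WAV. A minimal client sketch against a local run (the base URL assumes Flask's dev-server default of 127.0.0.1:5000; under Docker it would be port 7860, and the sample question is made up):

# Sketch: exercise /ask and download the synthesized reply
import requests

BASE = "http://127.0.0.1:5000"  # assumption: Flask dev-server default

resp = requests.post(f"{BASE}/ask", json={"question": "What is laksa?"})  # hypothetical question
resp.raise_for_status()
data = resp.json()
print("Answer:", data["answer"])

# Fetch the WAV the server just wrote (the page adds ?t=<timestamp> to bypass caching)
audio = requests.get(BASE + data["audio"])
with open("reply.wav", "wb") as f:
    f.write(audio.content)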
requirements.txt
ADDED
@@ -0,0 +1,7 @@
+Flask
+requests
+azure-ai-language-questionanswering
+azure-cognitiveservices-speech
+azure-core
+python-dotenv
+gunicorn
templates/index.html
ADDED
@@ -0,0 +1,154 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>Asian Food Chatbot</title>
+    <style>
+        body {
+            font-family: Arial, sans-serif;
+            background-color: #f4f4f9;
+            margin: 0;
+            padding: 0;
+            display: flex;
+            justify-content: center;
+            align-items: center;
+            height: 100vh;
+        }
+        .chat-container {
+            width: 500px;
+            background-color: #fff;
+            box-shadow: 0 0 10px rgba(0, 0, 0, 0.1);
+            border-radius: 8px;
+            overflow: hidden;
+            display: flex;
+            flex-direction: column;
+        }
+        .chat-header {
+            background-color: #007bff;
+            color: #fff;
+            padding: 10px;
+            text-align: center;
+            font-size: 1.2em;
+        }
+        .chat-messages {
+            flex: 1;
+            padding: 10px;
+            overflow-y: auto;
+        }
+        .chat-input {
+            display: flex;
+            border-top: 1px solid #ddd;
+        }
+        .chat-input input {
+            flex: 1;
+            padding: 10px;
+            border: none;
+            border-radius: 0;
+            outline: none;
+        }
+        .chat-input button {
+            padding: 10px;
+            background-color: #007bff;
+            color: #fff;
+            border: none;
+            cursor: pointer;
+        }
+        .chat-input button:hover {
+            background-color: #0056b3;
+        }
+        .message {
+            margin-bottom: 10px;
+        }
+        .message.user {
+            text-align: right;
+        }
+        .message.bot {
+            text-align: left;
+        }
+        .message .text {
+            display: inline-block;
+            padding: 10px;
+            border-radius: 8px;
+            max-width: 70%;
+        }
+        .message.user .text {
+            background-color: #007bff;
+            color: #fff;
+        }
+        .message.bot .text {
+            background-color: #f1f1f1;
+            color: #333;
+        }
+    </style>
+    <script src="https://cdn.jsdelivr.net/npm/axios/dist/axios.min.js"></script>
+    <script src="https://aka.ms/csspeech/jsbrowserpackageraw"></script>
+</head>
+<body>
+    <div class="chat-container">
+        <div class="chat-header">Asian Food Chatbot - English only</div>
+        <div class="chat-messages" id="chat-messages"></div>
+        <div class="chat-input">
+            <input type="text" id="question" placeholder="Type a message...">
+            <button onclick="askQuestion()">Ask</button>
+            <button onclick="startListening()" style="background-color:green">🎤 Speak</button>
+        </div>
+    </div>
+
+    <audio id="audio" controls style="display:none;"></audio>
+
+    <script>
+        const speechKey = 'BHMIZNZ8xH7JQHXaGAoaOlwdx3bjxvhyuLxpHumSiRPXxpo1Rpb5JQQJ99BAACqBBLyXJ3w3AAAYACOGvOBV';
+        const speechRegion = 'southeastasia';
+
+        function askQuestion() {
+            const question = document.getElementById('question').value;
+            if (!question) return;
+
+            axios.post('/ask', { question })
+                .then(response => {
+                    console.log("Response:", response.data); // Debugging log
+
+                    const chatMessages = document.getElementById('chat-messages');
+                    const userMessage = document.createElement('div');
+                    userMessage.textContent = `You: ${question}`;
+                    chatMessages.appendChild(userMessage);
+
+                    const botMessage = document.createElement('div');
+                    botMessage.textContent = `Bot: ${response.data.answer}`;
+                    botMessage.style.marginBottom = "10px"; // Add margin-bottom for spacing
+                    chatMessages.appendChild(botMessage);
+
+                    const audioElement = document.getElementById('audio');
+                    audioElement.src = response.data.audio + '?t=' + new Date().getTime();
+                    audioElement.style.display = 'block';
+                    audioElement.play();
+
+                    document.getElementById('question').value = '';
+                })
+                .catch(error => {
+                    console.error("Error:", error); // Debugging log
+                });
+        }
+
+        function startListening() {
+            const speechConfig = SpeechSDK.SpeechConfig.fromSubscription(speechKey, speechRegion);
+            const audioConfig = SpeechSDK.AudioConfig.fromDefaultMicrophoneInput();
+            const recognizer = new SpeechSDK.SpeechRecognizer(speechConfig, audioConfig);
+
+            recognizer.recognizeOnceAsync(result => {
+                if (result.reason === SpeechSDK.ResultReason.RecognizedSpeech) {
+                    console.log("Recognized Text:", result.text); // Debugging log
+                    document.getElementById('question').value = result.text;
+                    askQuestion();
+                } else {
+                    console.error("Speech Recognition Error:", result.errorDetails); // Debugging log
+                }
+            }, err => {
+                console.error("Error recognizing speech:", err); // Debugging log
+            });
+        }
+    </script>
+
+</body>
+</html>