Update app.py
Browse files
app.py
CHANGED
@@ -1,166 +1,99 @@
|
|
1 |
import streamlit as st
|
2 |
-
|
3 |
-
import matplotlib.pyplot as plt
|
4 |
-
from datetime import date
|
5 |
-
from transformers import pipeline, set_seed
|
6 |
from gtts import gTTS
|
7 |
import tempfile
|
8 |
-
import
|
|
|
9 |
|
10 |
-
#
|
11 |
-
|
12 |
|
13 |
-
# Initialize
|
14 |
-
|
15 |
|
16 |
-
# Function for
|
17 |
-
def
|
18 |
try:
|
19 |
-
response
|
|
|
20 |
return response[0]['generated_text']
|
21 |
except Exception as e:
|
22 |
-
return f"Error
|
23 |
|
24 |
-
# Function for text-to-speech
|
25 |
-
def text_to_speech(text):
|
26 |
try:
|
27 |
-
tts = gTTS(text=text, lang=
|
|
|
28 |
temp_audio = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
|
29 |
tts.save(temp_audio.name)
|
30 |
return temp_audio.name
|
31 |
except Exception as e:
|
32 |
return f"Error generating speech: {e}"
|
33 |
|
34 |
-
# MedBot chatbot function
|
35 |
-
def med_chatbot(user_input):
|
36 |
-
llm_response = query_huggingface(user_input)
|
37 |
-
if "Error" in llm_response:
|
38 |
-
return llm_response, None
|
39 |
-
audio_path = text_to_speech(llm_response)
|
40 |
-
return llm_response, audio_path
|
41 |
-
|
42 |
-
# Function to fetch the latest healthcare/therapist news
|
43 |
-
def get_healthcare_news():
|
44 |
-
url = f'https://newsapi.org/v2/everything?q=healthcare OR therapy&apiKey={news_api_key}'
|
45 |
-
try:
|
46 |
-
response = requests.get(url)
|
47 |
-
news_data = response.json()
|
48 |
-
articles = news_data.get('articles', [])
|
49 |
-
news_list = []
|
50 |
-
for article in articles[:5]: # Get the top 5 articles
|
51 |
-
title = article.get('title', 'No Title')
|
52 |
-
description = article.get('description', 'No Description')
|
53 |
-
url = article.get('url', '')
|
54 |
-
news_list.append(f"**{title}**\n{description}\n[Read more]({url})\n\n")
|
55 |
-
return news_list
|
56 |
-
except Exception as e:
|
57 |
-
return [f"Error fetching news: {e}"]
|
58 |
-
|
59 |
# Streamlit Page Configuration
|
60 |
-
st.set_page_config(page_title="
|
61 |
-
|
62 |
-
# Sidebar
|
63 |
-
menu = st.sidebar.radio("Navigation", ["
|
64 |
-
|
65 |
-
#
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
76 |
st.audio(audio_path, format="audio/mp3")
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
st.header("Welcome to the Therapist Dashboard")
|
90 |
-
st.write("This application provides tools to enhance your therapy practice.")
|
91 |
-
|
92 |
-
elif dashboard_menu == "Session Tracker":
|
93 |
-
st.header("Session Tracker")
|
94 |
-
session_date = st.date_input("Session Date", value=date.today())
|
95 |
-
session_time = st.time_input("Session Time")
|
96 |
-
patient_name = st.text_input("Patient Name")
|
97 |
-
session_notes = st.text_area("Session Notes")
|
98 |
-
|
99 |
-
if st.button("Save Session"):
|
100 |
-
st.success(f"Session for {patient_name} on {session_date} at {session_time} saved successfully!")
|
101 |
-
|
102 |
-
st.write("### Recent Sessions")
|
103 |
-
sessions_data = pd.DataFrame({
|
104 |
-
"Date": ["2024-12-20", "2024-12-21"],
|
105 |
-
"Time": ["10:00 AM", "3:00 PM"],
|
106 |
-
"Patient": ["John Doe", "Jane Smith"],
|
107 |
-
"Notes": ["Discussed anxiety triggers.", "CBT exercise on thought patterns."]
|
108 |
-
})
|
109 |
-
st.table(sessions_data)
|
110 |
-
|
111 |
-
elif dashboard_menu == "Mood Tracker":
|
112 |
-
st.header("Mood Tracker")
|
113 |
-
mood_date = st.date_input("Date", value=date.today(), key="mood_date")
|
114 |
-
mood_level = st.slider("Mood Level (1-10)", 1, 10, 5)
|
115 |
-
mood_notes = st.text_area("Notes", key="mood_notes")
|
116 |
-
|
117 |
-
if st.button("Save Mood Entry"):
|
118 |
-
st.success("Mood entry saved!")
|
119 |
-
|
120 |
-
st.write("### Mood Trends")
|
121 |
-
mood_data = pd.DataFrame({
|
122 |
-
"Date": pd.date_range(start="2024-12-15", periods=7),
|
123 |
-
"Mood Level": [5, 6, 7, 5, 8, 9, 7]
|
124 |
-
})
|
125 |
-
fig, ax = plt.subplots()
|
126 |
-
ax.plot(mood_data["Date"], mood_data["Mood Level"], marker="o")
|
127 |
-
ax.set_title("Mood Trends")
|
128 |
-
ax.set_xlabel("Date")
|
129 |
-
ax.set_ylabel("Mood Level")
|
130 |
-
st.pyplot(fig)
|
131 |
-
|
132 |
-
elif dashboard_menu == "Therapy Tools":
|
133 |
-
st.header("Therapy Tools")
|
134 |
-
tools = st.selectbox("Select a Tool", ["CBT Worksheet", "Guided Relaxation", "Stress Management Tips"])
|
135 |
-
|
136 |
-
if tools == "CBT Worksheet":
|
137 |
-
st.text_area("Identify Negative Thoughts")
|
138 |
-
st.text_area("Challenge Negative Thoughts")
|
139 |
-
st.text_area("Replace with Positive Thoughts")
|
140 |
-
|
141 |
-
elif tools == "Guided Relaxation":
|
142 |
-
st.audio("https://example.com/guided_relaxation.mp3") # Replace with actual file URL
|
143 |
-
|
144 |
-
elif tools == "Stress Management Tips":
|
145 |
-
st.write("- Practice deep breathing exercises.\n- Maintain a regular sleep schedule.\n- Engage in physical activity.")
|
146 |
-
|
147 |
-
elif dashboard_menu == "Patient Analytics":
|
148 |
-
st.header("Patient Analytics")
|
149 |
-
analytics_data = pd.DataFrame({
|
150 |
-
"Patient": ["John Doe", "Jane Smith"],
|
151 |
-
"Sessions Conducted": [10, 8],
|
152 |
-
"Average Mood": [7.2, 8.1]
|
153 |
-
})
|
154 |
-
st.bar_chart(analytics_data.set_index("Patient"))
|
155 |
-
|
156 |
-
if st.button("Generate Report"):
|
157 |
-
st.success("Report generated successfully!")
|
158 |
-
|
159 |
-
# Healthcare News Section
|
160 |
-
elif menu == "Healthcare News":
|
161 |
-
st.title("Latest Healthcare & Therapist News")
|
162 |
-
st.write("Stay updated with the latest news and trends in healthcare and therapy.")
|
163 |
|
164 |
-
|
165 |
-
|
166 |
-
|
|
|
|
|
|
1 |
"""Streamlit app: GPT-2 chatbot with multi-language translation and text-to-speech."""
import streamlit as st
from transformers import pipeline
from gtts import gTTS
import tempfile
import os
from googletrans import Translator

# Hugging Face text-generation pipeline. Loading happens at module import, so
# every cold start of the app pays the model download/load cost up front.
generator = pipeline("text-generation", model="gpt2")

# googletrans client: translates user input into English for the model and the
# model's reply back into the selected language.
# NOTE(review): this assumes a googletrans release with a *synchronous*
# Translator.translate (<= 3.x); the 4.x alphas made it async — confirm the
# pinned version in requirements.
translator = Translator()
|
13 |
|
14 |
+
def generate_text(user_input):
    """Continue *user_input* with the module-level GPT-2 pipeline.

    Returns the generated text on success. On any failure an
    ``"Error generating text: ..."`` string is returned instead — callers
    treat the return value as displayable text either way.
    """
    try:
        outputs = generator(user_input, max_length=150, num_return_sequences=1)
        generated = outputs[0]["generated_text"]
    except Exception as exc:
        # Surface any pipeline/indexing failure as displayable text rather
        # than crashing the Streamlit rerun.
        return f"Error generating text: {exc}"
    return generated
|
22 |
|
23 |
+
# Function for text-to-speech with language support
def text_to_speech(text, lang):
    """Synthesize *text* with gTTS and return the path of a temporary MP3 file.

    Parameters
    ----------
    text : str
        The text to speak.
    lang : str
        Language code accepted by gTTS (e.g. ``"en"``, ``"es"``).

    Returns the filesystem path of the saved ``.mp3`` on success, or an
    ``"Error generating speech: ..."`` string on failure. The file is created
    with ``delete=False``, so the caller (or the OS temp cleanup) owns deletion.
    """
    try:
        tts = gTTS(text=text, lang=lang)
        temp_audio = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
        # Close the handle before gTTS writes to the path: the original left it
        # open, leaking a file descriptor per call, and on Windows an open
        # NamedTemporaryFile cannot be reopened by name, so tts.save() fails.
        temp_audio.close()
        tts.save(temp_audio.name)
        return temp_audio.name
    except Exception as e:
        return f"Error generating speech: {e}"
|
33 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
34 |
# Streamlit Page Configuration
st.set_page_config(page_title="GPT-2 Chatbot with Text-to-Speech (Multi-language)", layout="wide")

# Sidebar navigation: two top-level pages.
menu = st.sidebar.radio("Navigation", ["Chatbot", "About"])

# Display-name -> gTTS/googletrans language code.
languages = {
    "English": "en",
    "Spanish": "es",
    "French": "fr",
    "German": "de",
    "Italian": "it",
    "Portuguese": "pt",
    "Dutch": "nl",
    "Russian": "ru",
    "Chinese": "zh",
    "Japanese": "ja",
    "Hindi": "hi"
}

# Chatbot Interface
if menu == "Chatbot":
    st.title("GPT-2 Chatbot with Multi-language Support")
    st.write("Welcome to the GPT-2 powered chatbot with multiple language support. Ask any question, and I will generate a response.")

    # Language selection drives both translation direction and TTS voice.
    lang_choice = st.selectbox("Select Language for Chatbot", list(languages.keys()))
    lang_code = languages[lang_choice]

    # Text input for the user (default empty string -> falsy until typed in).
    user_input = st.text_input(f"Ask your question in {lang_choice}:", "")

    if st.button("Generate Response"):
        if user_input:
            # Translate user input to English before feeding GPT-2.
            # NOTE(review): assumes Translator.translate is synchronous and
            # returns an object with a .text attribute — verify the pinned
            # googletrans version.
            translated_input = translator.translate(user_input, src=lang_code, dest="en").text
            # Generate response from GPT-2 model; on failure this is an
            # "Error generating text: ..." string, which is then translated
            # and spoken like a normal reply.
            generated_text = generate_text(translated_input)

            # Translate the response back to the selected language.
            translated_response = translator.translate(generated_text, src="en", dest=lang_code).text
            st.write(f"### Response in {lang_choice}:")
            st.write(translated_response)

            # Convert response to speech in the selected language.
            # NOTE(review): on TTS failure text_to_speech returns an error
            # *message*, not a path, and st.audio would receive that string —
            # consider checking for the error case before playback.
            audio_path = text_to_speech(translated_response, lang_code)
            st.audio(audio_path, format="audio/mp3")
        else:
            st.warning("Please enter a question.")

# About Section
elif menu == "About":
    st.title("About")
    st.write("""
    This is a simple web application powered by GPT-2 (Hugging Face Transformers) and Google Text-to-Speech (gTTS).

    **Features:**
    - Ask a question in your preferred language, and GPT-2 will generate a response.
    - The response will be translated back to your language and also converted to speech.

    **How it works:**
    - GPT-2 model generates text based on your input in any language.
    - The text is then translated to English, processed by GPT-2, and translated back to your preferred language.
    - The response is converted to speech using the `gTTS` library in the selected language.
    """)
|