Spaces:
Running
Running
Added plot explanation - App running locally (w/ gemma3:12b from LMStudio) (#4)
#4
by
angelicaporto
- opened
app.py
CHANGED
@@ -1,173 +1,168 @@
|
|
1 |
-
# ---------------------------------------------------------------------------------
|
2 |
-
# Aplicación principal para cargar el modelo, generar prompts y explicar los datos
|
3 |
-
# ---------------------------------------------------------------------------------
|
4 |
-
|
5 |
-
import streamlit as st # type: ignore
|
6 |
-
import os
|
7 |
-
import re
|
8 |
-
import pandas as pd # type: ignore
|
9 |
-
from dotenv import load_dotenv # type: ignore # Para cambios locales
|
10 |
-
from supabase import create_client, Client # type: ignore
|
11 |
-
|
12 |
-
|
13 |
-
from pandasai import SmartDataframe # type: ignore
|
14 |
-
from pandasai.llm.local_llm import LocalLLM
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
# ---------------------------------------------------------------------------------
|
19 |
-
|
20 |
-
#
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
data
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
""
|
98 |
-
|
99 |
-
|
100 |
-
#
|
101 |
-
#
|
102 |
-
|
103 |
-
|
104 |
-
#
|
105 |
-
|
106 |
-
|
107 |
-
#
|
108 |
-
|
109 |
-
#
|
110 |
-
#
|
111 |
-
#
|
112 |
-
#
|
113 |
-
|
114 |
-
|
115 |
-
|
116 |
-
|
117 |
-
#
|
118 |
-
#
|
119 |
-
|
120 |
-
|
121 |
-
#
|
122 |
-
#
|
123 |
-
#
|
124 |
-
|
125 |
-
""
|
126 |
-
|
127 |
-
#
|
128 |
-
|
129 |
-
# ---------------------------------------------------------------------------------
|
130 |
-
|
131 |
-
#
|
132 |
-
|
133 |
-
#
|
134 |
-
|
135 |
-
|
136 |
-
#
|
137 |
-
|
138 |
-
|
139 |
-
#
|
140 |
-
|
141 |
-
|
142 |
-
|
143 |
-
|
144 |
-
|
145 |
-
|
146 |
-
|
147 |
-
|
148 |
-
|
149 |
-
|
150 |
-
#
|
151 |
-
#
|
152 |
-
#
|
153 |
-
|
154 |
-
|
155 |
-
|
156 |
-
|
157 |
-
|
158 |
-
|
159 |
-
|
160 |
-
|
161 |
-
|
162 |
-
|
163 |
-
|
164 |
-
|
165 |
-
|
166 |
-
|
167 |
-
|
168 |
-
|
169 |
-
except Exception as e:
|
170 |
-
st.error(f"Error generating answer: {e}")
|
171 |
-
|
172 |
-
|
173 |
-
# TODO: Output estructurado si vemos que es necesario.
|
|
|
1 |
+
# ---------------------------------------------------------------------------------
|
2 |
+
# Aplicación principal para cargar el modelo, generar prompts y explicar los datos
|
3 |
+
# ---------------------------------------------------------------------------------
|
4 |
+
|
5 |
+
import streamlit as st # type: ignore
|
6 |
+
import os
|
7 |
+
import re
|
8 |
+
import pandas as pd # type: ignore
|
9 |
+
from dotenv import load_dotenv # type: ignore # Para cambios locales
|
10 |
+
from supabase import create_client, Client # type: ignore
|
11 |
+
from pandasai import Agent
|
12 |
+
|
13 |
+
# from pandasai import SmartDataframe # type: ignore
|
14 |
+
from pandasai.llm.local_llm import LocalLLM
|
15 |
+
from pandasai import Agent
|
16 |
+
import matplotlib.pyplot as plt
|
17 |
+
|
18 |
+
# ---------------------------------------------------------------------------------
|
19 |
+
# Funciones auxiliares
|
20 |
+
# ---------------------------------------------------------------------------------
|
21 |
+
|
22 |
+
|
23 |
+
# Example of a generated prompt:
# generate_graph_prompt("Plot the fertility rate of Germany vs France from 2020 to 2030")
def generate_graph_prompt(user_query):
    """Build the instruction prompt sent to the PandasAI agent.

    Wraps the user's free-text request in instructions that ask the LLM to
    plot the data, write a plain-language explanation, and return a result
    dictionary with keys "type", "value" (plot path) and "explanation".

    Args:
        user_query: The user's chart request, inserted verbatim into the prompt.

    Returns:
        The full prompt string for the agent.
    """
    prompt = f"""
    You are a highly skilled data scientist working with European demographic data.

    Given the user's request: "{user_query}"

    1. Plot the relevant data according to the user's request.
    2. After generating the plot, write a clear, human-readable explanation of the plot (no code).
    3. Save the explanation in a variable called "explanation".

    VERY IMPORTANT:
    - Declare a result variable as a dictionary that includes:
        - type = "plot"
        - value = the path to the saved plot
        - explanation = the explanation text you wrote

    Example of expected result dictionary:
    result = {{
        "type": "plot",
        "value": "temp_chart.png",
        "explanation": explanation
    }}

    Only respond with valid Python code.

    IMPORTANT: Stick strictly to using the data available in the database.
    """
    return prompt
|
53 |
+
|
54 |
+
# TODO: Mejorar prompt
|
55 |
+
|
56 |
+
# ---------------------------------------------------------------------------------
# Supabase connection configuration
# ---------------------------------------------------------------------------------

# Load environment variables from a local .env file (no-op when absent).
load_dotenv()

# Supabase credentials (stored under "Secrets" when deployed on Streamlit)
SUPABASE_URL = os.getenv("SUPABASE_URL")
SUPABASE_KEY = os.getenv("SUPABASE_KEY")

# Fail fast with a clear message instead of an opaque error from create_client
# when the credentials are missing.
if not SUPABASE_URL or not SUPABASE_KEY:
    raise RuntimeError("SUPABASE_URL and SUPABASE_KEY must be set in the environment or .env file.")

# Create the Supabase client shared by the rest of the module.
supabase: Client = create_client(SUPABASE_URL, SUPABASE_KEY)
|
69 |
+
|
70 |
+
# Load all rows of one Supabase table into a pandas DataFrame.
# Known tables: fertility, geo data, labor, population, predictions
def load_data(table):
    """Fetch every row of *table* from Supabase.

    Args:
        table: Name of the Supabase table to read.

    Returns:
        A DataFrame with the table's rows, or an empty DataFrame on any
        failure (errors are surfaced to the user via st.error / st.info).
    """
    try:
        if not supabase:
            st.error("Supabase client not initialized. Check environment variables.")
            return pd.DataFrame()

        response = supabase.from_(table).select("*").execute()
        print(f"Response object: {response}")        # debug: inspect full object
        print(f"Response type: {type(response)}")    # debug: verify object type

        if hasattr(response, 'data'):
            print(f"Response data: {response.data}")
            return pd.DataFrame(response.data)

        # No .data attribute: report whatever error information is available.
        if hasattr(response, 'status_code'):
            print(f"Response status code: {response.status_code}")
        elif hasattr(response, '_error'):  # older supabase client versions
            print(f"Older error attribute: {response._error}")
            st.error(f"Error fetching data: {response._error}")
        else:
            st.info("Response object does not have 'data' or known error attributes. Check the logs.")
        # Fix: the status_code branch previously fell through and returned
        # None implicitly; every failure path now returns an empty DataFrame.
        return pd.DataFrame()
    except Exception as e:
        st.error(f"An error occurred during data loading: {e}")
        return pd.DataFrame()
|
99 |
+
|
100 |
+
# ---------------------------------------------------------------------------------
# Initial data load
# ---------------------------------------------------------------------------------

# Load data from the "labor" table.
data = load_data("labor")

# TODO: Use every table once the single-table flow works. Multi-table
# questions are more complex queries and may need a better prompt/model.
# labor_data = load_data("labor")
# fertility_data = load_data("fertility")
# population_data = load_data("population")
# predictions_data = load_data("predictions")


# ---------------------------------------------------------------------------------
# Model initialization
# ---------------------------------------------------------------------------------

# ollama_llm = LocalLLM(api_base="http://localhost:11434/v1",
#                       model="gemma3:12b",
#                       temperature=0.1,
#                       max_tokens=8000)

lm_studio_llm = LocalLLM(api_base="http://localhost:1234/v1")  # the model is gemma-3-12b-it-qat

# Fix: the frame loaded above is named `data`; the original passed
# `labor_data`, which is commented out and would raise NameError at import.
agent = Agent([data], config={"llm": lm_studio_llm})  # Initialize agent
|
128 |
+
|
129 |
+
# ---------------------------------------------------------------------------------
# Streamlit app configuration
# ---------------------------------------------------------------------------------

# App title
st.title("_Europe GraphGen_ :blue[Graph generator] :flag-eu:")

# TODO: Give the user instructions on how to write a good prompt (no jargon, aimed at the end user)


# Free-text input describing the desired chart
user_input = st.text_input("What graphics do you have in mind")
generate_button = st.button("Generate")

# Process the user's input with PandasAI
if generate_button and user_input:
    with st.spinner('Generating answer...'):
        try:
            prompt = generate_graph_prompt(user_input)
            answer = agent.chat(prompt)
            explanation = agent.explain()
            print(f"\nAnswer type: {type(answer)}\n")  # debug: verify response type
            print(f"\nAnswer content: {answer}\n")  # debug: inspect response content
            print(f"\n explanation type: {type(explanation)}\n")  # debug: verify explanation type
            print(f"\n explanation content: {explanation}\n")

            # NOTE(review): assumes agent.chat returns either a filesystem path
            # to the saved chart or plain text — confirm against pandasai docs.
            if isinstance(answer, str) and os.path.isfile(answer):
                # The output is a valid image path: render it, then clean up.
                im = plt.imread(answer)
                st.image(im)
                os.remove(answer)  # remove the temporary chart file
                st.markdown(str(explanation))
            else:
                # Not a valid path: show the answer as plain text.
                st.markdown(str(answer))

        except Exception as e:
            st.error(f"Error generating answer: {e}")

# TODO: Structured output if we find it necessary.
|
|
|
|
|
|
|
|
|
|