import streamlit as st
import datetime
import pickle
import numpy as np
import rdflib
import torch
import os
import requests
from rdflib import Graph as RDFGraph, Namespace
from sentence_transformers import SentenceTransformer
from dotenv import load_dotenv

# === CONFIGURATION ===
load_dotenv()
ENDPOINT_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"
HF_API_TOKEN = os.getenv("HF_API_TOKEN")
EMBEDDING_MODEL = "intfloat/multilingual-e5-base"
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
EX = Namespace("http://example.org/lang/")

# === STREAMLIT UI CONFIG ===
st.set_page_config(
    page_title="Language Atlas: South American Indigenous Languages",
    page_icon="🌍",
    layout="wide",
    initial_sidebar_state="expanded",
    menu_items={
        'About': "## AI-powered analysis of endangered indigenous languages\n"
                 "Developed by Departamento Académico de Humanidades"
    }
)

# === CUSTOM CSS ===
st.markdown("""
""", unsafe_allow_html=True)

# === CORE FUNCTIONS ===
@st.cache_resource(show_spinner="Loading AI models and knowledge graphs...")
def load_all_components():
    # Load the sentence embedder once and cache it together with both retrieval methods.
    embedder = SentenceTransformer(EMBEDDING_MODEL, device=DEVICE)
    methods = {}
    for label, suffix, ttl, matrix_path in [
        ("InfoMatch", "_hybrid", "grafo_ttl_hibrido.ttl", "embed_matrix_hybrid.npy"),
        ("LinkGraph", "_hybrid_graphsage", "grafo_ttl_hibrido_graphsage.ttl", "embed_matrix_hybrid_graphsage.npy")
    ]:
        with open(f"id_map{suffix}.pkl", "rb") as f:
            id_map = pickle.load(f)
        with open(f"grafo_embed{suffix}.pickle", "rb") as f:
            G = pickle.load(f)
        matrix = np.load(matrix_path)
        rdf = RDFGraph()
        rdf.parse(ttl, format="ttl")
        methods[label] = (matrix, id_map, G, rdf)
    return methods, embedder

def get_top_k(matrix, id_map, query, k, embedder):
    # Embed the query and rank languages by cosine similarity against the precomputed matrix.
    vec = embedder.encode(f"query: {query}", convert_to_tensor=True, device=DEVICE)
    vec = vec.cpu().numpy().astype("float32")
    sims = np.dot(matrix, vec) / (np.linalg.norm(matrix, axis=1) * np.linalg.norm(vec) + 1e-10)
    top_k_idx = np.argsort(sims)[-k:][::-1]
    return [id_map[i] for i in top_k_idx]

def get_context(G, lang_id):
    # Collect the textual context stored on the graph node (label, Wikipedia, Wikidata).
    node = G.nodes.get(lang_id, {})
    lines = [f"**Language:** {node.get('label', lang_id)}"]
    if node.get("wikipedia_summary"):
        lines.append(f"**Wikipedia:** {node['wikipedia_summary']}")
    if node.get("wikidata_description"):
        lines.append(f"**Wikidata:** {node['wikidata_description']}")
    if node.get("wikidata_countries"):
        lines.append(f"**Countries:** {node['wikidata_countries']}")
    return "\n\n".join(lines)

def query_rdf(rdf, lang_id):
    # Retrieve all (property, value) pairs for the language from the RDF graph.
    q = f"""
    PREFIX ex: <http://example.org/lang/>
    SELECT ?property ?value WHERE {{ ex:{lang_id} ?property ?value }}
    """
    try:
        return [(str(row[0]).split("/")[-1], str(row[1])) for row in rdf.query(q)]
    except Exception as e:
        return [("error", str(e))]

def generate_response(matrix, id_map, G, rdf, user_question, k, embedder):
    # Retrieve the top-k languages, build a grounded prompt, and query the hosted model.
    ids = get_top_k(matrix, id_map, user_question, k, embedder)
    context = [get_context(G, i) for i in ids]
    rdf_facts = []
    for i in ids:
        rdf_facts.extend([f"{p}: {v}" for p, v in query_rdf(rdf, i)])
    prompt = f"""[INST] You are an expert in South American indigenous languages.
Use strictly and only the information below to answer the user question in **English**.
- Do not infer or assume facts that are not explicitly stated.
- If the answer is unknown or insufficient, say "I cannot answer with the available data."
- Limit your answer to 100 words.

### CONTEXT:
{chr(10).join(context)}

### RDF RELATIONS:
{chr(10).join(rdf_facts)}

### QUESTION:
{user_question}

Answer: [/INST]"""
    try:
        res = requests.post(
            ENDPOINT_URL,
            headers={"Authorization": f"Bearer {HF_API_TOKEN}", "Content-Type": "application/json"},
            json={"inputs": prompt},
            timeout=60
        )
        out = res.json()
        if isinstance(out, list) and "generated_text" in out[0]:
            return out[0]["generated_text"].replace(prompt.strip(), "").strip(), ids, context, rdf_facts
        return str(out), ids, context, rdf_facts
    except Exception as e:
        return str(e), ids, context, rdf_facts

# === MAIN APP ===
def main():
    methods, embedder = load_all_components()
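    # Two retrieval methods are compared side by side: "InfoMatch" (Node2Vec-based) and
    # "LinkGraph" (GraphSAGE-based); each entry is (embedding matrix, id_map, graph, RDF graph).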

๐ŸŒ Language Atlas: South American Indigenous Languages

""", unsafe_allow_html=True) with st.expander("๐Ÿ“Œ **Overview**", expanded=True): st.markdown(""" This app provides **AI-powered analysis** of endangered indigenous languages in South America, integrating knowledge graphs from **Glottolog, Wikipedia, and Wikidata**. \n\n*This is version 1 and currently English-only. Spanish version coming soon!* """) with st.sidebar: st.markdown("### ๐Ÿ“š Pontificia Universidad Catรณlica del Perรบ") st.markdown(""" - Departamento de Humanidades - jveraz@pucp.edu.pe - Suggestions? Contact us """, unsafe_allow_html=True) st.markdown("---") st.markdown("### ๐Ÿš€ Quick Start") st.markdown(""" 1. **Type a question** in the input box 2. **Click 'Analyze'** to compare methods 3. **Explore results** with expandable details """) st.markdown("---") st.markdown("### ๐Ÿ” Example Queries") questions = [ "What languages are endangered in Brazil?", "What languages are spoken in Perรบ?", "Which languages are related to Quechua?", "Where is Mapudungun spoken?" ] for q in questions: if st.markdown(f"
            # Clicking an example question pre-fills the query box on the next rerun.
            if st.button(q):
                st.session_state.query = q
        st.markdown("---")
        st.markdown("### ⚙️ Technical Details")
        st.markdown("""
        - Embeddings: Node2Vec vs. GraphSAGE
        - Language Model: Mistral-7B-Instruct
        - Knowledge Graph: RDF-based integration
        """, unsafe_allow_html=True)
        st.markdown("---")
        st.markdown("### 📂 Data Sources")
        st.markdown("""
        - **Glottolog** (language classification)
        - **Wikipedia** (textual summaries)
        - **Wikidata** (structured facts)
        """)
        st.markdown("---")
        st.markdown("### 📊 Analysis Parameters")
        k = st.slider("Number of languages to analyze", 1, 10, 3)
        st.markdown("---")
        st.markdown("### 🔧 Advanced Options")
        show_ctx = st.checkbox("Show context information", False)
        show_rdf = st.checkbox("Show structured facts", False)

    st.markdown("### 📝 Ask About Indigenous Languages")
    query = st.text_input(
        "Enter your question:",
        value=st.session_state.get("query", ""),
        label_visibility="collapsed",
        placeholder="e.g. What languages are spoken in Peru?"
    )

    if st.button("Analyze", type="primary", use_container_width=True):
        if not query:
            st.warning("Please enter a question")
            return

        col1, col2 = st.columns(2)
        for col, (label, method) in zip([col1, col2], methods.items()):
            with col:
                st.markdown(f"#### {label} Method")
                st.caption({
                    "InfoMatch": "Node2Vec embeddings combining text and graph structure",
                    "LinkGraph": "GraphSAGE embeddings capturing network patterns"
                }[label])
                start = datetime.datetime.now()
                response, lang_ids, context, rdf_data = generate_response(*method, query, k, embedder)
                duration = (datetime.datetime.now() - start).total_seconds()
                # Show the generated answer together with timing and retrieval statistics.
                st.markdown(response)
                st.caption(f"⏱️ {duration:.2f}s  🌍 {len(lang_ids)} languages")

                if show_ctx:
                    with st.expander(f"📖 Context from {len(lang_ids)} languages"):
                        for lang_id, ctx in zip(lang_ids, context):
                            st.markdown(ctx)
                if show_rdf:
                    with st.expander("🔗 Structured facts (RDF)"):
                        st.code("\n".join(rdf_data))

    st.markdown("---")
    st.markdown("""
    📌 **Note:** This tool is designed for researchers, linguists, and cultural preservationists.
    For best results, use specific questions about languages, families, or regions.
    """)

if __name__ == "__main__":
    main()