# modules/text_analysis/discourse_analysis.py
import streamlit as st
import spacy
import networkx as nx
# Matplotlib (pyplot) is used for the concept-graph figures
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import logging

from .semantic_analysis import (
    create_concept_graph,
    visualize_concept_graph,
    identify_key_concepts
)
from .stopwords import (
    get_custom_stopwords,
    process_text,
    get_stopwords_for_spacy
)

logger = logging.getLogger(__name__)
#####################
# Define colors for grammatical categories
POS_COLORS = {
    'ADJ': '#FFA07A', 'ADP': '#98FB98', 'ADV': '#87CEFA', 'AUX': '#DDA0DD',
    'CCONJ': '#F0E68C', 'DET': '#FFB6C1', 'INTJ': '#FF6347', 'NOUN': '#90EE90',
    'NUM': '#FAFAD2', 'PART': '#D3D3D3', 'PRON': '#FFA500', 'PROPN': '#20B2AA',
    'SCONJ': '#DEB887', 'SYM': '#7B68EE', 'VERB': '#FF69B4', 'X': '#A9A9A9',
}

POS_TRANSLATIONS = {
    'es': {
        'ADJ': 'Adjetivo', 'ADP': 'Preposición', 'ADV': 'Adverbio', 'AUX': 'Auxiliar',
        'CCONJ': 'Conjunción Coordinante', 'DET': 'Determinante', 'INTJ': 'Interjección',
        'NOUN': 'Sustantivo', 'NUM': 'Número', 'PART': 'Partícula', 'PRON': 'Pronombre',
        'PROPN': 'Nombre Propio', 'SCONJ': 'Conjunción Subordinante', 'SYM': 'Símbolo',
        'VERB': 'Verbo', 'X': 'Otro',
    },
    'en': {
        'ADJ': 'Adjective', 'ADP': 'Preposition', 'ADV': 'Adverb', 'AUX': 'Auxiliary',
        'CCONJ': 'Coordinating Conjunction', 'DET': 'Determiner', 'INTJ': 'Interjection',
        'NOUN': 'Noun', 'NUM': 'Number', 'PART': 'Particle', 'PRON': 'Pronoun',
        'PROPN': 'Proper Noun', 'SCONJ': 'Subordinating Conjunction', 'SYM': 'Symbol',
        'VERB': 'Verb', 'X': 'Other',
    },
    'fr': {
        'ADJ': 'Adjectif', 'ADP': 'Préposition', 'ADV': 'Adverbe', 'AUX': 'Auxiliaire',
        'CCONJ': 'Conjonction de Coordination', 'DET': 'Déterminant', 'INTJ': 'Interjection',
        'NOUN': 'Nom', 'NUM': 'Nombre', 'PART': 'Particule', 'PRON': 'Pronom',
        'PROPN': 'Nom Propre', 'SCONJ': 'Conjonction de Subordination', 'SYM': 'Symbole',
        'VERB': 'Verbe', 'X': 'Autre',
    }
}

ENTITY_LABELS = {
    'es': {
        "Personas": "lightblue",
        "Lugares": "lightcoral",
        "Inventos": "lightgreen",
        "Fechas": "lightyellow",
        "Conceptos": "lightpink"
    },
    'en': {
        "People": "lightblue",
        "Places": "lightcoral",
        "Inventions": "lightgreen",
        "Dates": "lightyellow",
        "Concepts": "lightpink"
    },
    'fr': {
        "Personnes": "lightblue",
        "Lieux": "lightcoral",
        "Inventions": "lightgreen",
        "Dates": "lightyellow",
        "Concepts": "lightpink"
    }
}
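
# Illustrative helper (an assumption, not part of the module's existing API):
# shows how POS_COLORS and POS_TRANSLATIONS are meant to be combined for a
# spaCy token. The name `describe_pos` is hypothetical.
def describe_pos(token, lang='es'):
    """Return (translated label, display color) for a spaCy token's POS tag."""
    labels = POS_TRANSLATIONS.get(lang, POS_TRANSLATIONS['en'])
    label = labels.get(token.pos_, token.pos_)
    color = POS_COLORS.get(token.pos_, '#FFFFFF')  # white for unmapped tags
    return label, color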
#################
def compare_semantic_analysis(text1, text2, nlp, lang):
    """
    Performs the comparative semantic analysis of two texts.
    """
    try:
        logger.info(f"Iniciando análisis comparativo para idioma: {lang}")

        # Get the stopwords for the requested language
        stopwords = get_custom_stopwords(lang)
        logger.info(f"Obtenidas {len(stopwords)} stopwords para el idioma {lang}")

        # Process both texts
        doc1 = nlp(text1)
        doc2 = nlp(text2)

        # Identify key concepts
        logger.info("Identificando conceptos clave del primer texto...")
        key_concepts1 = identify_key_concepts(doc1, stopwords=stopwords, min_freq=2, min_length=3)
        logger.info("Identificando conceptos clave del segundo texto...")
        key_concepts2 = identify_key_concepts(doc2, stopwords=stopwords, min_freq=2, min_length=3)

        if not key_concepts1 or not key_concepts2:
            raise ValueError("No se pudieron identificar conceptos clave en uno o ambos textos")

        # Build the concept graphs
        logger.info("Creando grafos de conceptos...")
        G1 = create_concept_graph(doc1, key_concepts1)
        G2 = create_concept_graph(doc2, key_concepts2)

        # Visualize the graphs. visualize_concept_graph returns its own figure,
        # so the title and layout are applied to that figure directly rather
        # than to a separate, empty figure created with plt.figure().
        logger.info("Visualizando grafos...")

        # First graph
        fig1 = visualize_concept_graph(G1, lang)
        fig1.suptitle("Análisis del primer texto")
        fig1.tight_layout()

        # Second graph
        fig2 = visualize_concept_graph(G2, lang)
        fig2.suptitle("Análisis del segundo texto")
        fig2.tight_layout()

        logger.info("Análisis comparativo completado exitosamente")
        return fig1, fig2, key_concepts1, key_concepts2

    except Exception as e:
        logger.error(f"Error en compare_semantic_analysis: {str(e)}")
        raise
    finally:
        # Release pyplot-managed figures in every case; the returned Figure
        # objects remain usable by the caller.
        plt.close('all')
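
# Minimal usage sketch for compare_semantic_analysis (kept as a comment so it
# does not run at import time). It assumes a Spanish spaCy pipeline such as
# "es_core_news_sm" is installed; the texts and variable names are placeholders.
#
#   nlp_es = spacy.load("es_core_news_sm")
#   fig_a, fig_b, concepts_a, concepts_b = compare_semantic_analysis(
#       "Primer texto de ejemplo...", "Segundo texto de ejemplo...", nlp_es, "es"
#   )
#   concepts_a[:5]  # top (concept, frequency) tuples of the first text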
############################################
def create_concept_table(key_concepts):
    """
    Builds a table of key concepts with their frequencies.
    Args:
        key_concepts: list of (concept, frequency) tuples
    Returns:
        pandas.DataFrame: formatted concept table
    """
    try:
        if not key_concepts:
            logger.warning("Lista de conceptos vacía")
            return pd.DataFrame(columns=['Concepto', 'Frecuencia'])

        df = pd.DataFrame(key_concepts, columns=['Concepto', 'Frecuencia'])
        df['Frecuencia'] = df['Frecuencia'].round(2)
        return df
    except Exception as e:
        logger.error(f"Error en create_concept_table: {str(e)}")
        return pd.DataFrame(columns=['Concepto', 'Frecuencia'])
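
# Usage sketch (hypothetical data): key_concepts is the list of
# (concept, frequency) tuples produced by identify_key_concepts.
#
#   sample = [("educación", 5), ("tecnología", 3)]
#   df = create_concept_table(sample)  # DataFrame with columns 'Concepto' and 'Frecuencia'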
##########################################################
def perform_discourse_analysis(text1, text2, nlp, lang):
    """
    Runs the full discourse analysis.
    Args:
        text1: first text to analyze
        text2: second text to analyze
        nlp: loaded spaCy model
        lang: language code
    Returns:
        dict: analysis results
    """
    try:
        logger.info("Iniciando análisis del discurso...")

        # Validate inputs
        if not text1 or not text2:
            raise ValueError("Los textos de entrada no pueden estar vacíos")
        if not nlp:
            raise ValueError("Modelo de lenguaje no inicializado")

        # Run the comparative analysis
        try:
            fig1, fig2, key_concepts1, key_concepts2 = compare_semantic_analysis(
                text1, text2, nlp, lang
            )
        except Exception as e:
            logger.error(f"Error en el análisis comparativo: {str(e)}")
            raise

        # Build the result tables
        try:
            table1 = create_concept_table(key_concepts1)
            table2 = create_concept_table(key_concepts2)
        except Exception as e:
            logger.error(f"Error creando tablas de conceptos: {str(e)}")
            raise

        result = {
            'graph1': fig1,
            'graph2': fig2,
            'key_concepts1': key_concepts1,
            'key_concepts2': key_concepts2,
            'table1': table1,
            'table2': table2,
            'success': True
        }

        logger.info("Análisis del discurso completado exitosamente")
        return result

    except Exception as e:
        logger.error(f"Error en perform_discourse_analysis: {str(e)}")
        return {
            'success': False,
            'error': str(e)
        }
    finally:
        plt.close('all')  # Ensure pyplot cleanup in every case
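
#################################################################
# Illustrative Streamlit consumption sketch (kept as a comment): shows how a
# page could render the result dict returned by perform_discourse_analysis.
# The variables text1, text2, nlp and lang are assumed to come from the
# calling page; they are not defined in this module.
#
#   result = perform_discourse_analysis(text1, text2, nlp, lang)
#   if result['success']:
#       col1, col2 = st.columns(2)
#       with col1:
#           st.pyplot(result['graph1'])
#           st.dataframe(result['table1'])
#       with col2:
#           st.pyplot(result['graph2'])
#           st.dataframe(result['table2'])
#   else:
#       st.error(result['error'])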