Update modules/semantic/semantic_process.py
Browse files
modules/semantic/semantic_process.py
CHANGED
@@ -1,4 +1,5 @@
|
|
1 |
#modules/semantic/semantic_process.py
|
|
|
2 |
import streamlit as st
|
3 |
from ..text_analysis.semantic_analysis import (
|
4 |
perform_semantic_analysis,
|
@@ -16,7 +17,6 @@ from ..text_analysis.semantic_analysis import (
|
|
16 |
analyze_sentiment,
|
17 |
extract_topics
|
18 |
)
|
19 |
-
|
20 |
from ..database.semantic_mongo_db import store_student_semantic_result
|
21 |
|
22 |
import logging
|
@@ -25,29 +25,28 @@ logger = logging.getLogger(__name__)
|
|
25 |
def process_semantic_input(text, lang_code, nlp_models, t):
|
26 |
"""
|
27 |
Procesa el texto ingresado para realizar el análisis semántico.
|
28 |
-
|
29 |
-
Args:
|
30 |
-
text: Texto a analizar
|
31 |
-
lang_code: Código del idioma
|
32 |
-
nlp_models: Diccionario de modelos spaCy
|
33 |
-
t: Diccionario de traducciones
|
34 |
-
|
35 |
-
Returns:
|
36 |
-
dict: Resultados del análisis
|
37 |
"""
|
38 |
try:
|
39 |
-
|
40 |
-
doc = nlp_models[lang_code](text)
|
41 |
|
42 |
-
#
|
|
|
43 |
analysis = perform_semantic_analysis(text, nlp_models[lang_code], lang_code)
|
44 |
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
51 |
|
52 |
return {
|
53 |
'analysis': analysis,
|
@@ -56,53 +55,52 @@ def process_semantic_input(text, lang_code, nlp_models, t):
|
|
56 |
}
|
57 |
|
58 |
except Exception as e:
|
59 |
-
logger.error(f"Error en
|
60 |
return {
|
61 |
'analysis': None,
|
62 |
'success': False,
|
63 |
-
'message':
|
64 |
}
|
65 |
|
66 |
def format_semantic_results(analysis_result, t):
|
67 |
"""
|
68 |
Formatea los resultados del análisis para su visualización.
|
69 |
-
|
70 |
-
Args:
|
71 |
-
analysis_result: Resultado del análisis semántico
|
72 |
-
t: Diccionario de traducciones
|
73 |
-
|
74 |
-
Returns:
|
75 |
-
dict: Resultados formateados para visualización
|
76 |
"""
|
77 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
78 |
return {
|
79 |
-
'formatted_text':
|
80 |
-
'visualizations':
|
|
|
|
|
|
|
81 |
}
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
concepts_section = [f"### {t.get('key_concepts', 'Key Concepts')}"]
|
89 |
-
concepts_section.extend([
|
90 |
-
f"- {concept}: {frequency:.2f}"
|
91 |
-
for concept, frequency in analysis_result['analysis']['key_concepts']
|
92 |
-
])
|
93 |
-
formatted_sections.append('\n'.join(concepts_section))
|
94 |
-
|
95 |
-
return {
|
96 |
-
'formatted_text': '\n\n'.join(formatted_sections),
|
97 |
-
'visualizations': {
|
98 |
-
'concept_graph': analysis_result['analysis'].get('concept_graph'),
|
99 |
-
'entity_graph': analysis_result['analysis'].get('entity_graph')
|
100 |
}
|
101 |
-
}
|
102 |
|
103 |
-
# Re-exportar funciones necesarias
|
104 |
__all__ = [
|
105 |
'process_semantic_input',
|
106 |
'format_semantic_results'
|
107 |
-
]
|
108 |
-
|
|
|
1 |
#modules/semantic/semantic_process.py
|
2 |
+
#modules/semantic/semantic_process.py
|
3 |
import streamlit as st
|
4 |
from ..text_analysis.semantic_analysis import (
|
5 |
perform_semantic_analysis,
|
|
|
17 |
analyze_sentiment,
|
18 |
extract_topics
|
19 |
)
|
|
|
20 |
from ..database.semantic_mongo_db import store_student_semantic_result
|
21 |
|
22 |
import logging
|
|
|
25 |
def process_semantic_input(text, lang_code, nlp_models, t):
    """
    Run the semantic analysis pipeline on user-submitted text and persist the result.

    Args:
        text: Text to analyze.
        lang_code: Language code selecting the spaCy model in ``nlp_models``.
        nlp_models: Dict mapping language codes to loaded spaCy models.
        t: Translations dict (kept for interface parity with the other
           ``process_*_input`` helpers; not used here).

    Returns:
        dict: ``{'analysis': ..., 'success': True}`` on success, or
        ``{'analysis': None, 'success': False, 'message': str}`` on failure.
    """
    try:
        # Lazy %-style args: the message is only built if INFO is enabled.
        logger.info("Iniciando análisis semántico para texto de %d caracteres", len(text))

        # Run the analysis. (The original also built an unused `doc` via
        # nlp_models[lang_code](text); perform_semantic_analysis receives the
        # model and parses the text itself, so that extra parse was dropped.)
        analysis = perform_semantic_analysis(text, nlp_models[lang_code], lang_code)

        logger.info("Análisis semántico completado. Guardando resultados...")

        # Best-effort persistence: a database failure is logged but must not
        # prevent the analysis result from being returned to the caller.
        try:
            store_result = store_student_semantic_result(
                st.session_state.username,
                text,
                analysis
            )
            if not store_result:
                logger.warning("No se pudo guardar el análisis en la base de datos")
        except Exception as db_error:
            logger.error("Error al guardar en base de datos: %s", db_error)
            # Continue even though saving failed.

        # NOTE(review): the diff elided two lines inside this dict; 'success'
        # is required by format_semantic_results — confirm no other keys.
        return {
            'analysis': analysis,
            'success': True
        }

    except Exception as e:
        # logger.exception records the traceback along with the message.
        logger.exception("Error en process_semantic_input: %s", e)
        return {
            'analysis': None,
            'success': False,
            'message': str(e)
        }
|
64 |
|
65 |
def format_semantic_results(analysis_result, t):
    """
    Format semantic-analysis results for display.

    Args:
        analysis_result: Dict produced by ``process_semantic_input`` with keys
            ``'success'``, ``'analysis'`` and, on failure, ``'message'``.
        t: Translations dict; ``t.get('key_concepts', ...)`` supplies the
           section heading.

    Returns:
        dict: ``{'formatted_text': str, 'visualizations': dict | None}``.
    """
    try:
        # Failed analyses pass their error message through unchanged.
        # .get() keeps a malformed input dict from raising KeyError here
        # (which previously bounced through the broad handler below and
        # returned the literal string "'success'" as the formatted text).
        if not analysis_result.get('success'):
            return {
                'formatted_text': analysis_result.get('message', ''),
                'visualizations': None
            }

        formatted_sections = []
        analysis = analysis_result['analysis']

        # Key concepts: one markdown bullet per (concept, frequency) pair.
        if 'key_concepts' in analysis:
            concepts_section = [f"### {t.get('key_concepts', 'Key Concepts')}"]
            concepts_section.extend(
                f"- {concept}: {frequency:.2f}"
                for concept, frequency in analysis['key_concepts']
            )
            formatted_sections.append('\n'.join(concepts_section))

        return {
            'formatted_text': '\n\n'.join(formatted_sections),
            'visualizations': {
                'concept_graph': analysis.get('concept_graph'),
                'entity_graph': analysis.get('entity_graph')
            }
        }

    except Exception as e:
        # logger.exception captures the traceback; lazy %-args avoid eager
        # string formatting.
        logger.exception("Error en format_semantic_results: %s", e)
        return {
            'formatted_text': str(e),
            'visualizations': None
        }
|
|
|
102 |
|
|
|
103 |
# Public API re-exported from this module.
__all__ = [
    'process_semantic_input',
    'format_semantic_results'
]
|
|