Update modules/discourse/discourse_live_interface.py

modules/discourse/discourse_live_interface.py (CHANGED)
@@ -32,10 +32,9 @@ def fig_to_bytes(fig):
         return None
 
 #################################################################################################
-
 def display_discourse_live_interface(lang_code, nlp_models, discourse_t):
     """
-    Interfaz para el análisis del discurso en vivo
+    Interfaz para el análisis del discurso en vivo con layout mejorado
     """
     try:
         if 'discourse_live_state' not in st.session_state:
@@ -47,36 +46,39 @@ def display_discourse_live_interface(lang_code, nlp_models, discourse_t):
                 'text_changed': False
             }
 
-        #
-
+        # Título
+        st.subheader(discourse_t.get('enter_text', 'Ingrese sus textos'))
 
-        #
-
-
-
-
+        # Área de entrada de textos en dos columnas
+        text_col1, text_col2 = st.columns(2)
+
+        # Texto 1
+        with text_col1:
             st.markdown("**Texto 1 (Patrón)**")
             text_input1 = st.text_area(
                 "Texto 1",
-                height=
+                height=200,
                 key="discourse_live_text1",
                 value=st.session_state.discourse_live_state.get('current_text1', ''),
                 label_visibility="collapsed"
             )
             st.session_state.discourse_live_state['current_text1'] = text_input1
 
-
+        # Texto 2
+        with text_col2:
             st.markdown("**Texto 2 (Comparación)**")
             text_input2 = st.text_area(
                 "Texto 2",
-                height=
+                height=200,
                 key="discourse_live_text2",
                 value=st.session_state.discourse_live_state.get('current_text2', ''),
                 label_visibility="collapsed"
             )
             st.session_state.discourse_live_state['current_text2'] = text_input2
 
-
+        # Botón de análisis centrado
+        col1, col2, col3 = st.columns([1,2,1])
+        with col1:
             analyze_button = st.button(
                 discourse_t.get('analyze_button', 'Analizar'),
                 key="discourse_live_analyze",
@@ -86,59 +88,64 @@ def display_discourse_live_interface(lang_code, nlp_models, discourse_t):
                 use_container_width=True
             )
 
-
-
-
-
+        # Proceso y visualización de resultados
+        if analyze_button and text_input1 and text_input2:
+            try:
+                with st.spinner(discourse_t.get('processing', 'Procesando...')):
+                    result = perform_discourse_analysis(
+                        text_input1,
+                        text_input2,
+                        nlp_models[lang_code],
+                        lang_code
+                    )
+
+                    if result['success']:
+                        # Procesar ambos gráficos
+                        for graph_key in ['graph1', 'graph2']:
+                            if graph_key in result and result[graph_key] is not None:
+                                bytes_key = f'{graph_key}_bytes'
+                                graph_bytes = fig_to_bytes(result[graph_key])
+                                if graph_bytes:
+                                    result[bytes_key] = graph_bytes
+                                plt.close(result[graph_key])
+
+                        st.session_state.discourse_live_state['last_result'] = result
+                        st.session_state.discourse_live_state['analysis_count'] += 1
+
+                        store_student_discourse_result(
+                            st.session_state.username,
                             text_input1,
                             text_input2,
-
-                            lang_code
+                            result
                         )
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                logger.error(f"Error en análisis: {str(e)}")
-                st.error(discourse_t.get('error_processing', f'Error al procesar el texto: {str(e)}'))
-
-        # Columna derecha: Visualización de resultados
-        with result_col:
-            st.subheader(discourse_t.get('live_results', 'Resultados en vivo'))
-
-            if 'last_result' in st.session_state.discourse_live_state and \
-                st.session_state.discourse_live_state['last_result'] is not None:
-
-                result = st.session_state.discourse_live_state['last_result']
-                if all(key in result for key in ['graph1', 'graph2']):
-                    display_discourse_results(result, lang_code, discourse_t)
-                else:
-                    logger.error(f"Faltan gráficos en el resultado: {list(result.keys())}")
-                    st.error(discourse_t.get('missing_graphs', 'Error: No se pudieron generar todos los gráficos'))
-            else:
-                st.info(discourse_t.get('initial_message',
-                    'Ingrese los textos y presione Analizar para ver los resultados.'))
+                        # Mostrar resultados
+                        st.markdown("---")
+                        st.subheader(discourse_t.get('results_title', 'Resultados del Análisis'))
+                        display_discourse_results(result, lang_code, discourse_t)
+
+                    else:
+                        st.error(result.get('message', 'Error en el análisis'))
+
+            except Exception as e:
+                logger.error(f"Error en análisis: {str(e)}")
+                st.error(discourse_t.get('error_processing', f'Error al procesar el texto: {str(e)}'))
+
+        # Mostrar resultados previos si existen
+        elif 'last_result' in st.session_state.discourse_live_state and \
+            st.session_state.discourse_live_state['last_result'] is not None:
+
+            st.markdown("---")
+            st.subheader(discourse_t.get('previous_results', 'Resultados del Análisis Anterior'))
+            display_discourse_results(
+                st.session_state.discourse_live_state['last_result'],
+                lang_code,
+                discourse_t
+            )
 
     except Exception as e:
         logger.error(f"Error general en interfaz del discurso en vivo: {str(e)}")
-        st.error(discourse_t.get('general_error', "Se produjo un error. Por favor, intente de nuevo."))
+        st.error(discourse_t.get('general_error', "Se produjo un error. Por favor, intente de nuevo."))
+
+
+
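The hunk context places this function directly below fig_to_bytes(fig), whose body is not part of the diff; the new code only relies on it returning image bytes on success and a falsy value on failure (it stores the bytes under graph1_bytes / graph2_bytes and then closes the figure). A minimal sketch of what such a helper typically looks like, assuming matplotlib figures and PNG output; the dpi and bbox_inches values are illustrative assumptions, not taken from this file:

import io
import logging

logger = logging.getLogger(__name__)


def fig_to_bytes(fig):
    """Serialize a matplotlib figure to PNG bytes; return None on failure.

    Sketch only: the real helper is not shown in this diff, and the savefig
    options below are assumptions rather than the file's actual values.
    """
    try:
        buf = io.BytesIO()
        # Figure.savefig accepts a file-like object, so no temp file is needed
        fig.savefig(buf, format='png', dpi=150, bbox_inches='tight')
        buf.seek(0)
        return buf.getvalue()
    except Exception as e:
        logger.error(f"Error converting figure to bytes: {str(e)}")
        return None

The diff likewise implies the shape of the dict returned by perform_discourse_analysis: a 'success' flag, optional 'graph1' / 'graph2' matplotlib figures, and a 'message' shown on failure. Any helper honoring the bytes-or-None contract above slots into the graph-processing loop unchanged.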