Update app/streamlit_app.py
app/streamlit_app.py  (+13 -13)
@@ -977,7 +977,7 @@ def render_prediction_interface():
             st.error(validation_message)
         else:
             # Make prediction with enhanced UI
-            result =
+            result = app_manager.make_prediction_request(user_text)
 
             if 'error' in result:
                 st.error(f"Analysis failed: {result['error']}")
@@ -1032,7 +1032,7 @@ def render_prediction_results(result: Dict, user_text: str, show_advanced: bool)
 
     with col1:
         # Advanced confidence gauge
-        fig_gauge =
+        fig_gauge = app_manager.create_advanced_confidence_gauge(confidence, prediction)
         st.plotly_chart(fig_gauge, use_container_width=True)
 
     with col2:
@@ -1119,8 +1119,8 @@ def validate_text_input(text: str) -> tuple[bool, str]:
     if len(text) < 10:
         return False, "Text must be at least 10 characters long for reliable analysis."
 
-    if len(text) >
-        return False, f"Text must be less than {
+    if len(text) > app_manager.config['max_text_length']:
+        return False, f"Text must be less than {app_manager.config['max_text_length']:,} characters."
 
     # Check for suspicious content
     suspicious_patterns = ['<script', 'javascript:', 'data:', 'onclick=']
@@ -1160,7 +1160,7 @@ def render_batch_analysis():
     with col1:
         max_articles = st.number_input("Max articles to process",
                                        min_value=1,
-                                       max_value=min(len(df),
+                                       max_value=min(len(df), app_manager.config['max_batch_size']),
                                        value=min(len(df), 50))
 
     with col2:
@@ -1184,7 +1184,7 @@ def render_batch_analysis():
         status_text.text(f"Processing article {i+1:,}/{len(df):,}...")
         progress_bar.progress((i + 1) / len(df))
 
-        result =
+        result = app_manager.make_prediction_request(row['text'])
 
         if 'error' not in result:
             results.append({
@@ -1325,7 +1325,7 @@ def render_analytics_dashboard():
     if st.session_state.prediction_history:
         st.subheader("Prediction History Analysis")
 
-        fig_history =
+        fig_history = app_manager.create_prediction_history_chart()
         if fig_history:
             st.plotly_chart(fig_history, use_container_width=True)
 
@@ -1410,7 +1410,7 @@ def render_cv_results_dashboard():
     st.markdown("*Statistical validation and model performance analysis*")
 
     # Get CV results
-    cv_results =
+    cv_results = app_manager.get_cv_results_from_api()
 
     if cv_results and 'error' not in cv_results:
         # Model information header
@@ -1431,7 +1431,7 @@ def render_cv_results_dashboard():
         cv_data = cv_results.get('cross_validation', {})
         if cv_data:
             # Create comprehensive CV visualization
-            fig_cv =
+            fig_cv = app_manager.create_cv_performance_visualization(cv_results)
             if fig_cv:
                 st.plotly_chart(fig_cv, use_container_width=True)
 
@@ -1799,7 +1799,7 @@ def render_health_monitoring():
     st.session_state.auto_refresh = auto_refresh
 
     # System health overview
-
+    app_manager.render_system_health_dashboard()
 
     # Environment information
     st.markdown("---")
@@ -1839,9 +1839,9 @@ def render_health_monitoring():
         st.markdown(f"{status} {name}")
 
     # Model performance monitoring
-    if
+    if app_manager.api_available:
         try:
-            metrics_response =
+            metrics_response = app_manager.session.get(f"{app_manager.config['api_url']}/metrics", timeout=10)
             if metrics_response.status_code == 200:
                 metrics_data = metrics_response.json()
 
@@ -1920,7 +1920,7 @@ def render_realtime_metrics():
     metrics_timeframe = st.selectbox("Timeframe", ["Last Hour", "Last 4 Hours", "Last 24 Hours"])
 
     # Real-time monitoring data
-    monitoring_data =
+    monitoring_data = app_manager.get_monitoring_metrics_from_api()
 
     if monitoring_data:
         # Current performance metrics
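Every hunk in this commit replaces an inline value or call with a method or config lookup on app_manager. For context, below is a minimal sketch of the AppManager pieces the new code relies on (session, config, api_available, make_prediction_request), inferred only from how they are used above; the /predict and /health endpoint names, the config defaults, and the error-dict shape are illustrative assumptions, not taken from this file.

import requests

class AppManager:
    """Illustrative sketch only; mirrors the attributes referenced in the diff above."""

    def __init__(self, api_url: str = "http://localhost:8000"):
        # Config keys referenced in the diff; the default values here are assumptions.
        self.config = {
            "api_url": api_url,
            "max_text_length": 10_000,
            "max_batch_size": 500,
        }
        self.session = requests.Session()
        self.api_available = self._check_api()

    def _check_api(self) -> bool:
        # Assumed health probe; the endpoint name is a guess.
        try:
            response = self.session.get(f"{self.config['api_url']}/health", timeout=5)
            return response.status_code == 200
        except requests.RequestException:
            return False

    def make_prediction_request(self, text: str) -> dict:
        # Returns the API's JSON payload, or an {'error': ...} dict on failure,
        # which is the shape the callers in the diff branch on.
        try:
            response = self.session.post(
                f"{self.config['api_url']}/predict",  # endpoint name assumed
                json={"text": text},
                timeout=30,
            )
            response.raise_for_status()
            return response.json()
        except requests.RequestException as exc:
            return {"error": str(exc)}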