Commit 79e0ac2 · Parent: 23ca4b9
Fix analyze_results dropdown choices to match MCP server
- Change dropdown choices from ['overall', 'failures', 'performance', 'tools'] to ['comprehensive', 'failures', 'performance', 'cost']
- Update default value from 'overall' to 'comprehensive'
- Update function signatures and docstrings to match
- Revert SSR mode change (not needed)

Files changed:
- app.py: +3, -4
- screens/mcp_helpers.py: +11, -3
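For context, the two lists have to agree because the dropdown value is forwarded to the MCP server's analyze_results tool, and a focus string the server does not recognize would fail before any analysis runs. The snippet below is only a hypothetical illustration of that constraint; ANALYSIS_FOCUS_CHOICES and validate_focus are illustrative names, not code from this repo.

ANALYSIS_FOCUS_CHOICES = ["comprehensive", "failures", "performance", "cost"]

def validate_focus(focus_area: str) -> str:
    # Illustrative guard only; the real MCP server may signal an unsupported
    # focus value differently (for example, as a tool error instead of an exception).
    if focus_area not in ANALYSIS_FOCUS_CHOICES:
        raise ValueError(
            f"Unsupported focus_area {focus_area!r}; expected one of {ANALYSIS_FOCUS_CHOICES}"
        )
    return focus_area

validate_focus("failures")    # accepted
# validate_focus("overall")   # would raise: 'overall' was the old UI value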
app.py (CHANGED)

@@ -1987,8 +1987,8 @@ with gr.Blocks(title="TraceMind-AI", theme=theme) as app:
             with gr.Column(scale=1):
                 run_analysis_focus = gr.Dropdown(
                     label="Analysis Focus",
-                    choices=["overall", "failures", "performance", "tools"],
-                    value="overall",
+                    choices=["comprehensive", "failures", "performance", "cost"],
+                    value="comprehensive",
                     info="Choose what aspect to focus on in the AI analysis"
                 )
                 run_max_rows = gr.Slider(

@@ -2487,6 +2487,5 @@ if __name__ == "__main__":
     app.launch(
         server_name="0.0.0.0",
         server_port=7860,
-        share=False,
-        ssr_mode=False  # Disable SSR to fix MCP client compatibility
+        share=False
     )
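The diff does not show how the dropdown value reaches the helper. Below is a minimal sketch of the kind of Gradio wiring that depends on the UI choices and the helper default matching; the component names, button, and layout are assumptions, not the actual TraceMind-AI screen, and it assumes the repo's screens.mcp_helpers module is importable.

import gradio as gr
from screens.mcp_helpers import call_analyze_results_sync

with gr.Blocks() as demo:
    results_repo = gr.Textbox(label="Results repo")
    focus = gr.Dropdown(
        label="Analysis Focus",
        choices=["comprehensive", "failures", "performance", "cost"],
        value="comprehensive",
    )
    max_rows = gr.Slider(10, 500, value=100, step=10, label="Max rows to analyze")
    output = gr.Markdown()
    run_btn = gr.Button("Analyze")
    # Inputs are passed positionally, mirroring
    # call_analyze_results_sync(results_repo, focus_area, max_rows).
    run_btn.click(
        fn=call_analyze_results_sync,
        inputs=[results_repo, focus, max_rows],
        outputs=output,
    )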
screens/mcp_helpers.py (CHANGED)

@@ -120,7 +120,7 @@ async def call_compare_runs(
 
 async def call_analyze_results(
     results_repo: str,
-    focus_area: str = "overall",
+    focus_area: str = "comprehensive",
     max_rows: int = 100
 ) -> str:
     """

@@ -128,7 +128,7 @@ async def call_analyze_results(
 
     Args:
         results_repo: HuggingFace dataset repository with results data
-        focus_area: Focus area - "overall", "failures", "performance", or "tools"
+        focus_area: Focus area - "comprehensive", "failures", "performance", or "cost"
         max_rows: Maximum number of test cases to analyze
 
     Returns:

@@ -225,11 +225,19 @@ def call_compare_runs_sync(
 
 def call_analyze_results_sync(
     results_repo: str,
-    focus_area: str = "overall",
+    focus_area: str = "comprehensive",
     max_rows: int = 100
 ) -> str:
     """
     Synchronous version of call_analyze_results for Gradio event handlers
+
+    Args:
+        results_repo: HuggingFace dataset repository with results data
+        focus_area: Focus area - "comprehensive", "failures", "performance", or "cost"
+        max_rows: Maximum number of test cases to analyze
+
+    Returns:
+        Markdown-formatted results analysis from Gemini
     """
     try:
         client = get_mcp_client()
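The body of call_analyze_results_sync is only partially visible here (it obtains a client via get_mcp_client()). As a rough, assumed sketch of how a synchronous wrapper like this commonly drives its async counterpart from a Gradio event handler (not taken from this repo):

import asyncio

async def analyze(results_repo: str, focus_area: str = "comprehensive", max_rows: int = 100) -> str:
    # Stand-in for the real async call_analyze_results.
    return f"analysis of {results_repo} ({focus_area}, up to {max_rows} rows)"

def analyze_sync(results_repo: str, focus_area: str = "comprehensive", max_rows: int = 100) -> str:
    # The handler runs in a synchronous context, so the async helper is driven
    # to completion on a fresh event loop. Note asyncio.run() would fail if an
    # event loop were already running in this thread.
    return asyncio.run(analyze(results_repo, focus_area, max_rows))

print(analyze_sync("user/results-dataset", "failures", 50))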