Commit f217250 · 1 Parent(s): 0733fd6
Everything is working fine, but the drift graph and history are giving fallback
app.py CHANGED
@@ -388,29 +388,128 @@ def save_new_model(model_name, selected_llm, original_prompt, enhanced_prompt, c
 ]
 
 
+# Replace the chatbot_response function in your Gradio file with this:
+
 def chatbot_response(message, history, dropdown_value):
-    """Generate chatbot response"""
+    """Generate chatbot response using actual LLM"""
+    print(f"🔍 DEBUG: Function called with message: '{message}'")
+    print(f"🔍 DEBUG: LLM_AVAILABLE: {LLM_AVAILABLE}")
+    print(f"🔍 DEBUG: GROQ_API_KEY exists: {'GROQ_API_KEY' in os.environ}")
+
     if not message or not message.strip() or not dropdown_value:
+        print("🔍 DEBUG: Empty message or dropdown")
         return history, ""
 
     try:
         model_name = extract_model_name_from_dropdown(dropdown_value, current_model_mapping)
+        print(f"🔍 DEBUG: Model name: {model_name}")
 
-        #
-        response_text = f"Hello! I'm {model_name}. You said: '{message}'. This is a demo response since the full LLM integration requires API keys."
-
-        # Append in messages format
+        # Initialize history if needed
         if history is None:
             history = []
 
+        # Check if LLM is available and API key is set
+        if not LLM_AVAILABLE:
+            response_text = "❌ LLM not available - check ourllm.py import"
+        elif not os.getenv("GROQ_API_KEY"):
+            response_text = "❌ GROQ_API_KEY not found in environment variables"
+        else:
+            try:
+                print("🔍 DEBUG: Attempting to call LLM...")
+
+                # Get model details to use system prompt if available
+                model_details = get_model_details(model_name)
+                system_prompt = model_details.get("system_prompt", "You are a helpful AI assistant.")
+
+                # Create a message with system context
+                full_message = f"System: {system_prompt}\n\nUser: {message}"
+
+                # Call the LLM
+                response = llm.invoke(full_message)
+                response_text = str(response.content).strip()
+
+                print(f"🔍 DEBUG: LLM response received: {response_text[:100]}...")
+
+                if not response_text:
+                    response_text = "❌ LLM returned empty response"
+
+            except Exception as e:
+                print(f"🔍 DEBUG: LLM call failed: {e}")
+                response_text = f"❌ LLM Error: {str(e)}"
+
+        # Add to history
         history.append({"role": "user", "content": message})
         history.append({"role": "assistant", "content": response_text})
+
+        print(f"🔍 DEBUG: Final response: {response_text}")
         return history, ""
+
     except Exception as e:
-        print(f"
+        print(f"🔍 DEBUG: General error in chatbot_response: {e}")
+        if history is None:
+            history = []
+        history.append({"role": "user", "content": message})
+        history.append({"role": "assistant", "content": f"❌ Error: {str(e)}"})
         return history, ""
 
 
+# Also add this helper function to test LLM connectivity:
+def test_llm_connection():
+    """Test if LLM is working properly"""
+    try:
+        if not LLM_AVAILABLE:
+            return "❌ LLM not imported"
+
+        if not os.getenv("GROQ_API_KEY"):
+            return "❌ GROQ_API_KEY not found"
+
+        # Test with a simple message
+        response = llm.invoke("Hello, please respond with 'LLM is working'")
+        return f"✅ LLM working: {response.content}"
+    except Exception as e:
+        return f"❌ LLM test failed: {e}"
+
+
+# Add this to your interface initialization to test LLM on startup:
+def initialize_interface():
+    """Initialize interface with LLM test"""
+    global current_model_mapping
+
+    # Test LLM first
+    llm_status = test_llm_connection()
+    print(f"🔍 LLM Status: {llm_status}")
+
+    try:
+        models = get_models_from_db()
+        formatted_items, model_mapping = format_dropdown_items(models)
+        current_model_mapping = model_mapping
+
+        # Safe initialization
+        if formatted_items:
+            dropdown_value = formatted_items[0]
+            first_model_name = extract_model_name_from_dropdown(dropdown_value, model_mapping)
+            dropdown_update = gr.update(choices=formatted_items, value=dropdown_value)
+        else:
+            dropdown_value = None
+            first_model_name = ""
+            dropdown_update = gr.update(choices=[], value=None)
+
+        return (
+            dropdown_update,  # dropdown update
+            "",  # new_model_name
+            first_model_name,  # selected_model_display
+            first_model_name  # drift_model_display
+        )
+    except Exception as e:
+        print(f"❌ Error initializing interface: {e}")
+        return (
+            gr.update(choices=[], value=None),
+            "",
+            "",
+            ""
+        )
+
+
 def calculate_drift(dropdown_value):
     """Calculate drift for model - simplified version"""
     if not dropdown_value:
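
Note on the diff above: it assumes app.py gets `llm` and `LLM_AVAILABLE` from ourllm.py, whose contents are not part of this commit. The sketch below is one plausible shape for that module, assuming the project uses LangChain's ChatGroq client (the `llm.invoke(...)` / `response.content` usage in the diff matches LangChain chat models); the model name is a placeholder, not taken from the repo.

# ourllm.py - hypothetical sketch, not part of this commit
try:
    from langchain_groq import ChatGroq

    # ChatGroq reads GROQ_API_KEY from the environment by default;
    # "llama-3.1-8b-instant" is a placeholder model name.
    llm = ChatGroq(model="llama-3.1-8b-instant", temperature=0.7)
    LLM_AVAILABLE = True
except Exception as init_error:
    # Import or initialization failure (e.g. missing package or API key)
    # leaves the app in the fallback state the diff checks for.
    print(f"LLM initialization failed: {init_error}")
    llm = None
    LLM_AVAILABLE = False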
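
The hunk also does not show how initialize_interface() is hooked up. One way to run it on startup in Gradio is Blocks.load(); the component names below are illustrative, not taken from the actual app.

import gradio as gr

with gr.Blocks() as demo:
    # Illustrative components matching the four return values above.
    model_dropdown = gr.Dropdown(label="Model")
    new_model_name = gr.Textbox(label="New model name")
    selected_model_display = gr.Textbox(label="Selected model")
    drift_model_display = gr.Textbox(label="Drift model")

    # Runs once when the page loads: tests the LLM, then fills the dropdown.
    demo.load(
        fn=initialize_interface,
        inputs=None,
        outputs=[model_dropdown, new_model_name,
                 selected_model_display, drift_model_display],
    )

demo.launch()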