Update app.py
app.py
CHANGED
@@ -103,6 +103,9 @@ if __name__ == "__main__":
     demo.launch()
 '''

+
+'''
+working
 import os
 import gradio as gr
 import requests
@@ -349,4 +352,402 @@ if __name__ == "__main__":
         })

     # Start Gradio app
-    demo.launch()
+    demo.launch()
+
+'''
+
+
+import os
+import gradio as gr
+import requests
+import mlflow
+import dagshub
+from pinecone import Pinecone
+from langchain.prompts import PromptTemplate
+from langchain.chains.llm import LLMChain
+from langchain.llms.base import LLM
+from typing import Optional, List, Mapping, Any
+import time
+from langchain_community.embeddings import HuggingFaceEmbeddings
+from dotenv import load_dotenv
+from datetime import datetime
+
+# DeepEval imports
+try:
+    from deepeval.test_case import LLMTestCase, LLMTestCaseParams
+    from deepeval.metrics import AnswerRelevancyMetric, HallucinationMetric
+    from deepeval.metrics import BaseMetric
+except Exception:
+    raise
+
+# Optional LangChain Google generative integration (Gemini)
+try:
+    import google.generativeai as genai
+    from langchain_google_genai import ChatGoogleGenerativeAI, GoogleGenerativeAIEmbeddings
+except Exception:
+    ChatGoogleGenerativeAI = None
+    genai = None
+
+# Load environment variables
+load_dotenv()
+
+PINECONE_API_KEY = os.environ.get("PINECONE_API_KEY", "")
+MLFLOW_TRACKING_URI = os.environ.get("MLFLOW_TRACKING_URI", "http://localhost:5000")
+GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY", "")
+LITSERVE_ENDPOINT = os.environ.get("LITSERVE_ENDPOINT", "https://8001-01k2h9d9mervcmgfn66ybkpwvq.cloudspaces.litng.ai/predict")
+
+# DagsHub & MLflow Setup (guarded)
+try:
+    dagshub.init(
+        repo_owner='prathamesh.khade20',
+        repo_name='Maintenance_AI_website',
+        mlflow=True
+    )
+except Exception:
+    pass
+
+mlflow.set_tracking_uri(MLFLOW_TRACKING_URI)
+mlflow.set_experiment("Maintenance-RAG-Chatbot")
+
+# ----------- App configuration logging -----------
+with mlflow.start_run(run_name=f"App-Config-{datetime.now().strftime('%Y%m%d-%H%M%S')}") as setup_run:
+    mlflow.log_params({
+        "pinecone_index": "rag-granite-index",
+        "embedding_model": "all-MiniLM-L6-v2",
+        "namespace": "rag-ns",
+        "top_k": 3,
+        "llm_endpoint": LITSERVE_ENDPOINT
+    })
+    mlflow.log_text("""
+You are a smart assistant. Based on the provided context, answer the question in 1–2 lines only.
+If the context has more details, summarize it concisely.
+Context:
+{context}
+Question: {question}
+Answer:
+""", "artifacts/prompt_template.txt")
+
+# ----------- 1. Custom LLM for LitServe endpoint -----------
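+# The LitServe endpoint is expected to accept POST {"prompt": "..."} and return
+# JSON containing a "response" field (see _call below).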
+class LitServeLLM(LLM):
+    endpoint_url: str
+
+    @mlflow.trace
+    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
+        payload = {"prompt": prompt}
+        with mlflow.start_span("lit_serve_request"):
+            start_time = time.time()
+            response = requests.post(self.endpoint_url, json=payload)
+            latency = time.time() - start_time
+            mlflow.log_metric("lit_serve_latency", latency)
+            if response.status_code == 200:
+                data = response.json()
+                mlflow.log_metric("response_tokens", len(data.get("response", "").split()))
+                return data.get("response", "").strip()
+            else:
+                mlflow.log_metric("request_errors", 1)
+                error_info = {
+                    "status_code": response.status_code,
+                    "error": response.text,
+                    "timestamp": datetime.now().isoformat()
+                }
+                mlflow.log_dict(error_info, "artifacts/error_log.json")
+                raise ValueError(f"Request failed: {response.status_code}")
+
+    @property
+    def _identifying_params(self) -> Mapping[str, Any]:
+        return {"endpoint_url": self.endpoint_url}
+
+    @property
+    def _llm_type(self) -> str:
+        return "litserve_llm"
+
+# ----------- 2. Pinecone Connection -----------
+@mlflow.trace
+def init_pinecone():
+    PINECONE_API_KEY = os.environ.get("PINECONE_API_KEY")
+    pc = Pinecone(api_key=PINECONE_API_KEY)
+    return pc.Index("rag-granite-index")
+
+try:
+    index = init_pinecone()
+except Exception:
+    index = None
+
+# ----------- 3. Embedding Model -----------
+embeddings_model = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
+
+# ----------- 4. Context Retrieval with Tracing -----------
+@mlflow.trace
+def get_retrieved_context(query: str, top_k=3):
+    with mlflow.start_span("embedding_generation"):
+        start_time = time.time()
+        query_embedding = embeddings_model.embed_query(query)
+        mlflow.log_metric("embedding_latency", time.time() - start_time)
+
+    if index is None:
+        return ""
+
+    with mlflow.start_span("pinecone_query"):
+        start_time = time.time()
+        results = index.query(
+            namespace="rag-ns",
+            vector=query_embedding,
+            top_k=top_k,
+            include_metadata=True
+        )
+        mlflow.log_metric("pinecone_latency", time.time() - start_time)
+        mlflow.log_metric("retrieved_chunks", len(results['matches']))
+
+    context_parts = [match['metadata']['text'] for match in results['matches']]
+    return "\n".join(context_parts)
+
+# ----------- 5. LLM Chain Setup -----------
+model = LitServeLLM(endpoint_url=LITSERVE_ENDPOINT)
+
+prompt = PromptTemplate(
+    input_variables=["context", "question"],
+    template="""
+You are a smart assistant. Based on the provided context, answer the question in 1–2 lines only.
+If the context has more details, summarize it concisely.
+Context:
+{context}
+Question: {question}
+Answer:
+"""
+)
+
+llm_chain = LLMChain(llm=model, prompt=prompt)
+
+# ----------- 6. RAG Pipeline with Full Tracing -----------
+@mlflow.trace
+def rag_pipeline(question):
+    try:
+        with mlflow.start_run(run_name=f"Query-{datetime.now().strftime('%H%M%S')}", nested=True):
+            mlflow.log_param("user_question", question)
+            retrieved_context = get_retrieved_context(question)
+            mlflow.log_text(retrieved_context, "artifacts/retrieved_context.txt")
+
+            start_time = time.time()
+            response_obj = llm_chain.invoke({
+                "context": retrieved_context,
+                "question": question
+            })
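+            # LLMChain.invoke returns a dict with the generated text under the "text" key;
+            # fall back gracefully if a different object comes back.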
+            response = response_obj.get("text") if isinstance(response_obj, dict) else getattr(response_obj, "text", str(response_obj))
+            response = response.strip()
+
+            if "Answer:" in response:
+                response = response.split("Answer:", 1)[-1].strip()
+
+            mlflow.log_metric("response_latency", time.time() - start_time)
+            mlflow.log_metric("response_length", len(response))
+            mlflow.log_text(response, "artifacts/response.txt")
+
+            return response
+    except Exception as e:
+        mlflow.log_metric("pipeline_errors", 1)
+        error_info = {
+            "error": str(e),
+            "question": question,
+            "timestamp": datetime.now().isoformat()
+        }
+        mlflow.log_dict(error_info, "artifacts/pipeline_errors.json")
+        return f"Error: {str(e)}"
+
+# ----------- 7. DeepEval Wrapper(s) and Metrics Integration (NO EXPECTED OUTPUT) -----------
+from deepeval.models.base_model import DeepEvalBaseLLM
+
+class GoogleVertexAI(DeepEvalBaseLLM):
+    def __init__(self, model):
+        self.model = model
+
+    def load_model(self):
+        return self.model
+
+    def generate(self, prompt: str) -> str:
+        chat_model = self.load_model()
+        res = chat_model.invoke(prompt)
+        if hasattr(res, 'content'):
+            return res.content
+        if isinstance(res, dict):
+            return res.get('content') or res.get('text') or str(res)
+        return str(res)
+
+    async def a_generate(self, prompt: str) -> str:
+        chat_model = self.load_model()
+        res = await chat_model.ainvoke(prompt)
+        return getattr(res, 'content', str(res))
+
+    def get_model_name(self):
+        return "Vertex AI Model"
+
+class LitServeWrapper(DeepEvalBaseLLM):
+    def __init__(self, lit_llm: LitServeLLM):
+        self.lit_llm = lit_llm
+
+    def load_model(self):
+        return self.lit_llm
+
+    def generate(self, prompt: str) -> str:
+        return self.lit_llm._call(prompt)
+
+    async def a_generate(self, prompt: str) -> str:
+        return self.generate(prompt)
+
+    def get_model_name(self):
+        return "LitServeModel"
+
+# Custom metric that DOES NOT require expected_output: Length-based utility metric
+class LengthMetric(BaseMetric):
+    def __init__(self, min_tokens: int = 1, max_tokens: int = 200):
+        self.min_tokens = min_tokens
+        self.max_tokens = max_tokens
+        self.score = 0.0
+        self.success = False
+
+    def measure(self, test_case: LLMTestCase):
+        text = (test_case.actual_output or "")
+        tokens = len(text.split())
+        # Score between 0 and 1 based on how close tokens are to midpoint
+        mid = (self.min_tokens + self.max_tokens) / 2
+        dist = abs(tokens - mid)
+        max_dist = max(mid - self.min_tokens, self.max_tokens - mid)
+        self.score = max(0.0, 1.0 - (dist / max_dist))
+        # success if within [min_tokens, max_tokens]
+        self.success = (self.min_tokens <= tokens <= self.max_tokens)
+        return self.score
+
+    async def a_measure(self, test_case: LLMTestCase):
+        return self.measure(test_case)
+
+    def is_successful(self):
+        return self.success
+
+    @property
+    def name(self):
+        return "Length Metric"
+
+# Helper to get eval model
+def get_deepeval_model(choice: str):
+    if choice == 'gemini' and ChatGoogleGenerativeAI is not None and GOOGLE_API_KEY:
+        try:
+            genai.configure(api_key=GOOGLE_API_KEY)
+        except Exception:
+            pass
+        chat_model = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=GOOGLE_API_KEY)
+        return GoogleVertexAI(model=chat_model)
+    else:
+        return LitServeWrapper(lit_llm=model)
+
+# Function to run DeepEval tests and log to MLflow (only metrics that don't need expected_output)
+@mlflow.trace
+def run_deepeval_tests(test_cases: List[LLMTestCase], eval_model_choice: str = 'litserve'):
+    model_wrapper = get_deepeval_model(eval_model_choice)
+
+    # Use only metrics that don't require expected output
+    answer_relevancy_metric = AnswerRelevancyMetric(threshold=0.5, model=model_wrapper)
+    hallucination_metric = HallucinationMetric(threshold=0.5, model=model_wrapper)
+    length_metric = LengthMetric(min_tokens=3, max_tokens=200)
+
+    results = []
+    with mlflow.start_run(run_name=f"DeepEval-{datetime.now().strftime('%H%M%S')}", nested=True):
+        for i, tc in enumerate(test_cases):
+            mlflow.log_param(f"tc_{i}_input", tc.input)
+            mlflow.log_param(f"tc_{i}_actual", tc.actual_output)
+            if tc.context:
+                mlflow.log_text("\n".join(tc.context), f"artifacts/tc_{i}_context.txt")
+
+            # Measure metrics
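+            # Note: HallucinationMetric scores the output against test_case.context,
+            # so it may fail if no context was provided for the test case.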
+            answer_relevancy_metric.measure(tc)
+            hallucination_metric.measure(tc)
+            length_metric.measure(tc)
+
+            entry = {
+                "input": tc.input,
+                "actual_output": tc.actual_output,
+                "context": tc.context,
+                "answer_relevancy_score": answer_relevancy_metric.score,
+                "hallucination_score": hallucination_metric.score,
+                "length_score": length_metric.score
+            }
+
+            # Log metrics to mlflow
+            mlflow.log_metric(f"tc_{i}_answer_relevancy", answer_relevancy_metric.score)
+            mlflow.log_metric(f"tc_{i}_hallucination", hallucination_metric.score)
+            mlflow.log_metric(f"tc_{i}_length", length_metric.score)
+
+            results.append(entry)
+
+    return results
+
+# ----------- 8. Gradio UI with Evaluation Tab (NO EXPECTED OUTPUT) -----------
+with gr.Blocks() as demo:
+    gr.Markdown("# 🛠️ Maintenance AI Assistant + DeepEval (No expected output required)")
+
+    with gr.Tabs():
+        with gr.TabItem("Chat (RAG)"):
+            usage_counter = gr.State(value=0)
+            session_start = gr.State(value=datetime.now().isoformat())
+
+            question_input = gr.Textbox(label="Ask your maintenance question")
+            answer_output = gr.Textbox(label="AI Response")
+            ask_button = gr.Button("Get Answer")
+            feedback = gr.Radio(["Helpful", "Not Helpful"], label="Was this response helpful?")
+
+            def track_usage(question, count, session_start, feedback=None):
+                count += 1
+                with mlflow.start_run(run_name=f"User-Interaction-{count}", nested=True):
+                    mlflow.log_param("question", question)
+                    mlflow.log_param("session_start", session_start)
+                    response = rag_pipeline(question)
+                    if feedback:
+                        mlflow.log_param("user_feedback", feedback)
+                        mlflow.log_metric("helpful_responses", 1 if feedback == "Helpful" else 0)
+                    mlflow.log_metric("total_queries", count)
+                return response, count, session_start
+
+            ask_button.click(
+                track_usage,
+                inputs=[question_input, usage_counter, session_start],
+                outputs=[answer_output, usage_counter, session_start]
+            )
+
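+            # Selecting a feedback option re-runs track_usage, so the same question is
+            # answered again with the feedback value attached to a new MLflow run.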
+            feedback.change(
+                track_usage,
+                inputs=[question_input, usage_counter, session_start, feedback],
+                outputs=[answer_output, usage_counter, session_start]
+            )
+
+        with gr.TabItem("DeepEval — Model Tests"):
+            gr.Markdown("### Run DeepEval metrics (no expected output needed). Provide input + actual output (or paste model output), optional context.")
+
+            tc_input = gr.Textbox(label="Test Input (prompt)")
+            tc_actual = gr.Textbox(label="Actual Output (paste model response or custom)")
+            tc_context = gr.Textbox(label="Context (optional)")
+
+            model_choice = gr.Radio(["litserve", "gemini"], value="litserve", label="Evaluation backend")
+            run_button = gr.Button("Run DeepEval")
+            eval_output = gr.JSON(label="Evaluation Results")
+
+            def run_single_eval(inp, actual, context, eval_backend):
+                tc = LLMTestCase(input=inp, actual_output=actual, expected_output=None, context=[context] if context else None)
+                results = run_deepeval_tests([tc], eval_model_choice=eval_backend)
+                return results
+
+            run_button.click(
+                run_single_eval,
+                inputs=[tc_input, tc_actual, tc_context, model_choice],
+                outputs=[eval_output]
+            )
+
+if __name__ == "__main__":
+    with mlflow.start_run(run_name="Deployment-Info"):
+        mlflow.log_params({
+            "app_version": "1.2.0",
+            "deployment_platform": "Lightning AI / HuggingFace Space",
+            "deployment_time": datetime.now().isoformat(),
+            "code_version": os.getenv("GIT_COMMIT", "dev")
+        })
+
+    demo.launch()
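
Configuration note: the new app reads its settings through load_dotenv() and os.environ.get(). A minimal sketch of the variables it looks for, with placeholder values (the variable names come from the code above; the values shown here are hypothetical):

# Hypothetical local setup; app.py reads these at import time via os.environ.get().
import os
os.environ.setdefault("PINECONE_API_KEY", "<pinecone-api-key>")            # required for retrieval
os.environ.setdefault("MLFLOW_TRACKING_URI", "http://localhost:5000")      # default used in app.py
os.environ.setdefault("GOOGLE_API_KEY", "<gemini-key>")                    # optional, only for the 'gemini' eval backend
os.environ.setdefault("LITSERVE_ENDPOINT", "https://<litserve-host>/predict")
os.environ.setdefault("GIT_COMMIT", "dev")                                 # logged as code_version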