# MultiAgentReasoningSystem / supervisor_agent.py
import os
import json
from typing import Dict, Any, List
from pydantic import BaseModel, Field
from dotenv import load_dotenv
from langchain_google_genai import ChatGoogleGenerativeAI
from langgraph.prebuilt import create_react_agent
from langgraph_supervisor import create_supervisor
from langchain_core.tools import tool
from tavily import TavilyClient
from langgraph.graph import StateGraph, END
import gradio as gr
# Load environment variables
load_dotenv()
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
if not GEMINI_API_KEY:
raise ValueError("GEMINI_API_KEY not found in environment variables")
if not TAVILY_API_KEY:
raise ValueError("TAVILY_API_KEY not found in environment variables")
os.environ["GOOGLE_API_KEY"] = GEMINI_API_KEY
# Initialize Tavily client for real-time web search
tavily_client = TavilyClient(api_key=TAVILY_API_KEY)
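# Both keys are read from a local .env file via load_dotenv(); an illustrative .env would contain
# (placeholder values, not real keys):
#   GEMINI_API_KEY=your-gemini-key
#   TAVILY_API_KEY=your-tavily-key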
# =============================================================================
# STRUCTURED OUTPUT MODEL
# =============================================================================
class ProfileAnalysisResult(BaseModel):
"""Final structured output for profile analysis"""
fn: str = Field(description="First name")
ln: str = Field(description="Last name")
probableBusinessEmail: str = Field(description="Probable business email address")
title: str = Field(description="Current job title")
isAJobChange: bool = Field(description="Whether person changed jobs")
isAnICP: bool = Field(description="Whether person matches ICP criteria")
currentCompany: str = Field(description="Current company name")
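# Illustrative example of the output shape this model enforces (placeholder values only):
#   ProfileAnalysisResult(fn="Jane", ln="Doe", probableBusinessEmail="jane.doe@example.com",
#                         title="VP Engineering", isAJobChange=True, isAnICP=True,
#                         currentCompany="ExampleCo")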
# =============================================================================
# REACT AGENT TOOLS
# =============================================================================
@tool
def research_person_profile(first_name: str, last_name: str, known_company: str = "") -> Dict[str, Any]:
"""Research a person's current professional profile using real-time web search."""
try:
# Search for current professional information
search_query = f'"{first_name} {last_name}" current job title company LinkedIn'
search_results = tavily_client.search(
query=search_query,
search_depth="advanced",
include_domains=["linkedin.com", "crunchbase.com", "zoominfo.com"],
max_results=5
)
# Also search for recent news/articles about the person
news_query = f'"{first_name} {last_name}" new job company change recent'
news_results = tavily_client.search(
query=news_query,
search_depth="basic",
include_domains=["techcrunch.com", "linkedin.com", "twitter.com"],
max_results=3
)
# Return structured data, not hardcoded values
return {
"current_company": "Unknown", # Will be filled by AI analysis
"current_title": "Unknown", # Will be filled by AI analysis
"confidence": 0.7,
"search_results": search_results.get("results", []),
"news_results": news_results.get("results", []),
"research_notes": f"AI analyzed {len(search_results.get('results', []))} search results and {len(news_results.get('results', []))} news articles"
}
except Exception as e:
# Return Dict, not JSON string (fixes the type mismatch)
return {
"name": f"{first_name} {last_name}",
"error": f"Search failed: {str(e)}",
"data_source": "tavily_search_error"
}
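# Note: current_company/current_title are deliberately returned as "Unknown"; the profile_researcher
# agent is expected to fill them in after reading the raw Tavily results included in the dict above.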
@tool
def detect_job_change(person_name: str, previous_company: str, current_company: str) -> Dict[str, Any]:
"""Analyze if person has changed jobs using real-time company relationship research."""
try:
# Research company relationships and recent changes
relationship_query = f'"{previous_company}" "{current_company}" merger acquisition rebranding subsidiary parent company relationship'
relationship_results = tavily_client.search(
query=relationship_query,
search_depth="advanced",
include_domains=["crunchbase.com", "linkedin.com", "wikipedia.org", "bloomberg.com"],
max_results=5
)
# Search for recent news about company changes
news_query = f'"{previous_company}" "{current_company}" company change news announcement'
news_results = tavily_client.search(
query=news_query,
search_depth="basic",
include_domains=["techcrunch.com", "linkedin.com", "twitter.com", "news.ycombinator.com"],
max_results=3
)
# Return structured data for AI analysis
return {
"person": person_name,
"previous_company": previous_company,
"current_company": current_company,
"job_change_detected": "Unknown", # Will be determined by AI
"confidence": 0.8,
"reason": "Requires AI analysis of search results",
"relationship_search": relationship_results.get("results", []),
"news_search": news_results.get("results", []),
"ai_analysis": f"AI analyzed {len(relationship_results.get('results', []))} relationship results and {len(news_results.get('results', []))} news articles"
}
except Exception as e:
return {
"person": person_name,
"error": f"Company research failed: {str(e)}",
"data_source": "tavily_search_error"
}
@tool
def assess_icp_match(person_title: str, company: str, criteria: str = "senior engineering leadership") -> Dict[str, Any]:
"""Assess if person matches Ideal Customer Profile criteria."""
try:
title_lower = person_title.lower()
# Check for senior engineering roles
senior_roles = ["cto", "vp engineering", "engineering director", "principal engineer", "staff engineer"]
is_match = any(role in title_lower for role in senior_roles)
return {
"title": person_title,
"company": company,
"criteria": criteria,
"is_icp_match": is_match,
"confidence": 0.9 if is_match else 0.1,
"match_reason": "Senior engineering role" if is_match else "Not in target role"
}
except Exception as e:
return {
"title": person_title,
"error": f"ICP assessment failed: {str(e)}",
"data_source": "assessment_error"
}
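# Quick sanity check (illustrative; tools created with @tool accept a dict of arguments via .invoke):
#   assess_icp_match.invoke({"person_title": "VP Engineering", "company": "ExampleCo"})
#   -> is_icp_match=True, because "vp engineering" appears in the senior_roles keyword list above.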
@tool
def find_business_email(first_name: str, last_name: str, company: str) -> Dict[str, Any]:
"""Generate probable business email addresses using real-time company research and LLM intelligence."""
try:
# Research company website and email patterns
company_query = f'"{company}" company website contact email domain'
company_results = tavily_client.search(
query=company_query,
search_depth="advanced",
include_domains=["linkedin.com", "crunchbase.com", "company websites"],
max_results=3
)
# Search for existing employee emails or contact patterns
email_query = f'"{company}" employee email format "@company.com" contact'
email_results = tavily_client.search(
query=email_query,
search_depth="basic",
include_domains=["linkedin.com", "github.com", "company websites"],
max_results=3
)
# Use LLM to intelligently guess email based on gathered data
email_guess_prompt = f"""
Based on the following information, generate the most probable business email address:
Person: {first_name} {last_name}
Company: {company}
Company Research Results: {company_results.get('results', [])}
Email Pattern Results: {email_results.get('results', [])}
Common email patterns to consider:
1. first.last@company.com
2. firstlast@company.com
3. flast@company.com
4. first@company.com
5. last.first@company.com
Instructions:
- Analyze the search results for company domain information
- Use common email naming conventions
- If company domain is found, use it; otherwise make an educated guess
- Return ONLY the email address, nothing else
- If truly cannot determine, return "first.last@company.com" as a placeholder
"""
try:
# Get LLM response for email guessing
email_response = llm.invoke(email_guess_prompt)
probable_email = email_response.content.strip()
# Clean up the response
if probable_email.startswith('"') and probable_email.endswith('"'):
probable_email = probable_email[1:-1]
# Validate it looks like an email
if '@' not in probable_email or '.' not in probable_email:
probable_email = f"{first_name.lower()}.{last_name.lower()}@{company.lower().replace(' ', '')}.com"
except Exception as llm_error:
# Fallback to common pattern if LLM fails
probable_email = f"{first_name.lower()}.{last_name.lower()}@{company.lower().replace(' ', '')}.com"
# Extract domain from the probable email
domain = probable_email.split('@')[1] if '@' in probable_email else "company.com"
return {
"person": f"{first_name} {last_name}",
"company": company,
"probable_email": probable_email,
"domain": domain,
"confidence": 0.7,
"company_search": company_results.get("results", []),
"email_search": email_results.get("results", []),
"ai_analysis": f"LLM generated email based on {len(company_results.get('results', []))} company results and {len(email_results.get('results', []))} email pattern results"
}
except Exception as e:
# Fallback to basic pattern if everything fails
fallback_email = f"{first_name.lower()}.{last_name.lower()}@{company.lower().replace(' ', '')}.com"
return {
"person": f"{first_name} {last_name}",
"company": company,
"probable_email": fallback_email,
"domain": company.lower().replace(' ', '') + ".com",
"confidence": 0.5,
"error": f"Email research failed: {str(e)}",
"data_source": "fallback_pattern",
"ai_analysis": "Used fallback email pattern due to search failure"
}
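# Design note: every tool above returns a plain dict (never a JSON string), so the ReAct agents
# reason over the raw search results rather than pre-baked conclusions; errors are returned as
# dicts too, which lets the graph keep running instead of raising.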
# =============================================================================
# CREATE REACT AGENTS
# =============================================================================
# Create LLM
llm = ChatGoogleGenerativeAI(
model="gemini-2.5-flash",
temperature=0,
google_api_key=GEMINI_API_KEY
)
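# temperature=0 keeps the supervisor, the agents, and the JSON-extraction step as deterministic
# as the model allows, which matters because downstream parsing expects a stable output format.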
# Create individual react agents
profile_researcher = create_react_agent(
model=llm,
tools=[research_person_profile],
prompt="""You are a Profile Research Agent. Research missing profile information using the research_person_profile tool.
IMPORTANT: When analyzing search results, provide your findings in this EXACT format:
1. Current Company Name: [specific company name]
2. Current Job Title: [specific job title]
3. Job Change Status: [Yes/No] - [brief reason]
4. ICP Criteria Match: [Yes/No] - [brief reason]
Be specific and clear. Use the exact format above for consistency.""",
name="profile_researcher"
)
job_analyst = create_react_agent(
model=llm,
tools=[detect_job_change],
prompt="""You are a Job Change Detection Agent. Analyze employment transitions using the detect_job_change tool.
IMPORTANT: Provide your analysis in this EXACT format:
1. Job Change Detected: [True/False]
2. Reason: [different companies, rebranding, acquisition, etc.]
3. Confidence Level: [High/Medium/Low]
Use the exact format above for consistency.""",
name="job_analyst"
)
icp_assessor = create_react_agent(
model=llm,
tools=[assess_icp_match],
prompt="""You are an ICP Assessment Agent. Evaluate if people fit the Ideal Customer Profile using the assess_icp_match tool.
IMPORTANT: Provide your assessment in this EXACT format:
1. ICP Match: [Yes/No]
2. Reason: [specific reason for your assessment]
3. Confidence Level: [High/Medium/Low]
Use the exact format above for consistency.""",
name="icp_assessor"
)
email_finder = create_react_agent(
model=llm,
tools=[find_business_email],
prompt="""You are an Email Discovery Agent. Find and validate business emails using the find_business_email tool.
IMPORTANT: Provide your findings in this EXACT format:
1. Most Probable Business Email: [email address]
2. Alternative Patterns: [if available]
3. Confidence Level: [High/Medium/Low]
Use the exact format above for consistency.""",
name="email_finder"
)
# =============================================================================
# CREATE SUPERVISOR
# =============================================================================
supervisor = create_supervisor(
agents=[profile_researcher, job_analyst, icp_assessor, email_finder],
model=llm,
prompt=(
"You manage a team of profile analysis agents with access to real-time web search data: "
"profile_researcher (researches current employment using LinkedIn and web search), "
"job_analyst (analyzes company relationships and job changes using business research), "
"icp_assessor (evaluates ICP fit based on current role), and "
"email_finder (discovers business email patterns using company research). "
"COORDINATION STRATEGY:"
"1. Start with profile_researcher to get current employment info"
"2. Use job_analyst to determine if there was a job change"
"3. Use icp_assessor to evaluate ICP fit based on current role"
"4. Use email_finder to discover business email at current company"
"CRITICAL REQUIREMENT: After all agents complete their work, you MUST provide a FINAL SYNTHESIS "
"that clearly states the following information in a structured format:"
"- Current Company Name: [company]"
"- Current Job Title: [title]"
"- Job Change Status: [Yes/No] with reason: [explanation]"
"- ICP Match Status: [Yes/No] with reason: [explanation]"
"- Most Probable Business Email: [email]"
"Each agent will provide search results that you need to analyze intelligently. "
"Coordinate their research efforts sequentially and ensure each agent has the context "
"they need from previous agents' findings. Your final synthesis is crucial for data extraction."
)
).compile()
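# Minimal usage sketch (mirrors analyze_profile below; the query text is illustrative):
#   for chunk in supervisor.stream({"messages": [{"role": "user", "content": "Analyze Jane Doe ..."}]}):
#       print(chunk)  # per-agent updates, then the supervisor's final synthesis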
# =============================================================================
# INTELLIGENT DATA EXTRACTION
# =============================================================================
def extract_data_with_ai(agent_responses: List[str], profile_input: Dict) -> ProfileAnalysisResult:
"""Use AI to extract structured data from agent responses"""
# Very simple, direct prompt
extraction_prompt = f"""
Extract profile data from this text. Return ONLY valid JSON:
Text: {agent_responses[0]}
JSON format:
{{
"currentCompany": "company name",
"title": "job title",
"isAJobChange": true/false,
"isAnICP": true/false,
"probableBusinessEmail": "email"
}}
"""
try:
response = llm.invoke(extraction_prompt)
if not response.content or not response.content.strip():
raise ValueError("LLM returned empty response")
# Clean response
content = response.content.strip()
if "```json" in content:
start = content.find("```json") + 7
end = content.find("```", start)
if end != -1:
content = content[start:end]
elif "```" in content:
start = content.find("```") + 3
end = content.find("```", start)
if end != -1:
content = content[start:end]
content = content.strip()
print(f"πŸ” Cleaned Response: {content}")
# Parse JSON
extracted_data = json.loads(content)
# Validate and create result
return ProfileAnalysisResult(
fn=profile_input.get("fn", ""),
ln=profile_input.get("ln", ""),
currentCompany=extracted_data.get("currentCompany", "Unknown"),
title=extracted_data.get("title", "Unknown"),
isAJobChange=bool(extracted_data.get("isAJobChange", False)),
isAnICP=bool(extracted_data.get("isAnICP", False)),
probableBusinessEmail=extracted_data.get("probableBusinessEmail", "Unknown")
)
except Exception as e:
print(f"❌ AI extraction failed: {e}")
# Create fallback result instead of raising error
fallback_email = f"{profile_input.get('fn', '').lower()}.{profile_input.get('ln', '').lower()}@{profile_input.get('company', 'company').lower().replace(' ', '')}.com"
return ProfileAnalysisResult(
fn=profile_input.get("fn", ""),
ln=profile_input.get("ln", ""),
currentCompany=profile_input.get("company", "Unknown"),
title=profile_input.get("title", "Unknown"),
isAJobChange=False,
isAnICP=False,
probableBusinessEmail=fallback_email
)
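# Example of the JSON the extractor expects back from the LLM (illustrative values):
#   {"currentCompany": "ExampleCo", "title": "VP Engineering", "isAJobChange": true,
#    "isAnICP": true, "probableBusinessEmail": "jane.doe@example.com"}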
# =============================================================================
# MAIN EXECUTION
# =============================================================================
def analyze_profile(profile_input: Dict[str, Any]) -> ProfileAnalysisResult:
"""Analyze profile using LangGraph supervisor and react agents"""
print(f"πŸ€– LangGraph Supervisor analyzing: {profile_input}")
# Create analysis request with specific instructions
query = f"""
Research and analyze this profile completely:
CURRENT DATA:
- Name: {profile_input.get('fn')} {profile_input.get('ln')}
- Known Company: {profile_input.get('company', 'unknown')}
- Known Title: {profile_input.get('title', 'unknown')}
- Email: {profile_input.get('email', 'unknown')}
- Location: {profile_input.get('location', 'unknown')}
- ICP Criteria: {profile_input.get('icp', 'senior engineering leadership')}
TASKS:
1. RESEARCH: Find this person's CURRENT company and title (the provided data might be outdated)
2. JOB CHANGE: Compare known company vs current company to detect job changes or rebranding
3. ICP ASSESSMENT: Check if current title matches the ICP criteria
4. EMAIL: Generate probable business email for their CURRENT company
IMPORTANT: After all agents complete their work, synthesize the final results into a clear summary with:
- Current Company Name
- Current Job Title
- Job Change Status (Yes/No with reason)
- ICP Match Status (Yes/No with reason)
- Most Probable Business Email
Use your specialized agents and provide complete results.
"""
# Run supervisor with react agents and collect all results
agent_results = {}
all_messages = []
# Let LangGraph handle the flow control automatically
for chunk in supervisor.stream({
"messages": [{"role": "user", "content": query}]
}):
print(chunk)
# Extract agent results from chunks
for agent_name in ['profile_researcher', 'job_analyst', 'icp_assessor', 'email_finder']:
if agent_name in chunk:
agent_results[agent_name] = chunk[agent_name]
# Collect all supervisor messages for analysis
if 'supervisor' in chunk and 'messages' in chunk['supervisor']:
all_messages.extend(chunk['supervisor']['messages'])
# Use LangGraph's natural flow - let the supervisor synthesize results
# The supervisor should have provided a final summary in the last message
final_messages = [msg for msg in all_messages if hasattr(msg, 'content') and msg.content]
if not final_messages:
raise ValueError("No messages received from agents")
# Get the supervisor's final synthesis (last message)
supervisor_synthesis = final_messages[-1].content
print(f"πŸ” Supervisor Synthesis: {supervisor_synthesis}")
# Use AI to extract structured data from the supervisor's synthesis
agent_responses = [supervisor_synthesis] # Only use the final synthesis
return extract_data_with_ai(agent_responses, profile_input)
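# Example call (see main() below for the full test cases; values here are illustrative):
#   analyze_profile({"fn": "Amit", "ln": "Dugar", "company": "BuyerAssist", "title": "CTO",
#                    "location": "Pune", "email": "", "icp": "senior engineering leadership"})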
def analyze_profile_with_progress(profile_input: Dict[str, Any], progress) -> ProfileAnalysisResult:
"""Analyze profile with progress updates for Gradio UI"""
try:
progress(0.1, desc="πŸ” Initializing analysis...")
# Create analysis request with specific instructions
query = f"""
Research and analyze this profile completely:
CURRENT DATA:
- Name: {profile_input.get('fn')} {profile_input.get('ln')}
- Known Company: {profile_input.get('company', 'unknown')}
- Known Title: {profile_input.get('title', 'unknown')}
- Email: {profile_input.get('email', 'unknown')}
- Location: {profile_input.get('location', 'unknown')}
- ICP Criteria: {profile_input.get('icp', 'senior engineering leadership')}
TASKS:
1. RESEARCH: Find this person's CURRENT company and title (the provided data might be outdated)
2. JOB CHANGE: Compare known company vs current company to detect job changes or rebranding
3. ICP ASSESSMENT: Check if current title matches the ICP criteria
4. EMAIL: Generate probable business email for their CURRENT company
IMPORTANT: After all agents complete their work, synthesize the final results into a clear summary with:
- Current Company Name
- Current Job Title
- Job Change Status (Yes/No with reason)
- ICP Match Status (Yes/No with reason)
- Most Probable Business Email
Use your specialized agents and provide complete results.
"""
progress(0.2, desc="πŸ€– Starting LangGraph supervisor...")
# Run supervisor with react agents and collect all results
agent_results = {}
all_messages = []
agent_count = 0
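# Each of the four agents advances the bar by 0.15, so progress moves from 0.2 to 0.8
# before the final synthesis and extraction steps below.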
# Let LangGraph handle the flow control automatically
for chunk in supervisor.stream({
"messages": [{"role": "user", "content": query}]
}):
print(chunk)
# Update progress based on agent activity
for agent_name in ['profile_researcher', 'job_analyst', 'icp_assessor', 'email_finder']:
if agent_name in chunk:
if agent_name not in agent_results:
agent_results[agent_name] = chunk[agent_name]
agent_count += 1
progress(0.2 + (agent_count * 0.15), desc=f"🔄 {agent_name.replace('_', ' ').title()} working...")
# Collect all messages for analysis
if 'supervisor' in chunk and 'messages' in chunk['supervisor']:
all_messages.extend(chunk['supervisor']['messages'])
progress(0.8, desc="πŸ“Š Processing final results...")
# Use LangGraph's natural flow - let the supervisor synthesize results
final_messages = [msg for msg in all_messages if hasattr(msg, 'content') and msg.content]
if not final_messages:
# Create a fallback result if no messages received
progress(0.9, desc="⚠️ Creating fallback result...")
return ProfileAnalysisResult(
fn=profile_input.get("fn", ""),
ln=profile_input.get("ln", ""),
currentCompany=profile_input.get("company", "Unknown"),
title=profile_input.get("title", "Unknown"),
isAJobChange=False,
isAnICP=False,
probableBusinessEmail=f"{profile_input.get('fn', '').lower()}.{profile_input.get('ln', '').lower()}@{profile_input.get('company', 'company').lower().replace(' ', '')}.com"
)
# Get the supervisor's final synthesis (last message)
supervisor_synthesis = final_messages[-1].content
print(f"πŸ” Supervisor Synthesis: {supervisor_synthesis}")
progress(0.9, desc="πŸ” Extracting structured data...")
# Use AI to extract structured data from the supervisor's synthesis
agent_responses = [supervisor_synthesis]
result = extract_data_with_ai(agent_responses, profile_input)
progress(1.0, desc="βœ… Analysis complete!")
return result
except Exception as e:
progress(1.0, desc="❌ Analysis failed - creating fallback result")
print(f"Error in analysis: {e}")
# Return a fallback result instead of crashing
return ProfileAnalysisResult(
fn=profile_input.get("fn", ""),
ln=profile_input.get("ln", ""),
currentCompany=profile_input.get("company", "Unknown"),
title=profile_input.get("title", "Unknown"),
isAJobChange=False,
isAnICP=False,
probableBusinessEmail=f"{profile_input.get('fn', '').lower()}.{profile_input.get('ln', '').lower()}@{profile_input.get('company', 'company').lower().replace(' ', '')}.com"
)
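# 'progress' is the gr.Progress callable passed in from analyze_profile_ui below;
# calling progress(fraction, desc=...) updates the progress bar shown in the UI.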
def main():
# Test Case 1: Job Change (Mindtickle -> getboomerang.ai)
test_case_1 = {
"fn": "Vamsi Krishna",
"ln": "Narra",
"company": "",
"location": "Pune",
"email": "",
"title": "",
"icp": ""
}
print("πŸ“‹ TEST CASE 1 - Job Change Scenario:")
print(f"Input: {json.dumps(test_case_1, indent=2)}")
print("-" * 60)
result1 = analyze_profile(test_case_1)
print("\nπŸ“Š RESULT 1:")
print(json.dumps(result1.model_dump(), indent=2))
print("\n" + "=" * 60)
# Test Case 2: No Job Change (Rebranding BuyerAssist -> getboomerang.ai)
test_case_2 = {
"fn": "Amit",
"ln": "Dugar",
"company": "BuyerAssist",
"location": "Pune",
"email": "[email protected]",
"title": "CTO",
"icp": "The person has to be in senior position in Engineer Vertical like VP Engineering, CTO, Research Fellow"
}
print("πŸ“‹ TEST CASE 2 ")
result2 = analyze_profile(test_case_2)
print(json.dumps(result2.model_dump(), indent=2))
return result1, result2
#if __name__ == "__main__":
# main()
# Build the Gradio interface (gradio is already imported above as gr)
with gr.Blocks(title="Profile Analyzer App", theme=gr.themes.Soft(), css="""
.main-container { max-height: 100vh; overflow-y: auto; }
.compact-input { margin-bottom: 2px; }
.status-box { background-color: #f8f9fa; border-radius: 8px; }
.result-box { background-color: #ffffff; border: 1px solid #dee2e6; }
.test-case-btn { margin: 1px; }
.section-header { margin: 4px 0 2px 0; font-weight: 600; font-size: 13px; }
.header { margin: 4px 0; }
.footer { margin: 4px 0; font-size: 11px; }
.input-row { margin-bottom: 2px; }
.analyze-btn { margin-top: 4px; }
.minimal-header { margin: 2px 0; font-size: 16px; }
.minimal-subheader { margin: 1px 0; font-size: 12px; }
""") as demo:
# Minimal Header
gr.Markdown("# Profile Analyzer", elem_classes=["minimal-header"])
gr.Markdown("*AI-powered profile research for Job change and ICP detection*", elem_classes=["minimal-subheader"])
# Main container with two columns
with gr.Row():
# Left Column - Inputs
with gr.Column(scale=1):
gr.Markdown("** Test Cases**", elem_classes=["section-header"])
with gr.Row():
test_case_1_btn = gr.Button("πŸ§ͺ Test 1", size="sm", variant="secondary", scale=1, elem_classes=["test-case-btn"])
test_case_2_btn = gr.Button("πŸ§ͺ Test 2", size="sm", variant="secondary", scale=1, elem_classes=["test-case-btn"])
gr.Markdown("** Profile Info**", elem_classes=["section-header"])
# Ultra-compact input layout
with gr.Row(elem_classes=["input-row"]):
fn = gr.Textbox(label="First Name", placeholder="First", scale=1, lines=1, elem_classes=["compact-input"])
ln = gr.Textbox(label="Last Name", placeholder="Last", scale=1, lines=1, elem_classes=["compact-input"])
with gr.Row(elem_classes=["input-row"]):
company = gr.Textbox(label="Company", placeholder="Company", scale=1, lines=1, elem_classes=["compact-input"])
location = gr.Textbox(label="Location", placeholder="Location", scale=1, lines=1, elem_classes=["compact-input"])
with gr.Row(elem_classes=["input-row"]):
email = gr.Textbox(label="Email", placeholder="Email", scale=1, lines=1, elem_classes=["compact-input"])
title = gr.Textbox(label="Title", placeholder="Title", scale=1, lines=1, elem_classes=["compact-input"])
icp = gr.Textbox(
label="ICP Criteria",
placeholder="e.g., senior engineering",
lines=1,
elem_classes=["compact-input"]
)
# Analyze button
analyze_btn = gr.Button("πŸš€ Analyze", variant="primary", size="lg", elem_classes=["analyze-btn"])
# Right Column - Results
with gr.Column(scale=1):
gr.Markdown("** Results**", elem_classes=["section-header"])
# Status box (ultra-compact)
status_box = gr.Textbox(
label="πŸ”„ Status",
value="Ready",
lines=1,
interactive=False,
container=False,
elem_classes=["status-box"]
)
# Output box (compact)
output = gr.Textbox(
label="πŸ“Š Analysis Result",
lines=6,
max_lines=8,
container=False,
elem_classes=["result-box"]
)
# Minimal footer note
gr.Markdown("---")
gr.Markdown("* Use test cases to populate fields quickly*", elem_classes=["footer"])
# Button click events
def load_test_case_1():
return "Vamsi Krishna", "Narra", "", "Pune", "", "", ""
def load_test_case_2():
return "Amit", "Dugar", "BuyerAssist", "Pune", "[email protected]", "CTO", "The person has to be in senior position in Engineer Vertical like VP Engineering, CTO, Research Fellow"
def analyze_profile_ui(fn, ln, company, location, email, title, icp, progress=gr.Progress()):
"""Analyze profile from UI inputs with progress updates"""
if not fn or not ln:
return "Error: First Name and Last Name are required", "Error: First Name and Last Name are required"
test_case = {
"fn": fn,
"ln": ln,
"company": company or "",
"location": location or "",
"email": email or "",
"title": title or "",
"icp": icp or ""
}
try:
progress(0, desc="πŸš€ Starting profile analysis...")
# Start the analysis with progress tracking
result = analyze_profile_with_progress(test_case, progress)
return json.dumps(result.model_dump(), indent=2), "Analysis completed successfully!"
except Exception as e:
error_msg = f"Error: {str(e)}"
return error_msg, error_msg
# Connect button events
test_case_1_btn.click(
fn=load_test_case_1,
outputs=[fn, ln, company, location, email, title, icp]
)
test_case_2_btn.click(
fn=load_test_case_2,
outputs=[fn, ln, company, location, email, title, icp]
)
analyze_btn.click(
fn=analyze_profile_ui,
inputs=[fn, ln, company, location, email, title, icp],
outputs=[output, status_box]
)
# Launch the demo
if __name__ == "__main__":
demo.launch(share=False, debug=True)
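# Run locally with `python supervisor_agent.py`; setting share=True in demo.launch() would
# additionally create a temporary public Gradio link (standard Gradio behaviour).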