Upload 3 files
Upload tested files
- README.md +19 -0
- requirements.txt +2 -1
- supervisor_agent.py +40 -3
README.md
CHANGED
@@ -198,6 +198,8 @@ pip install -r requirements.txt
 # Create .env file
 GEMINI_API_KEY=your_gemini_api_key
 TAVILY_API_KEY=your_tavily_api_key
+LANGSMITH_API_KEY=your_langsmith_api_key # Optional: for tracing and debugging
+LANGSMITH_PROJECT=profile-analyzer # Optional: project name for LangSmith
 ```

 ### Running the System
@@ -231,6 +233,22 @@ result = analyze_profile({
 print(result.model_dump())
 ```

+### LangSmith Tracing & Debugging
+The system includes comprehensive tracing for debugging and analysis:
+
+1. **Public Run Viewing**: All agent executions are traced and publicly viewable
+2. **Detailed Progress**: Step-by-step tracking of agent decisions and tool executions
+3. **Performance Metrics**: Monitor execution time, tool usage, and agent collaboration
+4. **Debug Information**: Trace through the entire reasoning process
+
+**To enable tracing:**
+```bash
+export LANGSMITH_API_KEY=your_api_key
+export LANGSMITH_PROJECT=profile-analyzer
+```
+
+**View runs at:** `https://smith.langchain.com/o/default/p/profile-analyzer`
+
 ## System Capabilities

 ### Autonomous Decision Making
@@ -242,6 +260,7 @@ print(result.model_dump())
 - **Live Web Search**: Current information from multiple sources
 - **Dynamic Updates**: Real-time progress tracking and status updates
 - **Adaptive Queries**: Search strategies that adapt to available data
+- **LangSmith Tracing**: Public viewing of agent execution traces and reasoning

 ### Fault Tolerance
 - **Incomplete Data Handling**: Works with partial profile information
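The new README section relies on the `.env` file for the optional LangSmith keys. Below is a minimal launcher sketch, not part of this commit, that loads the `.env` via `python-dotenv` (already in requirements.txt) and reports whether tracing is configured before running an analysis; the `supervisor_agent` import path and the `name` input key are assumptions made for illustration.

```python
# Illustrative sketch only: check the optional LangSmith variables before a run.
import os
from dotenv import load_dotenv

load_dotenv()  # picks up GEMINI_API_KEY, TAVILY_API_KEY, and the optional LangSmith keys

if os.getenv("LANGSMITH_API_KEY"):
    print(f"LangSmith tracing enabled for project: {os.getenv('LANGSMITH_PROJECT', 'default')}")
else:
    print("LANGSMITH_API_KEY not set; running without tracing")

# Imported after load_dotenv so any module-level config sees the variables (assumed path).
from supervisor_agent import analyze_profile

result = analyze_profile({"name": "Jane Doe"})  # hypothetical input shape
print(result.model_dump())
```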
requirements.txt
CHANGED
@@ -3,4 +3,5 @@ langgraph
 langgraph-supervisor
 pydantic
 python-dotenv
-tavily-python
+tavily-python
+langsmith
supervisor_agent.py
CHANGED
@@ -662,14 +662,22 @@ def extract_data_with_ai(agent_responses: List[str], profile_input: Dict) -> Pro
     search_results = []
     parsed_current_company = None
     parsed_current_title = None
+
+    # First priority: Get parsed data from modified profile_input (direct from tool)
+    if 'parsed_current_company' in profile_input:
+        parsed_current_company = profile_input['parsed_current_company']
+    if 'parsed_current_title' in profile_input:
+        parsed_current_title = profile_input['parsed_current_title']
+
+    # Second priority: Try to extract from agent_responses
     try:
         response_json = json.loads(agent_responses[0]) if isinstance(agent_responses[0], str) else agent_responses[0]
         if isinstance(response_json, dict):
             if 'search_results' in response_json:
                 search_results = response_json['search_results']
-            if response_json.get('parsed_current_company'):
+            if not parsed_current_company and response_json.get('parsed_current_company'):
                 parsed_current_company = response_json['parsed_current_company']
-            if response_json.get('parsed_current_title'):
+            if not parsed_current_title and response_json.get('parsed_current_title'):
                 parsed_current_title = response_json['parsed_current_title']
     except Exception:
         pass
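The hunk above gives `extract_data_with_ai` a two-tier source order: fields already attached to `profile_input` win, and the first agent response is only consulted as JSON for whatever is still missing. A self-contained sketch of that rule, using a hypothetical helper name `resolve_parsed_fields`, purely for illustration:

```python
import json
from typing import Any, Dict, List, Optional, Tuple

def resolve_parsed_fields(profile_input: Dict[str, Any],
                          agent_responses: List[Any]) -> Tuple[Optional[str], Optional[str]]:
    # First priority: values the tool already attached to profile_input.
    company = profile_input.get('parsed_current_company')
    title = profile_input.get('parsed_current_title')
    # Second priority: the first agent response, parsed as JSON if it is a string.
    try:
        first = agent_responses[0]
        data = json.loads(first) if isinstance(first, str) else first
        if isinstance(data, dict):
            company = company or data.get('parsed_current_company')
            title = title or data.get('parsed_current_title')
    except Exception:
        pass  # malformed or empty agent output: keep whatever we already have
    return company, title

# The tool-provided company survives even though the agent reports a different one.
print(resolve_parsed_fields(
    {'parsed_current_company': 'Acme'},
    ['{"parsed_current_company": "Other Corp", "parsed_current_title": "CTO"}'],
))  # -> ('Acme', 'CTO')
```

The `or` fallbacks mirror the `if not parsed_... and ...` guards in the diff: a value handed over directly is never overwritten by the agent's synthesis.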
@@ -813,9 +821,38 @@ def analyze_profile(profile_input: Dict[str, Any]) -> ProfileAnalysisResult:

     print(f"π Supervisor Synthesis: {supervisor_synthesis}")

+    # Extract parsed_current_company and parsed_current_title directly from profile_researcher results
+    parsed_current_company = None
+    parsed_current_title = None
+
+    if 'profile_researcher' in agent_results:
+        profile_result = agent_results['profile_researcher']
+        if hasattr(profile_result, 'messages') and profile_result.messages:
+            for msg in profile_result.messages:
+                if hasattr(msg, 'tool_calls') and msg.tool_calls:
+                    for tool_call in msg.tool_calls:
+                        if tool_call.get('name') == 'research_person_profile':
+                            try:
+                                tool_output = json.loads(tool_call.get('args', {}))
+                                if 'parsed_current_company' in tool_output:
+                                    parsed_current_company = tool_output['parsed_current_company']
+                                if 'parsed_current_title' in tool_output:
+                                    parsed_current_title = tool_output['parsed_current_title']
+                                print(f"π Direct tool output - Company: {parsed_current_company}, Title: {parsed_current_title}")
+                            except Exception as e:
+                                print(f"β Error parsing tool output: {e}")
+
     # Use AI to extract structured data from the supervisor's synthesis
     agent_responses = [supervisor_synthesis] # Only use the final synthesis
-    return extract_data_with_ai(agent_responses, profile_input)
+
+    # Create a modified profile_input with the parsed data
+    modified_profile_input = profile_input.copy()
+    if parsed_current_company:
+        modified_profile_input['parsed_current_company'] = parsed_current_company
+    if parsed_current_title:
+        modified_profile_input['parsed_current_title'] = parsed_current_title
+
+    return extract_data_with_ai(agent_responses, modified_profile_input)

 def analyze_profile_with_progress(profile_input: Dict[str, Any], progress) -> ProfileAnalysisResult:
     """Analyze profile with progress updates for Gradio UI"""
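The second hunk walks `agent_results['profile_researcher'].messages`, looks for `research_person_profile` tool calls, and feeds their `args` through `json.loads`. A rough mock of the shape that loop expects, using stand-in dataclasses rather than the real LangGraph message types, purely as an illustration:

```python
import json
from dataclasses import dataclass, field
from typing import Any, Dict, List

@dataclass
class FakeMessage:
    tool_calls: List[Dict[str, Any]] = field(default_factory=list)

@dataclass
class FakeAgentResult:
    messages: List[FakeMessage] = field(default_factory=list)

agent_results = {
    'profile_researcher': FakeAgentResult(messages=[
        FakeMessage(tool_calls=[{
            'name': 'research_person_profile',
            # args as a JSON string, matching what json.loads in the diff expects
            'args': json.dumps({'parsed_current_company': 'Acme',
                                'parsed_current_title': 'CTO'}),
        }]),
    ]),
}

for msg in agent_results['profile_researcher'].messages:
    for tool_call in msg.tool_calls:
        if tool_call.get('name') == 'research_person_profile':
            tool_output = json.loads(tool_call.get('args', '{}'))
            print(tool_output.get('parsed_current_company'),
                  tool_output.get('parsed_current_title'))  # -> Acme CTO
```

Note that the committed code passes `tool_call.get('args', {})` to `json.loads`, so it only succeeds when `args` arrives as a JSON string; a dict-valued `args` raises inside the `try` block and is reported by the error print instead.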