Initial commit
- .gitignore +3 -0
- agents/__init__.py +0 -0
- agents/llama_index_agent.py +57 -0
- app.py +8 -2
- requirements.txt +10 -1
- tools/__init__.py +0 -0
- tools/text_tools.py +13 -0
- youtube_analysis.py +1 -0
.gitignore
ADDED
@@ -0,0 +1,3 @@
+.env
+notebooks/
+.venv/
agents/__init__.py
ADDED
File without changes
agents/llama_index_agent.py
ADDED
@@ -0,0 +1,57 @@
+from llama_index.core.agent.workflow import (
+    AgentWorkflow,
+    ReActAgent,
+    FunctionAgent
+)
+from tools.text_tools import reverse_text_tool
+from llama_index.llms.openai import OpenAI
+import os
+
+openai = OpenAI(model="gpt-4o", api_key=os.getenv("OPENAI_API_KEY"))
+
+main_agent = ReActAgent(
+    name="jefe",
+    description="Agent that will receive the queries, understand them, and send them to the correct agents to do the job",
+    llm=openai,
+    system_prompt="""
+    You are a ReActAgent that has a team of AI agents available to solve
+    questions and challenges from the GAIA Benchmark.
+
+    You must very carefully read the questions, understand them, and divide them into steps.
+    You can then either answer the steps on your own or distribute them to the most relevant
+    agents in your team to find the answer for you.
+
+    At the end, once you gather all the intermediate answers, compose the final answer.
+
+    The questions will be given to you in the following format:
+    ```
+    {
+        'task_id': '5a0c1adf-205e-4841-a666-7c3ef95def9d',
+        'question': 'What is the first name of the only Malko Competition recipient from the 20th Century (after 1977) whose nationality on record is a country that no longer exists?',
+        'Level': '1',
+        'file_name': ''
+    }
+    ```
+
+    If the question has a file attached, the other agents in your team will have the tools to open and
+    analyze it.
+
+    Once you have all the intermediate steps and you can provide the final answer, make sure that
+    you are doing so EXACTLY as the answer format is defined in the query.
+
+    You also have access to your own tools:
+    * `reverse_text_tool` --> Reverses the input text
+
+    Send as the final answer your last answer, formatted as expected in the instructions of the question.
+    """,
+    can_handoff_to=[
+        "video_analyst",
+        "audio_analyst",
+        "researcher",
+        "code_analyst",
+        "excel_analyst"
+    ],
+    tools=[
+        reverse_text_tool
+    ]
+)
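The agent above can be smoke-tested on its own. A minimal sketch (assuming OPENAI_API_KEY is set and the repository root is on the Python path; the sub-agents named in can_handoff_to are not defined in this commit, so only the root agent's own tool is exercised):

```python
# Hypothetical standalone check for the root agent; not part of this commit.
import asyncio

from agents.llama_index_agent import main_agent


async def smoke_test() -> None:
    # ReActAgent from llama_index.core.agent.workflow exposes an awaitable run().
    response = await main_agent.run("Reverse this text: .elpmaxe na si sihT")
    print(str(response))


if __name__ == "__main__":
    asyncio.run(smoke_test())
```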
app.py
CHANGED
@@ -3,7 +3,8 @@ import gradio as gr
 import requests
 import inspect
 import pandas as pd
-
+from agents.llama_index_agent import main_agent
+import asyncio
 # (Keep Constants as is)
 # --- Constants ---
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
@@ -15,7 +16,12 @@ class BasicAgent:
         print("BasicAgent initialized.")
     def __call__(self, question: str) -> str:
         print(f"Agent received question (first 50 chars): {question[:50]}...")
-
+        base_agent = main_agent
+        async def agentic_main():
+            return await base_agent.run(question)
+        response = asyncio.run(agentic_main())
+        print(response)
+        exit()
         print(f"Agent returning fixed answer: {fixed_answer}")
         return fixed_answer
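As committed, __call__ runs the agent, prints the response, and calls exit() before the fixed answer is returned, so the app stops after the first question. A sketch of how the method could return the agent's answer instead (a variant under the same imports, not what this commit does):

```python
# Hypothetical variant of BasicAgent.__call__ that returns the agent's answer
# rather than the fixed placeholder.
def __call__(self, question: str) -> str:
    print(f"Agent received question (first 50 chars): {question[:50]}...")

    async def agentic_main() -> str:
        response = await main_agent.run(question)
        return str(response)

    answer = asyncio.run(agentic_main())
    print(f"Agent returning answer: {answer}")
    return answer
```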
requirements.txt
CHANGED
@@ -1,2 +1,11 @@
 gradio
-requests
+requests
+llama-index
+llama-index-tools-wikipedia
+llama-index-tools-tavily-research
+nest_asyncio
+certifi
+board_to_fen
+keras==2.11
+tensorflow==2.13.0rc0
+numpy==1.23.5
tools/__init__.py
ADDED
File without changes
tools/text_tools.py
ADDED
@@ -0,0 +1,13 @@
+from llama_index.core.tools import FunctionTool
+
+
+def reverse_text(text: str) -> str:
+    """Return the input text reversed."""
+    return text[::-1]
+
+
+reverse_text_tool = FunctionTool.from_defaults(
+    reverse_text,
+    name="reverse_text_tool",
+    description="Returns the input text reversed.",
+)
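For a quick local check, the wrapped function can also be called through the tool interface. A small usage sketch (not part of this commit):

```python
from tools.text_tools import reverse_text_tool

# FunctionTool.call() invokes the wrapped function and returns a ToolOutput.
result = reverse_text_tool.call("hello")
print(result)  # -> "olleh"
```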
youtube_analysis.py
ADDED
@@ -0,0 +1 @@
+