Commit 934c2d5
1 Parent(s): 414914f
Update app UI

gradio_ui.py CHANGED (+202 -152)
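gradio_ui.py wraps a smolagents agent in a Gradio Blocks app. A minimal way to serve the updated UI might look like the sketch below (hypothetical, not part of this commit: build_agent and agent_setup are stand-ins for however the Space actually constructs its agent, and the constructor arguments follow the attribute assignments visible in the diff):

# Hypothetical launcher sketch -- not part of this commit.
# build_agent() is a stand-in for the Space's own agent construction code.
from gradio_ui import GradioUI
from agent_setup import build_agent  # assumed helper module, for illustration only

agent = build_agent()                               # any smolagents MultiStepAgent
ui = GradioUI(agent, file_upload_folder="uploads")  # assumed constructor arguments
ui.create_app().launch()                            # create_app() returns the gr.Blocks shown below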
Old version (lines marked with - were removed; long removed lines appear truncated in this view):

@@ -13,21 +13,101 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import os
 import re
 import shutil
 from typing import Optional

 from smolagents.agent_types import AgentAudio, AgentImage, AgentText
 from smolagents.agents import MultiStepAgent, PlanningStep
 from smolagents.memory import ActionStep, FinalAnswerStep, MemoryStep
 from smolagents.utils import _is_package_available
-

 def image_to_base64(image_path):
     with open(image_path, "rb") as image_file:
         return base64.b64encode(image_file.read()).decode('utf-8')

 def get_step_footnote_content(step_log: MemoryStep, step_name: str) -> str:
     """Get a footnote string for a step log with duration and token information"""
     step_footnote = f"**{step_name}**"

@@ -37,111 +117,89 @@ def get_step_footnote_content(step_log: MemoryStep, step_name: str) -> str:
     if hasattr(step_log, "duration"):
         step_duration = f" | Duration: {round(float(step_log.duration), 2)}" if step_log.duration else None
         step_footnote += step_duration
-    step_footnote_content = f"""<span style="color: #bbbbc2; font-size: 12px;">{step_footnote}
     return step_footnote_content


-def pull_messages_from_step(
-    step_log: MemoryStep,
-):
     """Extract ChatMessage objects from agent steps with proper nesting"""
     if not _is_package_available("gradio"):
         raise ModuleNotFoundError(
             "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
         )
-    import gradio as gr

     if isinstance(step_log, ActionStep):
-        # Output the step number
         step_number = f"Step {step_log.step_number}" if step_log.step_number is not None else "Step"
         yield gr.ChatMessage(role="assistant", content=f"**{step_number}**")

-
-        if hasattr(step_log, "model_output") and step_log.model_output is not None:
-            # Clean up the LLM output
             model_output = step_log.model_output.strip()
-
-            model_output = re.sub(r"
-            model_output = re.sub(
-
             model_output = model_output.strip()
             yield gr.ChatMessage(role="assistant", content=model_output)

-
-        if hasattr(step_log, "tool_calls") and step_log.tool_calls is not None:
             first_tool_call = step_log.tool_calls[0]
             used_code = first_tool_call.name == "python_interpreter"
-            parent_id = f"call_{len(step_log.tool_calls)}"
-
-            # Tool call becomes the parent message with timing info
-            # First we will handle arguments based on type
             args = first_tool_call.arguments
-
-
-            else:
-                content = str(args).strip()

             if used_code:
-
-                content = re.sub(r"
-                content = re.sub(r"\s*<end_code>\s*", "", content)  # Remove end_code tags
-                content = content.strip()
                 if not content.startswith("```python"):
                     content = f"```python\n{content}\n```"

-
                 role="assistant",
                 content=content,
                 metadata={
                     "title": f"🛠️ Used tool {first_tool_call.name}",
-                    "id":
                     "status": "done",
                 },
             )
-            yield parent_message_tool
-
-            # Display execution logs if they exist
-            if hasattr(step_log, "observations") and (
-                step_log.observations is not None and step_log.observations.strip()
-            ):  # Only yield execution logs if there's actual content
-                log_content = step_log.observations.strip()
-                if log_content:
-                    log_content = re.sub(r"^Execution logs:\s*", "", log_content)
-                    yield gr.ChatMessage(
-                        role="assistant",
-                        content=f"```bash\n{log_content}\n",
-                        metadata={"title": "📝 Execution Logs", "status": "done"},
-                    )

-
-
             yield gr.ChatMessage(
                 role="assistant",
                 content=str(step_log.error),
                 metadata={"title": "💥 Error", "status": "done"},
             )

-        # Update parent message metadata to done status without yielding a new message
         if getattr(step_log, "observations_images", []):
             for image in step_log.observations_images:
                 path_image = AgentImage(image).to_string()
                 yield gr.ChatMessage(
                     role="assistant",
-                    content={"path": path_image,
                     metadata={"title": "🖼️ Output Image", "status": "done"},
                 )

-        # Handle standalone errors but not from tool calls
-        if hasattr(step_log, "error") and step_log.error is not None:
-            yield gr.ChatMessage(role="assistant", content=str(step_log.error), metadata={"title": "💥 Error"})
-
         yield gr.ChatMessage(role="assistant", content=get_step_footnote_content(step_log, step_number))
         yield gr.ChatMessage(role="assistant", content="-----", metadata={"status": "done"})

     elif isinstance(step_log, PlanningStep):
         yield gr.ChatMessage(role="assistant", content="**Planning step**")
         yield gr.ChatMessage(role="assistant", content=step_log.plan)
-        yield gr.ChatMessage(
         yield gr.ChatMessage(role="assistant", content="-----", metadata={"status": "done"})

     elif isinstance(step_log, FinalAnswerStep):

@@ -154,16 +212,20 @@ def pull_messages_from_step(
         elif isinstance(final_answer, AgentImage):
             yield gr.ChatMessage(
                 role="assistant",
-                content={"path": final_answer.to_string(),
             )
         elif isinstance(final_answer, AgentAudio):
             yield gr.ChatMessage(
                 role="assistant",
-                content={"path": final_answer.to_string(),
             )
         else:
-            yield gr.ChatMessage(
-
     else:
         raise ValueError(f"Unsupported step type: {type(step_log)}")


@@ -179,7 +241,6 @@ def stream_to_gradio(
     total_output_tokens = 0

     for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
-        # Track tokens if model provides them
         if getattr(agent.model, "last_input_token_count", None) is not None:
             total_input_tokens += agent.model.last_input_token_count
             total_output_tokens += agent.model.last_output_token_count

@@ -187,9 +248,7 @@
             step_log.input_token_count = agent.model.last_input_token_count
             step_log.output_token_count = agent.model.last_output_token_count

-        for message in pull_messages_from_step(
-            step_log,
-        ):
             yield message


@@ -205,14 +264,12 @@ class GradioUI:
         self.file_upload_folder = file_upload_folder
         self.name = getattr(agent, "name") or "Agent interface"
         self.description = getattr(agent, "description", None)
-        if self.file_upload_folder is not None:
-
-                os.mkdir(file_upload_folder)

     def interact_with_agent(self, prompt, messages, session_state):
         import gradio as gr

-        # Get the agent type from the template agent
         if "agent" not in session_state:
             session_state["agent"] = self.agent

@@ -226,14 +283,11 @@ class GradioUI:

             yield messages
         except Exception as e:
-
-
             yield messages

     def upload_file(self, file, file_uploads_log, allowed_file_types=None):
-        """
-        Handle file uploads, default allowed types are .pdf, .docx, and .txt
-        """
         import gradio as gr

         if file is None:

@@ -246,14 +300,9 @@ class GradioUI:
         if file_ext not in allowed_file_types:
             return gr.Textbox("File type disallowed", visible=True), file_uploads_log

-        # Sanitize file name
         original_name = os.path.basename(file.name)
-        sanitized_name = re.sub(
-
-        )  # Replace any non-alphanumeric, non-dash, or non-dot characters with underscores
-
-        # Save the uploaded file to the specified folder
-        file_path = os.path.join(self.file_upload_folder, os.path.basename(sanitized_name))
         shutil.copy(file.name, file_path)

         return gr.Textbox(f"File uploaded: {file_path}", visible=True), file_uploads_log + [file_path]

@@ -264,9 +313,8 @@ class GradioUI:
         return (
             text_input
             + (
-                f"\nYou have been provided with these files
-                if
-                else ""
             ),
             "",
             gr.Button(interactive=False),

@@ -278,92 +326,94 @@ class GradioUI:
     def create_app(self):
         import gradio as gr

-        with gr.Blocks(theme="ocean", fill_height=True) as demo:
-
             session_state = gr.State({})
             stored_messages = gr.State([])
             file_uploads_log = gr.State([])

-
-
-
-
-
-
-            with gr.Row():
-                logo_base64 = image_to_base64("static/aivn_logo.png")
-                gr.HTML(f"""
-                    <img src="data:image/png;base64,{logo_base64}"
-                    alt="Logo"
-                    style="display: block; margin: auto; height: 120px; width: auto; margin-bottom: 20px;">
-                """)
-            gr.Markdown(f"\n\n>{self.description}" if self.description else "")
-
-            with gr.Group():
-                gr.Markdown("**Your request**", container=True)
                 text_input = gr.Textbox(
-                    lines=
-                    label="
-
-                    placeholder="Enter your prompt here and press Shift+Enter or press the button",
                 )
-                submit_btn = gr.Button(
-
-            # If an upload folder is provided, enable the upload feature
-            if self.file_upload_folder is not None:
-                upload_file = gr.File(label="Upload a file")
-                upload_status = gr.Textbox(label="Upload Status", interactive=False, visible=False)
-                upload_file.change(
-                    self.upload_file,
-                    [upload_file, file_uploads_log],
-                    [upload_status, file_uploads_log],
                 )


-
-
-
-
-
                 None,
-
-            )
-                resizeable=True,
-                scale=1,
-            )

-
-
-
-
-
-
-
-
-
             ),
-
-
-
-                [text_input, submit_btn],
-            )

-
-                self.log_user_message,
-                [text_input, file_uploads_log],
-                [stored_messages, text_input, submit_btn],
-            ).then(self.interact_with_agent, [stored_messages, chatbot, session_state], [chatbot]).then(
-                lambda: (
-                    gr.Textbox(
-                        interactive=True, placeholder="Enter your prompt here and press Shift+Enter or the button"
-                    ),
-                    gr.Button(interactive=True),
-                ),
-                None,
-                [text_input, submit_btn],
-            )

         return demo


-__all__ = ["stream_to_gradio", "GradioUI"]
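The rewritten pull_messages_from_step (in the new version below) strips smolagents' <end_code> markers from the model output with three re.sub passes before echoing it into the chat. A standalone illustration of that cleanup, using an invented sample string:

# Standalone illustration of the <end_code> cleanup applied to model output.
# The sample string is invented for demonstration; the regexes mirror the new code below.
import re

raw = "Thought: fetch the page\n```py\nresult = visit_webpage(url)\n```<end_code>"
cleaned = re.sub(r"```\s*<end_code>", "```", raw)
cleaned = re.sub(r"<end_code>\s*```", "```", cleaned)
cleaned = re.sub(r"```\s*\n\s*<end_code>", "```", cleaned)
print(cleaned)  # the trailing <end_code> marker is gone, the code fence is kept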
New version (lines marked with + were added):

@@ -13,21 +13,101 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 import os
 import re
 import shutil
+import base64
 from typing import Optional

+import gradio as gr
+
 from smolagents.agent_types import AgentAudio, AgentImage, AgentText
 from smolagents.agents import MultiStepAgent, PlanningStep
 from smolagents.memory import ActionStep, FinalAnswerStep, MemoryStep
 from smolagents.utils import _is_package_available
+
+CUSTOM_CSS = """
+.gradio-container {min-height: 100vh;}
+.content-wrap {padding-bottom: 60px;}
+.full-width-btn {
+    width: 100% !important;
+    height: 50px !important;
+    font-size: 18px !important;
+    margin-top: 20px !important;
+    background: linear-gradient(45deg, #FF6B6B, #4ECDC4) !important;
+    color: white !important;
+    border: none !important;
+}
+.full-width-btn:hover {
+    background: linear-gradient(45deg, #FF5252, #3CB4AC) !important;
+}
+"""
+

 def image_to_base64(image_path):
     with open(image_path, "rb") as image_file:
         return base64.b64encode(image_file.read()).decode('utf-8')

+
+def create_header():
+    with gr.Row():
+        with gr.Column(scale=1):
+            if os.path.exists("static/aivn_logo.png"):
+                logo_base64 = image_to_base64("static/aivn_logo.png")
+                gr.HTML(f"""
+                    <img src="data:image/png;base64,{logo_base64}"
+                    alt="Logo"
+                    style="height: 120px; width: auto; margin-right: 20px; margin-bottom: 20px;">
+                """)
+            else:
+                gr.HTML("""
+                    <div style="height: 120px; display: flex; align-items: center; justify-content: center; font-size: 24px; font-weight: bold;">
+                        AI VIETNAM
+                    </div>
+                """)
+        with gr.Column(scale=4):
+            gr.Markdown(
+                """
+                <div style="display: flex; justify-content: space-between; align-items: center; padding: 0 15px;">
+                    <div>
+                        <h1 style="margin-bottom:0;">📰 Smart News Agent</h1>
+                        <p style="margin-top: 0.5em; color: #666;">🚀 AIO2024 Module 10 🤗</p>
+                        <p style="margin-top: 0.5em; color: #2c3e50;">🗞️ Real-time News Fetch & Summarization</p>
+                        <p style="margin-top: 0.2em; color: #7f8c8d;">🔍 Topic Classification & Insight Extraction</p>
+                    </div>
+                </div>
+                """)
+
+
+def create_footer():
+    footer_html = """
+    <style>
+    .sticky-footer {
+        position: fixed;
+        bottom: 0;
+        left: 0;
+        width: 100%;
+        background: white;
+        padding: 10px;
+        box-shadow: 0 -2px 10px rgba(0,0,0,0.1);
+        z-index: 1000;
+    }
+    .content-wrap {
+        padding-bottom: 60px; /* Footer height + extra spacing */
+    }
+    </style>
+
+    <div class="sticky-footer">
+        <div style="text-align: center; font-size: 14px;">
+            Created by <a href="https://vlai.work" target="_blank" style="color: #007BFF; text-decoration: none;">VLAI</a>
+            • AI VIETNAM
+        </div>
+    </div>
+    """
+    return gr.HTML(footer_html)
+
+
 def get_step_footnote_content(step_log: MemoryStep, step_name: str) -> str:
     """Get a footnote string for a step log with duration and token information"""
     step_footnote = f"**{step_name}**"

@@ -37,111 +117,89 @@ def get_step_footnote_content(step_log: MemoryStep, step_name: str) -> str:
     if hasattr(step_log, "duration"):
         step_duration = f" | Duration: {round(float(step_log.duration), 2)}" if step_log.duration else None
         step_footnote += step_duration
+    step_footnote_content = f"""<span style="color: #bbbbc2; font-size: 12px;">{step_footnote}</span>"""
     return step_footnote_content


+def pull_messages_from_step(step_log: MemoryStep):
     """Extract ChatMessage objects from agent steps with proper nesting"""
     if not _is_package_available("gradio"):
         raise ModuleNotFoundError(
             "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
         )

     if isinstance(step_log, ActionStep):
         step_number = f"Step {step_log.step_number}" if step_log.step_number is not None else "Step"
         yield gr.ChatMessage(role="assistant", content=f"**{step_number}**")

+        if hasattr(step_log, "model_output") and step_log.model_output:
             model_output = step_log.model_output.strip()
+            model_output = re.sub(r"```\s*<end_code>", "```", model_output)
+            model_output = re.sub(r"<end_code>\s*```", "```", model_output)
+            model_output = re.sub(
+                r"```\s*\n\s*<end_code>", "```", model_output)
             model_output = model_output.strip()
             yield gr.ChatMessage(role="assistant", content=model_output)

+        if hasattr(step_log, "tool_calls") and step_log.tool_calls:
             first_tool_call = step_log.tool_calls[0]
             used_code = first_tool_call.name == "python_interpreter"
             args = first_tool_call.arguments
+            content = str(args.get("answer", args)) if isinstance(
+                args, dict) else str(args).strip()

             if used_code:
+                content = re.sub(r"```.*?\n", "", content)
+                content = re.sub(r"\s*<end_code>\s*", "", content).strip()
                 if not content.startswith("```python"):
                     content = f"```python\n{content}\n```"

+            yield gr.ChatMessage(
                 role="assistant",
                 content=content,
                 metadata={
                     "title": f"🛠️ Used tool {first_tool_call.name}",
+                    "id": f"call_{len(step_log.tool_calls)}",
                     "status": "done",
                 },
             )

+        if hasattr(step_log, "observations") and step_log.observations and step_log.observations.strip():
+            log_content = re.sub(r"^Execution logs:\s*",
+                                 "", step_log.observations.strip())
+            yield gr.ChatMessage(
+                role="assistant",
+                content=f"```bash\n{log_content}\n```",
+                metadata={"title": "📝 Execution Logs", "status": "done"},
+            )
+
+        if hasattr(step_log, "error") and step_log.error:
             yield gr.ChatMessage(
                 role="assistant",
                 content=str(step_log.error),
                 metadata={"title": "💥 Error", "status": "done"},
             )

         if getattr(step_log, "observations_images", []):
             for image in step_log.observations_images:
                 path_image = AgentImage(image).to_string()
                 yield gr.ChatMessage(
                     role="assistant",
+                    content={"path": path_image,
+                             "mime_type": f"image/{path_image.split('.')[-1]}"},
                     metadata={"title": "🖼️ Output Image", "status": "done"},
                 )

         yield gr.ChatMessage(role="assistant", content=get_step_footnote_content(step_log, step_number))
         yield gr.ChatMessage(role="assistant", content="-----", metadata={"status": "done"})

     elif isinstance(step_log, PlanningStep):
         yield gr.ChatMessage(role="assistant", content="**Planning step**")
         yield gr.ChatMessage(role="assistant", content=step_log.plan)
+        yield gr.ChatMessage(
+            role="assistant",
+            content=get_step_footnote_content(step_log, "Planning step")
+        )
         yield gr.ChatMessage(role="assistant", content="-----", metadata={"status": "done"})

     elif isinstance(step_log, FinalAnswerStep):

@@ -154,16 +212,20 @@ def pull_messages_from_step(
         elif isinstance(final_answer, AgentImage):
             yield gr.ChatMessage(
                 role="assistant",
+                content={"path": final_answer.to_string(),
+                         "mime_type": "image/png"},
             )
         elif isinstance(final_answer, AgentAudio):
             yield gr.ChatMessage(
                 role="assistant",
+                content={"path": final_answer.to_string(),
+                         "mime_type": "audio/wav"},
             )
         else:
+            yield gr.ChatMessage(
+                role="assistant",
+                content=f"**Final answer:** {str(final_answer)}"
+            )
     else:
         raise ValueError(f"Unsupported step type: {type(step_log)}")


@@ -179,7 +241,6 @@ def stream_to_gradio(
     total_output_tokens = 0

     for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
         if getattr(agent.model, "last_input_token_count", None) is not None:
             total_input_tokens += agent.model.last_input_token_count
             total_output_tokens += agent.model.last_output_token_count

@@ -187,9 +248,7 @@
             step_log.input_token_count = agent.model.last_input_token_count
             step_log.output_token_count = agent.model.last_output_token_count

+        for message in pull_messages_from_step(step_log):
             yield message


@@ -205,14 +264,12 @@ class GradioUI:
         self.file_upload_folder = file_upload_folder
         self.name = getattr(agent, "name") or "Agent interface"
         self.description = getattr(agent, "description", None)
+        if self.file_upload_folder is not None and not os.path.exists(file_upload_folder):
+            os.mkdir(file_upload_folder)

     def interact_with_agent(self, prompt, messages, session_state):
         import gradio as gr

         if "agent" not in session_state:
             session_state["agent"] = self.agent

@@ -226,14 +283,11 @@ class GradioUI:

             yield messages
         except Exception as e:
+            messages.append(gr.ChatMessage(
+                role="assistant", content=f"Error: {str(e)}"))
             yield messages

     def upload_file(self, file, file_uploads_log, allowed_file_types=None):
         import gradio as gr

         if file is None:

@@ -246,14 +300,9 @@ class GradioUI:
         if file_ext not in allowed_file_types:
             return gr.Textbox("File type disallowed", visible=True), file_uploads_log

         original_name = os.path.basename(file.name)
+        sanitized_name = re.sub(r"[^\w\-.]", "_", original_name)
+        file_path = os.path.join(self.file_upload_folder, sanitized_name)
         shutil.copy(file.name, file_path)

         return gr.Textbox(f"File uploaded: {file_path}", visible=True), file_uploads_log + [file_path]

@@ -264,9 +313,8 @@ class GradioUI:
         return (
             text_input
             + (
+                f"\nYou have been provided with these files: {file_uploads_log}"
+                if file_uploads_log else ""
             ),
             "",
             gr.Button(interactive=False),

@@ -278,92 +326,94 @@ class GradioUI:
     def create_app(self):
         import gradio as gr

+        with gr.Blocks(css=CUSTOM_CSS, theme="ocean", fill_height=True) as demo:
+            create_header()
+
             session_state = gr.State({})
             stored_messages = gr.State([])
             file_uploads_log = gr.State([])

+            # Main content area: Chat + Input
+            with gr.Row(equal_height=True, variant="panel", elem_classes="content-wrap"):
+                # Column for chat and input
+                with gr.Column(scale=3):
+                    # Input area moved here
+                    # gr.Markdown("**Your request**")
                     text_input = gr.Textbox(
+                        lines=2,
+                        label="Your request",
+                        placeholder="Enter your prompt here and press Shift+Enter or the button",
                     )
+                    submit_btn = gr.Button(
+                        "Submit", variant="primary", elem_classes="full-width-btn"
                     )

+                    # Chatbot
+                    chatbot = gr.Chatbot(
+                        label="Agent",
+                        type="messages",
+                        avatar_images=(
+                            None,
+                            "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png",
+                        ),
+                        resizeable=True,
+                        scale=1,
+                    )

+                # Optional: Column for file uploads
+                if self.file_upload_folder is not None:
+                    with gr.Column(scale=1):
+                        gr.Markdown("**Upload Files**")
+                        upload_file = gr.File(label="Upload a file")
+                        upload_status = gr.Textbox(
+                            label="Upload Status", interactive=False, visible=False
+                        )
+                        upload_file.change(
+                            self.upload_file,
+                            [upload_file, file_uploads_log],
+                            [upload_status, file_uploads_log],
+                        )
+
+            # Wiring interactions
+            text_input.submit(
+                self.log_user_message,
+                [text_input, file_uploads_log],
+                [stored_messages, text_input, submit_btn],
+            ).then(
+                self.interact_with_agent,
+                [stored_messages, chatbot, session_state],
+                [chatbot],
+            ).then(
+                lambda: (
+                    gr.update(value="", interactive=True,
+                              placeholder="Enter your prompt here and press Shift+Enter or the button"),
+                    gr.update(interactive=True),
+                ),
                 None,
+                [text_input, submit_btn],
+            )

+            submit_btn.click(
+                self.log_user_message,
+                [text_input, file_uploads_log],
+                [stored_messages, text_input, submit_btn],
+            ).then(
+                self.interact_with_agent,
+                [stored_messages, chatbot, session_state],
+                [chatbot],
+            ).then(
+                lambda: (
+                    gr.update(value="", interactive=True,
+                              placeholder="Enter your prompt here and press Shift+Enter or the button"),
+                    gr.update(interactive=True),
                 ),
+                None,
+                [text_input, submit_btn],
+            )

+            create_footer()

         return demo


+__all__ = ["stream_to_gradio", "GradioUI"]
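For reference, the new upload_file sanitization replaces every character outside letters, digits, underscore, dash, and dot with an underscore before the file is copied into file_upload_folder. A quick standalone check of that regex (sample names invented):

# Standalone check of the filename sanitization used in upload_file().
import re

for name in ["report (final).pdf", "notes v2.txt", "data&set.docx"]:
    print(re.sub(r"[^\w\-.]", "_", name))
# report__final_.pdf
# notes_v2.txt
# data_set.docx

Note that \w in Python 3 also matches accented and other Unicode word characters, so such characters pass through unchanged.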