Spaces:
Paused
Paused
adding modules
Browse files- .continue/mcpServers/playwright-mcp.yaml +8 -0
- .continue/models/local-models.yaml +12 -0
- client/app.py +19 -0
- client/requirements.txt +4 -0
- my-agent/agent.json +14 -0
- my-agent/agent.py +17 -0
.continue/mcpServers/playwright-mcp.yaml
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: Playwright mcpServer
|
| 2 |
+
version: 0.0.1
|
| 3 |
+
schema: v1
|
| 4 |
+
mcpServers:
|
| 5 |
+
- name: Browser search
|
| 6 |
+
command: npx
|
| 7 |
+
args:
|
| 8 |
+
- "@playwright/mcp@latest"
|
.continue/models/local-models.yaml
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: Ollama codegeex4:latest model
|
| 2 |
+
version: 0.0.1
|
| 3 |
+
schema: v1
|
| 4 |
+
models:
|
| 5 |
+
- provider: ollama
|
| 6 |
+
model: codegeex4:latest
|
| 7 |
+
defaultCompletionOptions:
|
| 8 |
+
contextLength: 8192
|
| 9 |
+
name: Ollama codegeex4
|
| 10 |
+
roles:
|
| 11 |
+
- chat
|
| 12 |
+
- edit
|
client/app.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Gradio chat UI driving a smolagents CodeAgent backed by remote MCP tools.

Connects to a hosted MCP server over SSE, hands its tools to a CodeAgent,
and serves a ChatInterface. Requires HF_TOKEN in the environment for the
inference client.
"""
import os

import gradio as gr
from smolagents import CodeAgent, InferenceClientModel, MCPClient

# Connect to the remote MCP server over SSE; its tools become agent tools.
mcp_client = MCPClient(
    {"url": "https://abidlabs-mcp-tool-http.hf.space/gradio_api/mcp/sse", "transport": "sse"}
)
try:
    tools = mcp_client.get_tools()
    model = InferenceClientModel(token=os.getenv("HF_TOKEN"))
    agent = CodeAgent(tools=[*tools], model=model)

    demo = gr.ChatInterface(
        # history is unused: each turn is an independent agent.run call.
        fn=lambda message, history: str(agent.run(message)),
        examples=["Prime factorization of 68"],
        title="Agent with MCP Tools",
        description="This is a simple agent that uses MCP tools to answer questions.",
    )

    demo.launch()
finally:
    # Always release the MCP connection, even if setup or launch fails.
    mcp_client.disconnect()
client/requirements.txt
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio[mcp]
|
| 2 |
+
smolagents[mcp]
|
| 3 |
+
mcp
|
| 4 |
+
fastmcp
|
my-agent/agent.json
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model": "Qwen/Qwen2.5-72B-Instruct",
|
| 3 |
+
"provider": "nebius",
|
| 4 |
+
"servers": [
|
| 5 |
+
{
|
| 6 |
+
"type": "stdio",
|
| 7 |
+
"command": "npx",
|
| 8 |
+
"args": [
|
| 9 |
+
"mcp-remote",
|
| 10 |
+
"http://localhost:7860/gradio_api/mcp/sse"
|
| 11 |
+
]
|
| 12 |
+
}
|
| 13 |
+
]
|
| 14 |
+
}
|
my-agent/agent.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Tiny huggingface_hub Agent wired to a local Gradio MCP server.

The agent runs Qwen2.5-72B-Instruct via the nebius provider and reaches the
Gradio MCP endpoint through the npx `mcp-remote` stdio bridge.
"""
from huggingface_hub import Agent

agent = Agent(
    model="Qwen/Qwen2.5-72B-Instruct",
    provider="nebius",
    servers=[
        {
            # mcp-remote bridges the SSE endpoint to a stdio MCP server.
            "command": "npx",
            "args": [
                "mcp-remote",
                "http://localhost:7860/gradio_api/mcp/sse",  # Your Gradio MCP server
            ],
        }
    ],
)