Commit 1778e91
marks committed
1 parent: 485799f

Added code files

Files changed:
- .dockerignore +53 -0
- .gitignore +1 -0
- Dockerfile +62 -0
- __init__.py +0 -0
- api_clients.py +171 -0
- interface.py +256 -0
- logger.py +72 -0
- requirements.txt +38 -0
- setup.py +11 -0
.dockerignore
ADDED
@@ -0,0 +1,53 @@
+# Python virtual environment
+.venv/
+venv/
+ENV/
+
+# Git
+.git
+.gitignore
+
+# Environment files
+.env
+*.env
+
+# Python cache
+__pycache__/
+*.py[cod]
+*$py.class
+.Python
+*.so
+
+# Development/IDE
+.vscode/
+.idea/
+*.swp
+*.swo
+
+# Documentation
+docs/
+*.md
+
+# Logs and databases
+*.log
+*.sqlite
+*.db
+
+# OS generated files
+.DS_Store
+.DS_Store?
+._*
+.Spotlight-V100
+.Trashes
+ehthumbs.db
+Thumbs.db
+
+# Test cache
+.pytest_cache/
+.coverage
+htmlcov/
+
+# Distribution/packaging
+dist/
+build/
+*.egg-info/
.gitignore
ADDED
@@ -0,0 +1 @@
+/.env
Dockerfile
ADDED
@@ -0,0 +1,62 @@
+FROM python:3.11-slim
+
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+    wget \
+    gnupg \
+    curl \
+    unzip \
+    xvfb \
+    libgconf-2-4 \
+    libxss1 \
+    libnss3 \
+    libnspr4 \
+    libasound2 \
+    libatk1.0-0 \
+    libatk-bridge2.0-0 \
+    libcups2 \
+    libdbus-1-3 \
+    libdrm2 \
+    libgbm1 \
+    libgtk-3-0 \
+    libxcomposite1 \
+    libxdamage1 \
+    libxfixes3 \
+    libxrandr2 \
+    xdg-utils \
+    fonts-liberation \
+    dbus \
+    xauth \
+    xvfb \
+    supervisor \
+    net-tools \
+    procps \
+    git \
+    python3-numpy \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install Rust and set PATH
+ENV RUSTUP_HOME=/usr/local/rustup \
+    CARGO_HOME=/usr/local/cargo \
+    PATH="/usr/local/cargo/bin:$PATH"
+
+# Install Rust
+RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+
+# Install Playwright and browsers with system dependencies
+ENV PLAYWRIGHT_BROWSERS_PATH=/ms-playwright
+RUN pip install playwright
+RUN playwright install --with-deps chromium
+RUN playwright install-deps
+
+WORKDIR /app
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy application code
+COPY . .
+
+
+# Start the Gradio interface
+CMD ["python", "interface.py"]
__init__.py
ADDED
File without changes
api_clients.py
ADDED
@@ -0,0 +1,171 @@
+import time
+from functools import lru_cache
+from typing import List, Tuple, Optional
+import aiohttp
+import elevenlabs
+from contextlib import asynccontextmanager
+from logger import setup_logger, log_execution_time, log_async_execution_time
+
+logger = setup_logger("api_clients")
+
+class OpenRouterClient:
+    """Handles OpenRouter API interactions with comprehensive logging and error tracking"""
+
+    def __init__(self, api_key: str):
+        logger.info("Initializing OpenRouter client")
+        if not api_key or len(api_key) < 32:
+            logger.error("Invalid API key format")
+            raise ValueError("Invalid OpenRouter API key")
+
+        self.api_key = api_key
+        self.base_url = "https://openrouter.ai/api/v1"
+        self.headers = {
+            "Authorization": f"Bearer {api_key}",
+            "Content-Type": "application/json",
+        }
+        logger.debug("OpenRouter client initialized successfully")
+
+    @asynccontextmanager
+    async def get_session(self):
+        logger.debug("Creating new aiohttp session")
+        async with aiohttp.ClientSession(headers=self.headers) as session:
+            yield session
+
+    async def get_models(self) -> List[Tuple[str, str]]:
+        """
+        Fetch available models from OpenRouter API
+
+        Returns:
+            List of tuples containing (model_id, model_description)
+
+        Raises:
+            ValueError: If API request fails
+        """
+        logger.info("Fetching available models from OpenRouter")
+        async with self.get_session() as session:
+            async with session.get(f"{self.base_url}/models") as response:
+                if response.status != 200:
+                    error_msg = await response.text()
+                    logger.error(f"Failed to fetch models: {error_msg}")
+                    raise ValueError(f"Failed to fetch models: {error_msg}")
+
+                models = await response.json()
+                logger.info(f"Successfully fetched {len(models)} models")
+                logger.debug(f"Available models: {[model['name'] for model in models]}")
+                return [(model['id'], f"{model['name']} ({model['context_length']} tokens)")
+                        for model in models]
+
+    @log_async_execution_time(logger)
+    async def generate_script(self, content: str, prompt: str, model_id: str) -> str:
+        """
+        Generate a podcast script with detailed progress tracking and validation
+
+        Performance metrics and content analysis are logged at each step.
+        """
+        logger.info(f"Starting script generation with model: {model_id}")
+        logger.debug(f"Input metrics - Content: {len(content)} chars, Prompt: {len(prompt)} chars")
+
+        # Validate inputs
+        if not content or len(content) < 100:
+            logger.error("Content too short for meaningful script generation")
+            raise ValueError("Insufficient content for script generation")
+
+        if not prompt or len(prompt) < 10:
+            logger.error("Prompt too short or missing")
+            raise ValueError("Please provide a more detailed prompt")
+
+        try:
+            async with self.get_session() as session:
+                logger.debug("Preparing script generation request")
+                response = await self._make_script_request(session, content, prompt, model_id)
+
+                script = response['choices'][0]['message']['content']
+                logger.info(f"Script generated successfully: {len(script)} chars")
+                logger.debug(f"Script preview: {script[:200]}...")
+
+                return script
+        except Exception as e:
+            logger.error("Script generation failed", exc_info=True)
+            raise
+
+    async def _make_script_request(self, session, content, prompt, model_id):
+        async with session.post(
+            f"{self.base_url}/chat/completions",
+            json={
+                "model": model_id,
+                "messages": [
+                    {
+                        "role": "system",
+                        "content": "You are an expert podcast script writer. Create engaging, conversational content."
+                    },
+                    {
+                        "role": "user",
+                        "content": f"""Based on this content: {content}
+                        Create a 3-minute podcast script focusing on: {prompt}
+                        Format as a natural conversation with clear speaker parts.
+                        Include [HOST] and [GUEST] markers for different voices."""
+                    }
+                ]
+            }
+        ) as response:
+            logger.debug("Sending script generation request")
+
+            if response.status != 200:
+                error_msg = await response.text()
+                logger.error(f"Script generation failed: {error_msg}")
+                raise ValueError(f"Script generation failed: {error_msg}")
+
+            return await response.json()
+
+class ElevenLabsClient:
+    """Handles ElevenLabs API interactions with detailed performance tracking"""
+
+    def __init__(self, api_key: str):
+        logger.info("Initializing ElevenLabs client")
+        self.api_key = api_key
+        elevenlabs.set_api_key(api_key)
+
+    @lru_cache(maxsize=1)
+    def get_voices(self) -> List[Tuple[str, str]]:
+        """
+        Fetch available voices from ElevenLabs
+
+        Returns:
+            List of tuples containing (voice_id, voice_name)
+        """
+        logger.info("Fetching available voices from ElevenLabs")
+        voices = elevenlabs.voices()
+        logger.info(f"Successfully fetched {len(voices)} voices")
+        logger.debug(f"Available voices: {[voice.name for voice in voices]}")
+        return [(voice.voice_id, voice.name) for voice in voices]
+
+    @log_execution_time(logger)
+    def generate_audio(self, text: str, voice_id: str) -> bytes:
+        """
+        Generate audio with comprehensive error handling and quality checks
+
+        Logs detailed metrics about the input text and resulting audio.
+        """
+        logger.info(f"Starting audio generation with voice: {voice_id}")
+        logger.debug(f"Input text length: {len(text)} chars")
+
+        if len(text) > 5000:
+            logger.warning(f"Long text detected ({len(text)} chars), may impact performance")
+
+        try:
+            start_time = time.time()
+            audio = elevenlabs.generate(
+                text=text,
+                voice=voice_id,
+                model="eleven_monolingual_v1"
+            )
+
+            duration = time.time() - start_time
+            audio_size = len(audio)
+            logger.info(f"Audio generated: {audio_size} bytes in {duration:.2f} seconds")
+            logger.debug(f"Audio generation rate: {len(text)/duration:.2f} chars/second")
+
+            return audio
+        except Exception as e:
+            logger.error("Audio generation failed", exc_info=True)
+            raise
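
Taken together, the two clients above cover model listing, script generation, voice listing, and speech synthesis. A minimal sketch of driving them directly, outside the Gradio app, assuming api_clients.py is importable from the working directory; the API keys, article text, and output filename are placeholders:

import asyncio

from api_clients import OpenRouterClient, ElevenLabsClient

async def main() -> None:
    # Placeholder credentials; OpenRouterClient rejects keys shorter than 32 characters.
    openrouter = OpenRouterClient("YOUR_OPENROUTER_KEY_AT_LEAST_32_CHARS")
    tts = ElevenLabsClient("YOUR_ELEVENLABS_KEY")

    models = await openrouter.get_models()          # [(model_id, "name (ctx tokens)"), ...]
    model_id = models[0][0]

    article_text = "..."                            # scraped source text, >= 100 chars
    script = await openrouter.generate_script(
        content=article_text,
        prompt="Turn the key points into a two-voice conversation",
        model_id=model_id,
    )

    voice_id, _voice_name = tts.get_voices()[0]     # first available ElevenLabs voice
    audio = tts.generate_audio(script, voice_id)    # raw audio bytes
    with open("episode.mp3", "wb") as f:
        f.write(audio)

if __name__ == "__main__":
    asyncio.run(main())
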
interface.py
ADDED
@@ -0,0 +1,256 @@
+import asyncio
+import os
+import time
+from dataclasses import dataclass
+from typing import List, Optional, AsyncGenerator
+import gradio as gr
+from dotenv import load_dotenv
+from langchain_openai import ChatOpenAI
+from langchain_anthropic import ChatAnthropic
+from langchain_google_genai import ChatGoogleGenerativeAI
+from rich.console import Console
+from rich.panel import Panel
+from rich.text import Text
+from logger import setup_logger, log_execution_time, log_async_execution_time
+
+from browser_use import Agent, Browser
+from browser_use.browser.browser import BrowserContext
+from api_clients import OpenRouterClient, ElevenLabsClient
+
+load_dotenv()
+
+console = Console()
+logger = setup_logger("interface")
+
+@dataclass
+class ActionResult:
+    is_done: bool
+    extracted_content: Optional[str]
+    error: Optional[str]
+    include_in_memory: bool
+
+
+@dataclass
+class AgentHistoryList:
+    all_results: List[ActionResult]
+    all_model_outputs: List[dict]
+
+
+def parse_agent_history(history_str: str) -> None:
+    # Split the content into sections based on ActionResult entries
+    sections = history_str.split('ActionResult(')
+
+    for i, section in enumerate(sections[1:], 1):  # Skip first empty section
+        # Extract relevant information
+        content = ''
+        if 'extracted_content=' in section:
+            content = section.split('extracted_content=')[1].split(',')[0].strip("'")
+
+        if content:
+            header = Text(f'Step {i}', style='bold blue')
+            panel = Panel(content, title=header, border_style='blue')
+            console.print(panel)
+            console.print()
+
+
+async def run_browser_task(
+    task: str,
+    api_key: str,
+    provider: str = 'openai',
+    model: str = 'gpt-4-vision',
+    headless: bool = True,
+) -> str:
+    if not api_key.strip():
+        return 'Please provide an API key'
+
+    if provider == 'openai':
+        os.environ['OPENAI_API_KEY'] = api_key
+        llm = ChatOpenAI(model=model)
+    elif provider == 'anthropic':
+        os.environ['ANTHROPIC_API_KEY'] = api_key
+        llm = ChatAnthropic(model=model)
+    else:  # google
+        os.environ['GOOGLE_API_KEY'] = api_key
+        llm = ChatGoogleGenerativeAI(model=model)
+
+    try:
+        agent = Agent(
+            task=task,
+            llm=llm,
+            browser=Browser(BrowserContext(headless=True))
+        )
+        result = await agent.run()
+        # TODO: The result could be parsed better
+        return result
+    except Exception as e:
+        return f'Error: {str(e)}'
+
+
+@log_async_execution_time(logger)
+async def scrape_content(url: str) -> str:
+    """
+    Scrape and summarize content from the given URL using browser automation
+
+    This function performs the following steps:
+    1. Validates the input URL
+    2. Initializes the browser agent
+    3. Extracts and summarizes the content
+
+    Args:
+        url: Target URL to scrape
+
+    Returns:
+        Summarized content suitable for podcast generation
+
+    Raises:
+        ValueError: If URL is invalid or content extraction fails
+    """
+    logger.info(f"Starting content scrape for URL: {url}")
+
+    # Input validation
+    if not url.startswith(('http://', 'https://')):
+        logger.error(f"Invalid URL format: {url}")
+        raise ValueError("URL must start with http:// or https://")
+
+    try:
+        logger.debug("Initializing LLM and browser agent")
+        llm = ChatOpenAI(model="gpt-4")
+        agent = Agent(
+            task=f"Visit this URL: {url} and extract the main content. Summarize it in a clear and concise way.",
+            llm=llm,
+            browser=Browser(BrowserContext(headless=True))
+        )
+
+        logger.info("Executing content extraction")
+        result = await agent.run()
+
+        logger.debug(f"Content extraction successful. Length: {len(result)} chars")
+        logger.debug(f"Content preview: {result[:200]}...")
+
+        return result
+    except Exception as e:
+        logger.error(f"Content extraction failed for {url}", exc_info=True)
+        raise
+
+@log_async_execution_time(logger)
+async def create_podcast(
+    url: str,
+    prompt: str,
+    elevenlabs_key: str,
+    voice_id: str,
+    openrouter_key: str,
+    model_id: str,
+) -> AsyncGenerator[tuple[Optional[str], str], None]:
+    """
+    Create a podcast through a multi-step process:
+    1. Content extraction from URL
+    2. Script generation using AI
+    3. Voice synthesis
+
+    Progress updates are yielded at each step for UI feedback.
+    """
+    logger.info(f"Starting podcast creation for URL: {url}")
+    logger.debug(f"Parameters - Voice: {voice_id}, Model: {model_id}")
+    logger.debug(f"Prompt length: {len(prompt)} chars")
+
+    try:
+        # Initialize clients with validation
+        logger.debug("Initializing API clients")
+        openrouter = OpenRouterClient(openrouter_key)
+        elevenlabs = ElevenLabsClient(elevenlabs_key)
+
+        # Phase 1: Content scraping
+        logger.info("Phase 1/3: Content scraping")
+        yield None, "Scraping website content..."
+        content = await scrape_content(url)
+        logger.debug(f"Scraped content length: {len(content)} chars")
+
+        # Phase 2: Script generation
+        logger.info("Phase 2/3: Script generation")
+        yield None, "Generating podcast script..."
+        script = await openrouter.generate_script(content, prompt, model_id)
+        logger.debug(f"Generated script length: {len(script)} chars")
+
+        # Phase 3: Audio synthesis
+        logger.info("Phase 3/3: Audio generation")
+        yield None, "Converting to audio..."
+        audio = elevenlabs.generate_audio(script, voice_id)
+        logger.debug(f"Generated audio size: {len(audio)} bytes")
+
+        # Save output
+        audio_path = f"podcast_{int(time.time())}.mp3"
+        logger.debug(f"Saving audio to: {audio_path}")
+        with open(audio_path, "wb") as f:
+            f.write(audio)
+
+        logger.info("Podcast creation completed successfully")
+        yield audio_path, "Podcast created successfully!"
+
+    except Exception as e:
+        logger.error("Podcast creation failed", exc_info=True)
+        yield None, f"Error: {str(e)}"
+
+def create_ui():
+    logger.info("Initializing Gradio interface")
+    with gr.Blocks(title='PodcastCreator', theme=gr.themes.Soft()) as interface:
+        with gr.Row():
+            with gr.Column(scale=2):
+                url_input = gr.Textbox(label='Source URL', placeholder='Enter the URL...')
+                prompt = gr.Textbox(label='Podcast Topic', lines=3)
+
+                with gr.Row():
+                    with gr.Column():
+                        elevenlabs_key = gr.Textbox(
+                            label='ElevenLabs API Key',
+                            type='password',
+                            placeholder='Enter key...'
+                        )
+                        voice = gr.Dropdown(label='Voice', choices=[])
+
+                    with gr.Column():
+                        openrouter_key = gr.Textbox(
+                            label='OpenRouter API Key',
+                            type='password',
+                            placeholder='Enter key...'
+                        )
+                        model = gr.Dropdown(label='AI Model', choices=[])
+
+                submit_btn = gr.Button('Create Podcast', variant='primary')
+
+            with gr.Column(scale=1):
+                audio_output = gr.Audio(label="Generated Podcast")
+                status = gr.Textbox(label='Status', interactive=False)
+
+        # Event handlers
+        def update_voices(key):
+            if not key:
+                return gr.Dropdown(choices=[])
+            try:
+                client = ElevenLabsClient(key)
+                voices = client.get_voices()
+                return gr.Dropdown(choices=voices)
+            except Exception as e:
+                return gr.Dropdown(choices=[("", f"Error: {str(e)}")])
+
+        async def update_models(key):
+            if not key:
+                return gr.Dropdown(choices=[])
+            try:
+                client = OpenRouterClient(key)
+                models = await client.get_models()
+                return gr.Dropdown(choices=models)
+            except Exception as e:
+                return gr.Dropdown(choices=[("", f"Error: {str(e)}")])
+
+        elevenlabs_key.change(fn=update_voices, inputs=elevenlabs_key, outputs=voice)
+        openrouter_key.change(fn=update_models, inputs=openrouter_key, outputs=model)
+
+        submit_btn.click(
+            fn=create_podcast,
+            inputs=[url_input, prompt, elevenlabs_key, voice, openrouter_key, model],
+            outputs=[audio_output, status]
+        )
+
+    return interface
+
+if __name__ == '__main__':
+    demo = create_ui()
+    demo.launch()
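
Because create_podcast is an async generator that yields (audio_path, status) tuples, the same pipeline can also be consumed without the UI. A sketch, with placeholder keys, voice ID, and model ID:

import asyncio

from interface import create_podcast

async def main() -> None:
    # Each phase yields a progress tuple; the final one carries the saved file path.
    async for audio_path, status in create_podcast(
        url="https://example.com/article",
        prompt="Focus on the practical implications",
        elevenlabs_key="YOUR_ELEVENLABS_KEY",    # placeholder
        voice_id="YOUR_VOICE_ID",                # placeholder
        openrouter_key="YOUR_OPENROUTER_KEY",    # placeholder
        model_id="YOUR_OPENROUTER_MODEL_ID",     # placeholder
    ):
        print(status)
        if audio_path:
            print(f"Podcast written to {audio_path}")

if __name__ == "__main__":
    asyncio.run(main())
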
logger.py
ADDED
@@ -0,0 +1,72 @@
+import logging
+import sys
+import time
+from functools import wraps
+from typing import Callable, Any
+from rich.logging import RichHandler
+from rich.console import Console
+
+console = Console()
+
+def setup_logger(name: str, level: str = "INFO") -> logging.Logger:
+    """
+    Configure and return a Rich-formatted logger with enhanced debugging capabilities
+
+    Args:
+        name: Logger identifier, typically module name
+        level: Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
+
+    Returns:
+        Configured logger instance with rich formatting and error tracing
+    """
+    logging.basicConfig(
+        level=level,
+        format="%(asctime)s [%(name)s] %(levelname)s: %(message)s",
+        datefmt="[%X]",
+        handlers=[
+            RichHandler(rich_tracebacks=True, markup=True),
+            logging.FileHandler(f"{name}.log")  # Also log to file
+        ]
+    )
+
+    logger = logging.getLogger(name)
+    logger.setLevel(level)
+    return logger
+
+def log_execution_time(logger: logging.Logger) -> Callable:
+    """Decorator to log function execution time"""
+    def decorator(func: Callable) -> Callable:
+        @wraps(func)
+        def wrapper(*args: Any, **kwargs: Any) -> Any:
+            start_time = time.time()
+            logger.debug(f"Starting {func.__name__} with args: {args}, kwargs: {kwargs}")
+
+            try:
+                result = func(*args, **kwargs)
+                execution_time = time.time() - start_time
+                logger.debug(f"Completed {func.__name__} in {execution_time:.2f} seconds")
+                return result
+            except Exception as e:
+                logger.error(f"Error in {func.__name__}: {str(e)}", exc_info=True)
+                raise
+        return wrapper
+    return decorator
+
+def log_async_execution_time(logger: logging.Logger) -> Callable:
+    """Decorator to log async function execution time"""
+    def decorator(func: Callable) -> Callable:
+        @wraps(func)
+        async def wrapper(*args: Any, **kwargs: Any) -> Any:
+            start_time = time.time()
+            logger.debug(f"Starting async {func.__name__} with args: {args}, kwargs: {kwargs}")
+
+            try:
+                result = await func(*args, **kwargs)
+                execution_time = time.time() - start_time
+                logger.debug(f"Completed async {func.__name__} in {execution_time:.2f} seconds")
+                return result
+            except Exception as e:
+                logger.error(f"Error in async {func.__name__}: {str(e)}", exc_info=True)
+                raise
+        return wrapper
+    return decorator
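
The logging helpers above are meant to be shared across the other modules. A small sketch of applying them; the example functions are illustrative, not part of the commit:

import asyncio

from logger import setup_logger, log_execution_time, log_async_execution_time

logger = setup_logger("example", level="DEBUG")

@log_execution_time(logger)
def tokenize(text: str) -> list:
    # Synchronous call: start, duration, and any exception are logged.
    return text.split()

@log_async_execution_time(logger)
async def fetch(url: str) -> str:
    # Async call gets the same treatment via the awaiting wrapper.
    await asyncio.sleep(0.1)   # stand-in for real I/O
    return f"contents of {url}"

print(tokenize("hello rich logging"))
print(asyncio.run(fetch("https://example.com")))
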
requirements.txt
ADDED
@@ -0,0 +1,38 @@
+# Core AI and Language Models
+langchain-openai>=0.0.1
+langchain-google-genai>=0.0.1
+langchain-anthropic>=0.0.1
+langchain-ollama>=0.0.1
+
+# Web and Browser Automation
+playwright>=1.40.0
+browser-use>=0.1.20
+aiohttp>=3.8.0
+requests>=2.31.0
+
+# Audio Generation
+elevenlabs>=0.1.0
+
+# Data Handling and Validation
+pydantic>=2.0.0
+dataclasses>=0.6
+typing-extensions>=4.8.0
+
+# Configuration and Environment
+python-dotenv>=1.0.0
+
+# Error Handling and Logging
+python-json-logger>=2.0.0  # Structured JSON logging
+rich>=13.0.0  # For console output formatting
+backoff>=2.2.0  # Retry mechanism
+tenacity>=8.2.0  # Retry utilities
+
+# UI and Interface
+gradio>=4.0.0  # For better themes and UI components
+
+# Async Support
+asyncio>=3.4.3
+
+# Testing and Development
+pytest>=7.0.0  # For running tests
+mypy>=1.0.0  # For type checking
setup.py
ADDED
@@ -0,0 +1,11 @@
+from setuptools import setup, find_packages
+
+setup(
+    name="podcaster",
+    version="0.1",
+    packages=find_packages(),
+    install_requires=[
+        'rich',
+        # add other dependencies here
+    ]
+)