import os
import json
import time
from typing import Dict, List, Any

from rich import print
from rich.console import Console
from rich.table import Table
from rich.panel import Panel

from deepinfra_client import DeepInfraClient

# Shared Rich console used for all styled terminal output in this script.
console = Console()
def print_proxy_status(client):
    """Print the proxy and IP rotation status"""
    lines = []

    if client.use_proxy_rotation and client.proxy_finder:
        # One count per proxy type (http, socks5, ...).
        counts = {kind: len(entries) for kind, entries in client.proxy_finder.proxy_dict.items()}
        lines.append(f"Proxy rotation: [green]Enabled[/green] ({sum(counts.values())} proxies)")

        proxy_table = Table(title="Available Proxies")
        proxy_table.add_column("Type", style="cyan")
        proxy_table.add_column("Count", style="green")
        for kind, count in counts.items():
            # Skip proxy types for which nothing was found.
            if count > 0:
                proxy_table.add_row(kind, str(count))
        console.print(proxy_table)
    else:
        lines.append("Proxy rotation: [red]Disabled[/red]")

    if client.use_ip_rotation and client.ip_rotator:
        lines.append(f"IP rotation: [green]Enabled[/green] (AWS API Gateway - {len(client.ip_rotator.gateways)} regions)")
    else:
        lines.append("IP rotation: [red]Disabled[/red]")

    ua_state = "[green]Enabled[/green]" if client.use_random_user_agent else "[red]Disabled[/red]"
    lines.append(f"User-Agent rotation: {ua_state}")

    console.print(Panel("\n".join(lines), title="Client Configuration", border_style="blue"))
def chat_with_model():
    """Run an interactive terminal chat with a DeepInfra model.

    Builds a client with User-Agent, IP, and proxy rotation enabled, prints
    its configuration and the available models, then loops on user input
    until the user types 'quit', 'exit', or 'bye'. Every third user message
    the proxy pool and session are refreshed. On a failed request the
    session is refreshed and the request is retried once before the turn is
    abandoned.
    """
    client = DeepInfraClient(
        api_key=os.getenv("DEEPINFRA_API_KEY"),
        use_random_user_agent=True,
        use_ip_rotation=True,
        use_proxy_rotation=True,
        proxy_types=['http', 'socks5'],
        model="meta-llama/Llama-3.3-70B-Instruct-Turbo"
    )

    print_proxy_status(client)

    # Best-effort model listing; a failure here should not abort the chat.
    console.print("\n[bold cyan]Fetching available models...[/bold cyan]")
    try:
        models_response = client.models.list()
        model_table = Table(title="Available Models")
        model_table.add_column("Model", style="green")
        for model in models_response["data"]:
            model_table.add_row(model["id"])
        console.print(model_table)
    except Exception as e:
        console.print(f"[red]Error fetching models: {str(e)}[/red]")

    console.print("\n[bold green]Starting interactive chat (type 'quit' to exit)[/bold green]")
    console.print("[yellow]Note: Every 3 messages, the client will rotate IP and proxy[/yellow]\n")

    messages = [{"role": "system", "content": "You are a helpful assistant."}]
    message_count = 0

    while True:
        user_input = input("\nYou: ")
        if user_input.lower() in ["quit", "exit", "bye"]:
            break

        messages.append({"role": "user", "content": user_input})

        # Rotate proxies and session every third user message.
        message_count += 1
        if message_count % 3 == 0:
            console.print("[yellow]Rotating IP and proxy...[/yellow]")
            client.refresh_proxies()
            client.refresh_session()

        console.print("\n[cyan]Waiting for response...[/cyan]")
        start_time = time.time()

        try:
            response = client.chat.create(
                messages=messages,
                temperature=0.7,
                max_tokens=1024
            )
        except Exception as e:
            console.print(f"[bold red]Error: {str(e)}[/bold red]")
            console.print("[yellow]Refreshing session and trying again...[/yellow]")
            client.refresh_session()
            # Bug fix: the original printed "trying again" but never retried.
            try:
                response = client.chat.create(
                    messages=messages,
                    temperature=0.7,
                    max_tokens=1024
                )
            except Exception as retry_error:
                console.print(f"[bold red]Retry failed: {str(retry_error)}[/bold red]")
                # Drop the unanswered user turn so the history keeps
                # alternating user/assistant roles on the next request.
                messages.pop()
                continue

        elapsed = time.time() - start_time
        assistant_message = response["choices"][0]["message"]["content"]

        messages.append({"role": "assistant", "content": assistant_message})

        console.print(f"\n[bold green]Assistant[/bold green] [dim]({elapsed:.2f}s)[/dim]:")
        console.print(assistant_message)
def stream_example():
    """Demonstrate streaming responses"""
    client = DeepInfraClient(
        use_random_user_agent=True,
        use_ip_rotation=True,
        use_proxy_rotation=True
    )

    print_proxy_status(client)

    prompt = "Write a short story about a robot that learns to feel emotions."
    console.print(f"\n[bold cyan]Prompt:[/bold cyan] {prompt}")
    console.print("\n[bold green]Streaming response:[/bold green]")

    try:
        stream = client.completions.create(
            prompt=prompt,
            temperature=0.8,
            max_tokens=1024,
            stream=True
        )

        full_response = ""
        for chunk in stream:
            # Guard clauses: skip chunks without choices or without new text.
            if 'choices' not in chunk or len(chunk['choices']) == 0:
                continue
            delta = chunk['choices'][0].get('delta', {})
            if 'content' not in delta:
                continue
            piece = delta['content']
            # Echo each fragment immediately so the output streams live.
            print(piece, end='', flush=True)
            full_response += piece
        print("\n")

    except Exception as e:
        console.print(f"\n[bold red]Error: {str(e)}[/bold red]")
if __name__ == "__main__":
    console.print(Panel.fit(
        "[bold green]DeepInfra Client Example[/bold green]\n"
        "This example demonstrates the enhanced client with proxy and IP rotation",
        border_style="yellow"
    ))

    # Menu choice -> demo function dispatch table.
    demos = {"1": chat_with_model, "2": stream_example}

    while True:
        console.print("\n[bold cyan]Choose an option:[/bold cyan]")
        console.print("1. Interactive Chat")
        console.print("2. Streaming Example")
        console.print("3. Exit")

        choice = input("\nEnter your choice (1-3): ")

        if choice == "3":
            console.print("[yellow]Exiting...[/yellow]")
            break

        handler = demos.get(choice)
        if handler is not None:
            handler()
        else:
            console.print("[red]Invalid choice. Please try again.[/red]")