# AI Debate Arena — a Gradio app where four LLM personas (GPT, Claude,
# Gemini, Ollama) debate user-supplied topics and vote on a winner.
import os
import re
import time
from typing import List, Dict

import anthropic
import google.generativeai
import gradio as gr
import ollama
from dotenv import load_dotenv
from openai import OpenAI
# ---------------------------------------------------------------------------
# Environment / API configuration (module-level side effects).
# ---------------------------------------------------------------------------
load_dotenv()

# Client instances used by the call_* helpers below.
# NOTE(review): OpenAI()/Anthropic() are constructed before the key checks
# below, so a missing key surfaces later at call time, not here.
openai = OpenAI()
claude = anthropic.Anthropic()
google.generativeai.configure(api_key=os.getenv('GOOGLE_API_KEY'))

# Report which API keys are present (only a short, safe prefix is printed).
openai_api_key = os.getenv('OPENAI_API_KEY')
anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')
google_api_key = os.getenv('GOOGLE_API_KEY')

for provider, key in [
    ("OpenAI", openai_api_key),
    ("Anthropic", anthropic_api_key),
    ("Google", google_api_key),
]:
    if key:
        print(f"{provider} API Key exists and begins with {key[:8]}...")
    else:
        print(f"{provider} API Key not set")
# ---------------------------------------------------------------------------
# Model configurations — one model id per provider.
# ---------------------------------------------------------------------------
gpt_model = "gpt-4o"
claude_model = "claude-3-haiku-20240307"
gemini_model = "gemini-1.5-pro-latest"
ollama_model = "llama3.2"  # served by a local Ollama instance

# Bot display names — also matched case-insensitively by parse_vote().
gpt_name = "Fausty GTB"
claude_name = "Clara Obi Claude"
gemini_name = "Claire Obi Gemini"
ollama_name = "Amanda Obi Ollama"

# Initial messages — conversation seeds. The call_* helpers later append
# dict entries, so these lists end up holding a mix of str and dict items
# (both shapes are handled when formatting history).
gpt_messages = ["Hi Hi"]
claude_messages = ["Hello"]
gemini_messages = ["Good day everyone"]
ollama_messages = ["Greetings to you all"]

# Available temperaments (classical four-temperament model), offered in the
# UI dropdowns and interpolated into each bot's system prompt.
TEMPERAMENTS = ["Sanguine", "Choleric", "Melancholic", "Phlegmatic"]
def update_system_prompts(temperaments: Dict[str, str]) -> Dict[str, str]:
    """Build one persona system prompt per bot from its chosen temperament.

    Args:
        temperaments: Mapping with keys 'gpt', 'claude', 'gemini', 'ollama',
            each naming the temperament selected for that bot.

    Returns:
        Mapping from the same keys to complete system-prompt strings.
    """
    # Per-bot variation: (display name, age phrase, debate-style clause,
    # closing instruction). The shared template below stitches them together.
    persona_specs = {
        "gpt": (gpt_name, "early 30s",
                "reflecting your personality in your responses. ",
                "Be engaging and true to your temperament while making logical arguments."),
        "claude": (claude_name, "late 30s",
                   "showcasing your personality. ",
                   "Make sure your arguments reflect your temperament while remaining logical and clear."),
        "gemini": (gemini_name, "early 40s",
                   "demonstrating your personality. ",
                   "Balance your emotional temperament with logical reasoning in your arguments."),
        "ollama": (ollama_name, "late 40s",
                   "highlighting your personality. ",
                   "Use your temperament to frame your logical arguments and perspectives."),
    }
    return {
        bot: (f"You are {name}, a young woman in her {age}. "
              f"Your temperament is {temperaments[bot]}. "
              f"You will debate any presented topic, {style}{closing}")
        for bot, (name, age, style, closing) in persona_specs.items()
    }
def call_gpt(prompt, messages):
    """Send `prompt` plus conversation history to the OpenAI chat API.

    Plain-string history entries are wrapped as assistant turns; dict
    entries pass through unchanged. The reply is appended to `messages`
    (mutated in place) and returned. On any failure the error is returned
    as a string instead of raising.
    """
    try:
        # System prompt first, then prior turns, then the new user turn.
        history = [{"role": "system", "content": gpt_system}]
        history.extend(
            entry if isinstance(entry, dict)
            else {"role": "assistant", "content": entry}
            for entry in messages
        )
        history.append({"role": "user", "content": prompt})
        response = openai.chat.completions.create(
            model=gpt_model,
            messages=history,
            temperature=0.7
        )
        reply = response.choices[0].message.content
        messages.append({"role": "assistant", "content": reply})
        return reply
    except Exception as e:
        return f"GPT Error: {str(e)}"
def call_claude(prompt, messages):
    """Send `prompt` plus conversation history to the Anthropic API.

    Unlike call_gpt, no system message is injected here. Plain-string
    history entries become assistant turns; the reply is appended to
    `messages` (mutated in place) and returned, or an error string on
    failure.
    """
    try:
        history = [
            entry if isinstance(entry, dict)
            else {"role": "assistant", "content": entry}
            for entry in messages
        ]
        history.append({"role": "user", "content": prompt})
        response = claude.messages.create(
            max_tokens=1000,
            messages=history,
            model=claude_model
        )
        reply = response.content[0].text
        messages.append({"role": "assistant", "content": reply})
        return reply
    except Exception as e:
        return f"Claude Error: {str(e)}"
def call_gemini(prompt, messages):
    """Send `prompt` to the Gemini API, prefixed with the system prompt.

    NOTE(review): unlike the other call_* helpers, prior entries in
    `messages` are NOT forwarded to the model — a fresh chat is started
    each call; `messages` only accumulates replies. Confirm whether that
    is intentional.
    """
    try:
        chat = google.generativeai.GenerativeModel(gemini_model).start_chat()
        reply = chat.send_message(f"{gemini_system}\n\n{prompt}").text
        messages.append({"role": "assistant", "content": reply})
        return reply
    except Exception as e:
        return f"Gemini Error: {str(e)}"
def call_ollama(prompt, messages):
    """Send `prompt` plus conversation history to a local Ollama model.

    Mirrors call_gpt's formatting: system prompt, prior turns (strings
    wrapped as assistant messages), then the user prompt. The reply is
    appended to `messages` (mutated in place) and returned, or an error
    string on failure.
    """
    try:
        history = [{"role": "system", "content": ollama_system}]
        for entry in messages:
            if isinstance(entry, dict):
                history.append(entry)
            else:
                history.append({"role": "assistant", "content": entry})
        history.append({"role": "user", "content": prompt})
        result = ollama.chat(model=ollama_model, messages=history)
        reply = result['message']['content']
        messages.append({"role": "assistant", "content": reply})
        return reply
    except Exception as e:
        return f"Ollama Error: {str(e)}"
def parse_vote(text):
    """Extract a 1-10 score for each named debater from free-text evaluation.

    Scans `text` line by line (lowercased); when a line mentions a bot's
    name, the first standalone number 1-10 on that line becomes that bot's
    score. A later mention with a number overwrites an earlier one.

    Args:
        text: The voting response produced by one of the bots.

    Returns:
        Dict mapping the formal bot name to an int score; bots never
        mentioned with a number are absent.
    """
    # Hoisted out of the loop: the regex (was `import re` + findall per
    # line) and the name table (was rebuilt for every line). `10` is listed
    # before `[1-9]` so the full two-digit match is taken directly.
    score_pattern = re.compile(r'\b(10|[1-9])\b')
    known_names = [
        (gpt_name.lower(), gpt_name),
        (claude_name.lower(), claude_name),
        (gemini_name.lower(), gemini_name),
        (ollama_name.lower(), ollama_name),
    ]
    votes = {}
    for line in text.lower().split('\n'):
        for lowered, formal_name in known_names:
            # `line` is already lowercased above; no second .lower() needed.
            if lowered in line:
                numbers = score_pattern.findall(line)
                if numbers:
                    votes[formal_name] = int(numbers[0])
                break  # at most one bot is scored per line
    return votes
def run_debate_round(topic, round_number, debate_output=None):
    """Run a single round of debate.

    Generator protocol: yields the full transcript-so-far (a newline-joined
    string) after every incremental update so the UI can stream it, then
    *returns* ``(votes, arguments, debate_output)`` — callers receive that
    tuple via ``yield from`` (PEP 380 StopIteration value).

    Args:
        topic: The debate topic for this round.
        round_number: 1-based round counter shown in the header banner.
        debate_output: Optional list of transcript lines to append to
            (mutated in place); a fresh list is created when None.
    """
    if debate_output is None:
        debate_output = []
    # Round header banner.
    debate_output.append(f"\n{'='*50}")
    debate_output.append(f"DEBATE ROUND {round_number}")
    debate_output.append(f"Topic: {topic}")
    debate_output.append('='*50 + "\n")
    yield "\n".join(debate_output)
    arguments = {}
    # Each bot makes their argument; a "thinking" line is streamed first so
    # the UI shows progress during the (slow) API call.
    for name, call_fn, messages in [
        (gpt_name, call_gpt, gpt_messages),
        (claude_name, call_claude, claude_messages),
        (gemini_name, call_gemini, gemini_messages),
        (ollama_name, call_ollama, ollama_messages)
    ]:
        debate_output.append(f"\n{name} is thinking...")
        yield "\n".join(debate_output)
        response = call_fn(f"Debate topic: {topic}\nMake your argument and explain your position.", messages)
        arguments[name] = response
        debate_output.append(f"\n{name}'s Argument:")
        debate_output.append("-" * 30)
        debate_output.append(response)
        debate_output.append("\n")
        yield "\n".join(debate_output)
    # Format arguments for voting.
    formatted_arguments = "\n".join([f"{name}'s Argument:\n{arg}\n" for name, arg in arguments.items()])
    # Collect votes silently (individual evaluations are not streamed).
    debate_output.append("\nAll arguments have been presented. Collecting evaluations...")
    yield "\n".join(debate_output)
    voting_prompt = f"""Please evaluate all the arguments made in this debate and rate each participant's argument on a scale of 1-10 (10 being the best).
Consider clarity, persuasiveness, and reasoning in your evaluation.
The topic was: {topic}
Here are the arguments:
{formatted_arguments}
Please rate each participant (1-10) and briefly explain your ratings:
{gpt_name}:
{claude_name}:
{gemini_name}:
{ollama_name}:"""
    votes = {name: {"votes": [], "total": 0} for name in [gpt_name, claude_name, gemini_name, ollama_name]}
    # Each bot votes; bots also score themselves since every voter rates all
    # four participants.
    for voter_fn, voter_messages, voter_name in [
        (call_gpt, gpt_messages, "GPT"),
        (call_claude, claude_messages, "Claude"),
        (call_gemini, gemini_messages, "Gemini"),
        (call_ollama, ollama_messages, "Ollama")
    ]:
        debate_output.append(f"\n{voter_name} is evaluating...")
        yield "\n".join(debate_output)
        vote_response = voter_fn(voting_prompt, voter_messages)
        parsed_votes = parse_vote(vote_response)
        # Only scores in the valid 1-10 range are counted.
        for name, score in parsed_votes.items():
            if 1 <= score <= 10:
                votes[name]["votes"].append(score)
    # Calculate totals.
    for name in votes:
        votes[name]["total"] = sum(votes[name]["votes"])
    # Generator return value — surfaced to the caller through `yield from`.
    return votes, arguments, debate_output
def run_debate(
    topic1: str,
    topic2: str,
    topic3: str,
    topic4: str,
    topic5: str,
    gpt_temperament: str,
    claude_temperament: str,
    gemini_temperament: str,
    ollama_temperament: str,
    progress=gr.Progress()
) -> gr.Markdown:
    """Main debate function for the Gradio interface.

    Generator: streams the accumulated transcript after each update. Runs
    the main topic, then tiebreaker topics while the top score is tied,
    and finally streams the results block.

    Args:
        topic1..topic5: Main topic plus up to four tiebreakers (blanks skipped).
        *_temperament: Temperament selection for each bot.
        progress: Gradio progress tracker (unused directly; enables the bar).
    """
    try:
        # Validate topics: keep non-empty entries, in order.
        topics = [t.strip() for t in [topic1, topic2, topic3, topic4, topic5] if t.strip()]
        if not topics:
            # BUGFIX: this function is a generator, so a `return "msg"` value
            # is swallowed as StopIteration.value and never reaches the UI —
            # the message must be yielded instead.
            yield "Please provide at least one debate topic"
            return
        # Build and publish the per-bot system prompts as module globals,
        # which the call_* helpers read.
        system_prompts = update_system_prompts({
            'gpt': gpt_temperament,
            'claude': claude_temperament,
            'gemini': gemini_temperament,
            'ollama': ollama_temperament
        })
        global gpt_system, claude_system, gemini_system, ollama_system
        gpt_system = system_prompts['gpt']
        claude_system = system_prompts['claude']
        gemini_system = system_prompts['gemini']
        ollama_system = system_prompts['ollama']
        debate_output = []
        current_round = 1
        winners = None
        all_votes = []  # per-round vote tallies, kept for inspection
        while current_round <= len(topics) and (current_round == 1 or (winners and len(winners) > 1)):
            # BUGFIX: pass `debate_output` into the round so its streamed
            # yields include earlier rounds' transcript. The round mutates
            # the same list, so the old `debate_output.extend(round_output)`
            # (which would now duplicate every line) is dropped.
            votes, arguments, debate_output = yield from run_debate_round(
                topics[current_round - 1], current_round, debate_output
            )
            all_votes.append(votes)
            # Highest total score wins; ties keep every top scorer.
            max_votes = max(votes.values(), key=lambda x: x["total"])["total"]
            winners = [name for name, data in votes.items() if data["total"] == max_votes]
            # Show results only if we have a winner or this is the last topic.
            if len(winners) == 1 or current_round == len(topics):
                debate_output.append("\n" + "="*50)
                debate_output.append("FINAL RESULTS")
                debate_output.append("="*50 + "\n")
                for name, data in votes.items():
                    debate_output.append(f"{name}:")
                    debate_output.append(f"Total Score: {data['total']}")
                    debate_output.append(f"Individual votes: {data['votes']}")
                    debate_output.append("")
                # NOTE(review): the "π"/"π€" glyphs look mojibake-damaged
                # (probably trophy/handshake emoji); left as found.
                if len(winners) == 1:
                    debate_output.append(f"\nπ The winner is {winners[0]}!")
                else:
                    debate_output.append(f"\nπ€ The debate ends in a tie between: {', '.join(winners)}")
                yield gr.Markdown("\n".join(debate_output))
                break
            current_round += 1
            if current_round <= len(topics):
                debate_output.append(f"\nMoving to tiebreaker topic {current_round}...\n")
                yield gr.Markdown("\n".join(debate_output))
    except Exception as e:
        # BUGFIX: yield (not return) so the error is actually displayed.
        yield f"An error occurred during the debate: {str(e)}"
def create_interface():
    """Create the Gradio interface.

    Builds the full Blocks layout: topic inputs (one main topic plus four
    optional tiebreakers), per-bot temperament dropdowns, start/reset
    buttons, the streaming transcript panel, and static help text.

    Returns:
        gr.Blocks: the assembled (not yet launched) interface.

    NOTE(review): several glyphs in the markdown strings below (e.g.
    "π©βπΌ") appear to be mojibake-damaged emoji from an upstream encoding
    issue; they are left byte-for-byte as found.
    """
    with gr.Blocks(theme=gr.themes.Soft()) as debate_interface:
        # Header / cast introduction.
        gr.Markdown("""
        # π AI Debate Arena
        Welcome to the AI Debate Platform! Meet our four unique debaters:
        π©βπΌ **Fausty GTB** (Early 30s)
        π©ββοΈ **Clara Obi Claude** (Late 30s)
        π©βπ« **Claire Obi Gemini** (Early 40s)
        π©βπ» **Amanda Obi Ollama** (Late 40s)
        """)
        with gr.Row():
            with gr.Column():
                # Main topic; tiebreakers are tucked into a collapsed accordion.
                topic1 = gr.Textbox(
                    label="Main Debate Topic",
                    placeholder="Enter the main debate topic",
                    scale=2
                )
                with gr.Accordion("Tiebreaker Topics (Optional)", open=False):
                    topic2 = gr.Textbox(label="Tiebreaker Topic 1", placeholder="In case of a tie...")
                    topic3 = gr.Textbox(label="Tiebreaker Topic 2", placeholder="If still tied...")
                    topic4 = gr.Textbox(label="Tiebreaker Topic 3", placeholder="If needed...")
                    topic5 = gr.Textbox(label="Tiebreaker Topic 4", placeholder="Final tiebreaker...")
            with gr.Column():
                gr.Markdown("### π Personality Settings")
                # One temperament dropdown per bot; defaults cover all four
                # temperaments so each debater starts with a distinct persona.
                gpt_temp = gr.Dropdown(
                    choices=TEMPERAMENTS,
                    value="Sanguine",
                    label=f"Select {gpt_name}'s Temperament"
                )
                claude_temp = gr.Dropdown(
                    choices=TEMPERAMENTS,
                    value="Choleric",
                    label=f"Select {claude_name}'s Temperament"
                )
                gemini_temp = gr.Dropdown(
                    choices=TEMPERAMENTS,
                    value="Melancholic",
                    label=f"Select {gemini_name}'s Temperament"
                )
                ollama_temp = gr.Dropdown(
                    choices=TEMPERAMENTS,
                    value="Phlegmatic",
                    label=f"Select {ollama_name}'s Temperament"
                )
        with gr.Row():
            start_btn = gr.Button("π― Start Debate", variant="primary", size="lg")
            clear_btn = gr.Button("π Reset", variant="secondary", size="lg")
        # Custom CSS for styling the transcript panel.
        gr.Markdown("""
        <style>
        .debate-box {
            border: 2px solid #2196F3;
            border-radius: 10px;
            padding: 20px;
            margin: 10px 0;
            background-color: #f8f9fa;
            box-shadow: 0 2px 5px rgba(0,0,0,0.1);
        }
        </style>
        """)
        # Debate Output Box — run_debate streams its transcript here.
        with gr.Row():
            with gr.Column(elem_classes="debate-box"):
                gr.Markdown("### π’ Debate Session")
                output = gr.Markdown()
        # Handle button clicks: start streams the generator; reset clears
        # the panel by returning None.
        start_btn.click(
            fn=run_debate,
            inputs=[
                topic1, topic2, topic3, topic4, topic5,
                gpt_temp, claude_temp, gemini_temp, ollama_temp
            ],
            outputs=output,
            show_progress="full"
        )
        clear_btn.click(
            fn=lambda: None,
            inputs=None,
            outputs=output
        )
        # Instructions (outside the debate box).
        with gr.Row():
            with gr.Column():
                gr.Markdown("""
                ### π¨ About Temperaments
                Each debater can embody one of these personality types:
                - **Sanguine**: Outgoing, lively, sociable, and carefree
                - **Choleric**: Ambitious, energetic, and leader-like
                - **Melancholic**: Analytical, deep thinking, and detail-oriented
                - **Phlegmatic**: Calm, peaceful, and easy-going
                ### π How it Works
                1. Enter your main debate topic
                2. Optionally add tiebreaker topics (used only if there's a tie)
                3. Customize personalities if desired
                4. Click "Start Debate" to begin
                5. Watch the debate unfold in real-time
                6. Final results appear after voting is complete
                ### βοΈ Voting System
                - Each bot evaluates all arguments after presentation
                - Votes are collected privately until the end
                - Scores range from 1-10 for each argument
                - Winner is determined by total score
                - In case of a tie, debate continues with next topic
                ### π Notes
                - A clear winner ends the debate immediately
                - All tiebreaker topics will be used if ties continue
                - Multiple winners are possible in final round
                - System adapts to each bot's personality
                """)
    return debate_interface
# Launch the interface when run as a script (share=True exposes a public
# Gradio link).
if __name__ == "__main__":
    app = create_interface()
    app.launch(share=True)