import os
import re
from typing import Dict

import gradio as gr
import ollama
from dotenv import load_dotenv
from openai import OpenAI
import google.generativeai
import anthropic

# Load environment variables and configure APIs
load_dotenv()
openai = OpenAI()
claude = anthropic.Anthropic()
google.generativeai.configure(api_key=os.getenv('GOOGLE_API_KEY'))

# Print API key validations
openai_api_key = os.getenv('OPENAI_API_KEY')
anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')
google_api_key = os.getenv('GOOGLE_API_KEY')

if openai_api_key:
    print(f"OpenAI API Key exists and begins with {openai_api_key[:8]}...")
else:
    print("OpenAI API Key not set")
    
if anthropic_api_key:
    print(f"Anthropic API Key exists and begins with {anthropic_api_key[:8]}...")
else:
    print("Anthropic API Key not set")

if google_api_key:
    print(f"Google API Key exists and begins with {google_api_key[:8]}...")
else:
    print("Google API Key not set")

# Model configurations
gpt_model = "gpt-4o"
claude_model = "claude-3-haiku-20240307"
gemini_model = "gemini-1.5-pro-latest"
ollama_model = "llama3.2"
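# Note: ollama.chat talks to a locally running Ollama server (default http://localhost:11434);
# the model above must already be pulled, e.g. `ollama pull llama3.2`.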

# Bot names and personalities
gpt_name = "Fausty GTB"
claude_name = "Clara Obi Claude"
gemini_name = "Claire Obi Gemini"
ollama_name = "Amanda Obi Ollama"

# Initial messages
gpt_messages = ["Hi Hi"]
claude_messages = ["Hello"]
gemini_messages = ["Good day everyone"]
ollama_messages = ["Greetings to you all"]

# Available temperaments
TEMPERAMENTS = ["Sanguine", "Choleric", "Melancholic", "Phlegmatic"]

def update_system_prompts(temperaments: Dict[str, str]) -> Dict[str, str]:
    """Update system prompts based on selected temperaments"""
    return {
        "gpt": f"You are {gpt_name}, a young woman in her early 30s. Your temperament is {temperaments['gpt']}. "
               "You will debate any presented topic, reflecting your personality in your responses. "
               "Be engaging and true to your temperament while making logical arguments.",
        "claude": f"You are {claude_name}, a young woman in her late 30s. Your temperament is {temperaments['claude']}. "
                 "You will debate any presented topic, showcasing your personality. "
                 "Make sure your arguments reflect your temperament while remaining logical and clear.",
        "gemini": f"You are {gemini_name}, a young woman in her early 40s. Your temperament is {temperaments['gemini']}. "
                 "You will debate any presented topic, demonstrating your personality. "
                 "Balance your emotional temperament with logical reasoning in your arguments.",
        "ollama": f"You are {ollama_name}, a young woman in her late 40s. Your temperament is {temperaments['ollama']}. "
                 "You will debate any presented topic, highlighting your personality. "
                 "Use your temperament to frame your logical arguments and perspectives."
    }
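
# Illustrative example: update_system_prompts({"gpt": "Sanguine", "claude": "Choleric",
# "gemini": "Melancholic", "ollama": "Phlegmatic"}) returns one persona prompt per
# debater, keyed by "gpt", "claude", "gemini" and "ollama".

# Each call_* helper below converts the shared history list (plain strings for the seeded
# greetings, dicts for later turns) into its provider's chat format, sends the prompt,
# and appends the model's reply back onto that history list.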

def call_gpt(prompt, messages):
    """Call GPT API with appropriate formatting"""
    try:
        formatted_messages = [{"role": "system", "content": gpt_system}]
        for msg in messages:
            if isinstance(msg, dict):
                formatted_messages.append(msg)
            else:
                formatted_messages.append({"role": "assistant", "content": msg})
        formatted_messages.append({"role": "user", "content": prompt})
        
        response = openai.chat.completions.create(
            model=gpt_model,
            messages=formatted_messages,
            temperature=0.7
        )
        reply = response.choices[0].message.content
        messages.append({"role": "assistant", "content": reply})
        return reply
    except Exception as e:
        return f"GPT Error: {str(e)}"

def call_claude(prompt, messages):
    """Call Claude API with appropriate formatting"""
    try:
        formatted_messages = []
        for msg in messages:
            if isinstance(msg, dict):
                formatted_messages.append(msg)
            else:
                formatted_messages.append({"role": "assistant", "content": msg})
        formatted_messages.append({"role": "user", "content": prompt})

        # The Anthropic Messages API requires the conversation to start with a user turn;
        # since this app only stores assistant turns, drop the leading assistant history
        # (each debate/voting prompt is self-contained, so little context is lost).
        while formatted_messages and formatted_messages[0]["role"] == "assistant":
            formatted_messages.pop(0)

        response = claude.messages.create(
            model=claude_model,
            max_tokens=1000,
            system=claude_system,
            messages=formatted_messages
        )
        reply = response.content[0].text
        messages.append({"role": "assistant", "content": reply})
        return reply
    except Exception as e:
        return f"Claude Error: {str(e)}"

def call_gemini(prompt, messages):
    """Call Gemini API with appropriate formatting"""
    try:
        model = google.generativeai.GenerativeModel(gemini_model)
        chat = model.start_chat()
        response = chat.send_message(f"{gemini_system}\n\n{prompt}")
        reply = response.text
        messages.append({"role": "assistant", "content": reply})
        return reply
    except Exception as e:
        return f"Gemini Error: {str(e)}"

def call_ollama(prompt, messages):
    """Call Ollama API with appropriate formatting"""
    try:
        formatted_messages = [{"role": "system", "content": ollama_system}]
        for msg in messages:
            if isinstance(msg, dict):
                formatted_messages.append(msg)
            else:
                formatted_messages.append({"role": "assistant", "content": msg})
        formatted_messages.append({"role": "user", "content": prompt})
        
        response = ollama.chat(
            model=ollama_model,
            messages=formatted_messages
        )
        reply = response['message']['content']
        messages.append({"role": "assistant", "content": reply})
        return reply
    except Exception as e:
        return f"Ollama Error: {str(e)}"
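
# Illustrative example: parse_vote("Fausty GTB: 8 - clear and engaging") returns
# {"Fausty GTB": 8}; lines that do not mention a debater's name are ignored.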

def parse_vote(text):
    """Extract numerical votes (1-10) for each debater from a text response"""
    votes = {}
    for line in text.lower().split('\n'):
        for name, formal_name in [
            (gpt_name.lower(), gpt_name),
            (claude_name.lower(), claude_name),
            (gemini_name.lower(), gemini_name),
            (ollama_name.lower(), ollama_name)
        ]:
            if name in line:
                numbers = re.findall(r'\b([1-9]|10)\b', line)
                if numbers:
                    votes[formal_name] = int(numbers[0])
                    break
    return votes
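
# run_debate_round is a generator: each yield streams the transcript built so far to the
# Gradio output, and its final `return` value (votes, arguments, transcript) is captured
# by the `yield from` expression in run_debate (standard PEP 380 semantics).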

def run_debate_round(topic, round_number, debate_output=None):
    """Run a single round of debate"""
    if debate_output is None:
        debate_output = []

    debate_output.append(f"\n{'='*50}")
    debate_output.append(f"DEBATE ROUND {round_number}")
    debate_output.append(f"Topic: {topic}")
    debate_output.append('='*50 + "\n")
    yield "\n".join(debate_output)

    arguments = {}
    
    # Each bot makes their argument
    for name, call_fn, messages in [
        (gpt_name, call_gpt, gpt_messages),
        (claude_name, call_claude, claude_messages),
        (gemini_name, call_gemini, gemini_messages),
        (ollama_name, call_ollama, ollama_messages)
    ]:
        debate_output.append(f"\n{name} is thinking...")
        yield "\n".join(debate_output)
        
        response = call_fn(f"Debate topic: {topic}\nMake your argument and explain your position.", messages)
        arguments[name] = response
        debate_output.append(f"\n{name}'s Argument:")
        debate_output.append("-" * 30)
        debate_output.append(response)
        debate_output.append("\n")
        yield "\n".join(debate_output)

    # Format arguments for voting
    formatted_arguments = "\n".join([f"{name}'s Argument:\n{arg}\n" for name, arg in arguments.items()])
    
    # Collect votes silently
    debate_output.append("\nAll arguments have been presented. Collecting evaluations...")
    yield "\n".join(debate_output)
    
    voting_prompt = f"""Please evaluate all the arguments made in this debate and rate each participant's argument on a scale of 1-10 (10 being the best).
    Consider clarity, persuasiveness, and reasoning in your evaluation.
    
    The topic was: {topic}
    
    Here are the arguments:
    {formatted_arguments}
    
    Please rate each participant (1-10) and briefly explain your ratings:
    {gpt_name}:
    {claude_name}:
    {gemini_name}:
    {ollama_name}:"""
    
    votes = {name: {"votes": [], "total": 0} for name in [gpt_name, claude_name, gemini_name, ollama_name]}
    
    # Each bot votes
    for voter_fn, voter_messages, voter_name in [
        (call_gpt, gpt_messages, "GPT"),
        (call_claude, claude_messages, "Claude"),
        (call_gemini, gemini_messages, "Gemini"),
        (call_ollama, ollama_messages, "Ollama")
    ]:
        debate_output.append(f"\n{voter_name} is evaluating...")
        yield "\n".join(debate_output)
        
        vote_response = voter_fn(voting_prompt, voter_messages)
        parsed_votes = parse_vote(vote_response)
        
        for name, score in parsed_votes.items():
            if 1 <= score <= 10:
                votes[name]["votes"].append(score)
    
    # Calculate totals
    for name in votes:
        votes[name]["total"] = sum(votes[name]["votes"])
    
    return votes, arguments, debate_output
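
# run_debate keeps running rounds while the top score is tied and tiebreaker topics
# remain; the first round with a unique winner, or the last available topic, ends the
# debate. Gradio treats the generator as a streaming handler, updating the bound output
# component on every yield.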

def run_debate(
    topic1: str,
    topic2: str,
    topic3: str,
    topic4: str,
    topic5: str,
    gpt_temperament: str,
    claude_temperament: str,
    gemini_temperament: str,
    ollama_temperament: str,
    progress=gr.Progress()
):
    """Main debate function for the Gradio interface; yields the transcript as it grows"""
    try:
        # Validate topics
        topics = [t.strip() for t in [topic1, topic2, topic3, topic4, topic5] if t.strip()]
        if not topics:
            # This is a generator, so errors must be yielded (a plain return value
            # would never reach the Gradio output)
            yield "Please provide at least one debate topic."
            return
        
        # Update system prompts
        system_prompts = update_system_prompts({
            'gpt': gpt_temperament,
            'claude': claude_temperament,
            'gemini': gemini_temperament,
            'ollama': ollama_temperament
        })
        
        global gpt_system, claude_system, gemini_system, ollama_system
        gpt_system = system_prompts['gpt']
        claude_system = system_prompts['claude']
        gemini_system = system_prompts['gemini']
        ollama_system = system_prompts['ollama']
        
        debate_output = []
        current_round = 1
        winners = None
        all_votes = []
        
        while current_round <= len(topics) and (current_round == 1 or (winners and len(winners) > 1)):
            # Run the debate round, streaming into the shared transcript so earlier
            # rounds stay visible during tiebreakers
            votes, arguments, debate_output = yield from run_debate_round(
                topics[current_round - 1], current_round, debate_output
            )
            all_votes.append(votes)
            
            # Calculate winners
            max_votes = max(votes.values(), key=lambda x: x["total"])["total"]
            winners = [name for name, data in votes.items() if data["total"] == max_votes]
            
            # Show results only if we have a winner or this is the last topic
            if len(winners) == 1 or current_round == len(topics):
                debate_output.append("\n" + "="*50)
                debate_output.append("FINAL RESULTS")
                debate_output.append("="*50 + "\n")
                
                for name, data in votes.items():
                    debate_output.append(f"{name}:")
                    debate_output.append(f"Total Score: {data['total']}")
                    debate_output.append(f"Individual votes: {data['votes']}")
                    debate_output.append("")
                
                if len(winners) == 1:
                    debate_output.append(f"\nπŸ† The winner is {winners[0]}!")
                else:
                    debate_output.append(f"\n🀝 The debate ends in a tie between: {', '.join(winners)}")
                
                yield gr.Markdown("\n".join(debate_output))
                break
            
            current_round += 1
            if current_round <= len(topics):
                debate_output.append(f"\nMoving to tiebreaker topic {current_round}...\n")
                yield gr.Markdown("\n".join(debate_output))
    
    except Exception as e:
        yield f"An error occurred during the debate: {str(e)}"


def create_interface():
    """Create the Gradio interface"""
    with gr.Blocks(theme=gr.themes.Soft()) as debate_interface:
        gr.Markdown("""
        # 🎭 AI Debate Arena
        
        Welcome to the AI Debate Platform! Meet our four unique debaters:
        
        πŸ‘©β€πŸ’Ό **Fausty GTB** (Early 30s)  
        πŸ‘©β€βš–οΈ **Clara Obi Claude** (Late 30s)  
        πŸ‘©β€πŸ« **Claire Obi Gemini** (Early 40s)  
        πŸ‘©β€πŸ’» **Amanda Obi Ollama** (Late 40s)
        """)
        
        with gr.Row():
            with gr.Column():
                topic1 = gr.Textbox(
                    label="Main Debate Topic",
                    placeholder="Enter the main debate topic",
                    scale=2
                )
                with gr.Accordion("Tiebreaker Topics (Optional)", open=False):
                    topic2 = gr.Textbox(label="Tiebreaker Topic 1", placeholder="In case of a tie...")
                    topic3 = gr.Textbox(label="Tiebreaker Topic 2", placeholder="If still tied...")
                    topic4 = gr.Textbox(label="Tiebreaker Topic 3", placeholder="If needed...")
                    topic5 = gr.Textbox(label="Tiebreaker Topic 4", placeholder="Final tiebreaker...")
            
            with gr.Column():
                gr.Markdown("### 🎭 Personality Settings")
                gpt_temp = gr.Dropdown(
                    choices=TEMPERAMENTS,
                    value="Sanguine",
                    label=f"Select {gpt_name}'s Temperament"
                )
                claude_temp = gr.Dropdown(
                    choices=TEMPERAMENTS,
                    value="Choleric",
                    label=f"Select {claude_name}'s Temperament"
                )
                gemini_temp = gr.Dropdown(
                    choices=TEMPERAMENTS,
                    value="Melancholic",
                    label=f"Select {gemini_name}'s Temperament"
                )
                ollama_temp = gr.Dropdown(
                    choices=TEMPERAMENTS,
                    value="Phlegmatic",
                    label=f"Select {ollama_name}'s Temperament"
                )

        with gr.Row():
            start_btn = gr.Button("🎯 Start Debate", variant="primary", size="lg")
            clear_btn = gr.Button("πŸ”„ Reset", variant="secondary", size="lg")

        # Custom CSS for styling
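        # Note: depending on the Gradio version, raw <style> tags inside gr.Markdown may be
        # sanitized; passing css=... to gr.Blocks is an alternative way to apply custom styles.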
        gr.Markdown("""
        <style>
        .debate-box {
            border: 2px solid #2196F3;
            border-radius: 10px;
            padding: 20px;
            margin: 10px 0;
            background-color: #f8f9fa;
            box-shadow: 0 2px 5px rgba(0,0,0,0.1);
        }
        </style>
        """)

        # Debate Output Box
        with gr.Row():
            with gr.Column(elem_classes="debate-box"):
                gr.Markdown("### πŸ“’ Debate Session")
                output = gr.Markdown()

        # Handle button clicks
        start_btn.click(
            fn=run_debate,
            inputs=[
                topic1, topic2, topic3, topic4, topic5,
                gpt_temp, claude_temp, gemini_temp, ollama_temp
            ],
            outputs=output,
            show_progress="full"
        )

        clear_btn.click(
            fn=lambda: None,
            inputs=None,
            outputs=output
        )
        
        # Instructions (outside the debate box)
        with gr.Row():
            with gr.Column():
                gr.Markdown("""
                ### 🎨 About Temperaments
                
                Each debater can embody one of these personality types:
                
                - **Sanguine**: Outgoing, lively, sociable, and carefree
                - **Choleric**: Ambitious, energetic, and leader-like
                - **Melancholic**: Analytical, deep thinking, and detail-oriented
                - **Phlegmatic**: Calm, peaceful, and easy-going
                
                ### πŸ“ How it Works
                
                1. Enter your main debate topic
                2. Optionally add tiebreaker topics (used only if there's a tie)
                3. Customize personalities if desired
                4. Click "Start Debate" to begin
                5. Watch the debate unfold in real-time
                6. Final results appear after voting is complete
                
                ### βš–οΈ Voting System
                
                - Each bot evaluates all arguments after presentation
                - Votes are collected privately until the end
                - Scores range from 1-10 for each argument
                - Winner is determined by total score
                - In case of a tie, the debate continues with the next topic
                
                ### πŸ“Œ Notes
                
                - A clear winner ends the debate immediately
                - All tiebreaker topics will be used if ties continue
                - Multiple winners are possible in the final round
                - System adapts to each bot's personality
                """)
    
    return debate_interface

# Launch the interface
if __name__ == "__main__":
    debate_interface = create_interface()
    debate_interface.launch(
        share=True
    )