import gradio as gr
import os
import re
from openai import OpenAI
from huggingface_hub import InferenceClient
from dotenv import load_dotenv

load_dotenv(override=True)
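# The .env file is expected to define both keys read below, e.g.:
#   HF_TOKEN=hf_...
#   DeepSeek_API_KEY=sk-...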

# Initialize clients: Hugging Face Inference for the debaters, and DeepSeek's
# OpenAI-compatible endpoint for the judge.
hf_token = os.getenv("HF_TOKEN")
client = InferenceClient(token=hf_token)
deepseek_api_key = os.getenv("DeepSeek_API_KEY")
openai = OpenAI(api_key=deepseek_api_key, base_url="https://api.deepseek.com/v1/")

# Default opinions pre-filled into the UI textboxes (empty by default)
opinion_1 = ''
opinion_2 = ''
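# Hypothetical examples of seed opinions (the textboxes accept any statement):
#   opinion_1 = "Pineapple belongs on pizza"
#   opinion_2 = "Pineapple ruins pizza"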

def call_bots(bot_num, system, bot1_messages, bot2_messages, model="meta-llama/Meta-Llama-3-8B-Instruct"):
    # Work out which history belongs to the opponent and which is this bot's own
    opponent_messages = bot2_messages if bot_num == 1 else bot1_messages
    own_messages = bot1_messages if bot_num == 1 else bot2_messages
    
    # Interleave the full conversation: each opponent message is a "user" turn,
    # followed by this bot's reply to it. Index 0 of each history is the seed
    # opinion, so this bot's actual replies start at index 1.
    messages = [{"role": "system", "content": system}]
    for i, opp_msg in enumerate(opponent_messages):
        messages.append({"role": "user", "content": opp_msg})
        if i < len(own_messages) - 1:  # a reply to this turn exists
            messages.append({"role": "assistant", "content": own_messages[i + 1]})
    
    # Hand-render the Llama 3 chat template around the message list
    prompt = "<|begin_of_text|>"
    for msg in messages:
        prompt += f"<|start_header_id|>{msg['role']}<|end_header_id|>\n\n{msg['content']}<|eot_id|>"
    prompt += "<|start_header_id|>assistant<|end_header_id|>\n\n"
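    # With one prior exchange, the rendered prompt looks roughly like:
    #   <|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system}<|eot_id|>
    #   <|start_header_id|>user<|end_header_id|>\n\n{opponent message}<|eot_id|>
    #   <|start_header_id|>assistant<|end_header_id|>\n\n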
    
    stream = client.text_generation(
        prompt,
        model=model,
        stream=True,
        max_new_tokens=300,
        temperature=0.7,
        repetition_penalty=1.2,
        stop_sequences=["<|eot_id|>", "<|end_of_text|>"]
    )
    
    response = ""
    for token in stream:
        # Tokens arrive as plain strings; drop any special tokens that leak through
        clean_token = token.replace("<|eot_id|>", "").replace("<|end_of_text|>", "")
        response += clean_token
        yield response, []
    
    # A special token can be split across stream chunks, so clean the
    # accumulated text once more before the final yield
    response = response.split("<|eot_id|>")[0].strip()
    yield response, []
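
# Rough alternative (a sketch, not used here): recent huggingface_hub releases
# can apply the chat template server-side via InferenceClient.chat_completion,
# which avoids hand-building the special tokens above:
#
#   for chunk in client.chat_completion(messages=messages, model=model,
#                                       stream=True, max_tokens=300):
#       delta = chunk.choices[0].delta.content or ""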

def judge_gpt_stream(context):
    judge_prompt = [
        {'role': 'system', 'content': judge_system},
        {'role': 'user', 'content': f"**Debate Transcript:**\n{context}"}
    ]
    
    full_verdict = ""
    buffer = ""
    stream = openai.chat.completions.create(
        model='deepseek-reasoner',
        messages=judge_prompt,
        # Note: deepseek-reasoner ignores sampling parameters such as
        # temperature, so this unusually high value has no effect
        temperature=2,
        stream=True
    )
    
    for chunk in stream:
        # deepseek-reasoner emits reasoning deltas with content=None first;
        # they are skipped so only final answer tokens reach the UI
        if chunk.choices[0].delta.content:
            part = chunk.choices[0].delta.content
            buffer += part
            
            # Process complete lines
            while '\n' in buffer:
                line_end = buffer.index('\n') + 1
                line = buffer[:line_end]
                buffer = buffer[line_end:]
                
                # Convert headers to proper markdown
                line = line.replace("# 🎯", "### 🎯").replace("# 💥", "### 💥").replace("# 🏆", "### 🏆")
                # Ensure proper line spacing
                line = re.sub(r'(### [🎯💥🏆])(\S)', r'\1 \2', line)  # Ensure a space after the header emoji
                full_verdict += line
                yield full_verdict
                
    # Process remaining buffer
    if buffer:
        buffer = buffer.replace("# 🎯", "### 🎯").replace("# 💥", "### 💥").replace("# 🏆", "### 🏆")
        full_verdict += buffer
        yield full_verdict

judge_system = """**You are a snarky debate judge. Format your verdict EXACTLY like this:**

```markdown
### 🎯 Key Strengths
- [Bot 1/2]: Concise bullet points
- [Bot 1/2]: Another strength

### 💥 Critical Weaknesses
- [Bot 1/2]: Main flaws here
- [Bot 1/2]: Additional weaknesses

### 🏆 Final Decision  
**Winner:** [Bot 1/Bot 2]  
**Why:** 1-2 snarky sentences with emojis
```

Follow these RULES:
1. Use exactly three '#' for headers
2. Add an empty line after each header
3. Keep a space between emojis and text
4. No markdown mid-sentence"""

def stream_debate(opinion1, opinion2, num_rounds):
    system_prompt_1 = (
        "You are snarky and argumentative. Keep responses short and under 3 sentences. "
        f"Support {opinion1} and oppose {opinion2}. Use logical arguments. "
        "No civility. Argue in the style of online arguments."
    )
    
    system_prompt_2 = (
        "You are snarky and argumentative. Keep responses short and under 3 sentences. "
        f"Support {opinion2} and oppose {opinion1}. Use logical arguments. "
        "No civility. Argue in the style of online arguments."
    )

    # Each history starts with the bot's seed opinion; responses are appended per round
    bot1_history = [opinion1]
    bot2_history = [opinion2]
    display_history = []
    full_context = f"### Bot 1 Opinion\n{opinion1}\n\n### Bot 2 Opinion\n{opinion2}"
    
    display_history.append((opinion1, opinion2))
    yield display_history.copy(), "## 🧑‍⚖️ Judgment Pending..."
    
    for _ in range(int(num_rounds)):
        # Bot 1's turn: stream partial text into the pending chat row
        bot1_response = ""
        for chunk, _ in call_bots(1, system_prompt_1, bot1_history, bot2_history):
            bot1_response = chunk
            yield display_history + [(bot1_response, None)], ""
        bot1_history.append(bot1_response)
        full_context += f"\n\n**Bot 1:** {bot1_response}"
        
        # Bot 2's turn: stream its reply alongside Bot 1's finished message
        bot2_response = ""
        for chunk, _ in call_bots(2, system_prompt_2, bot1_history, bot2_history):
            bot2_response = chunk
            yield display_history + [(bot1_response, bot2_response)], ""
        bot2_history.append(bot2_response)
        full_context += f"\n\n**Bot 2:** {bot2_response}"
        
        display_history.append((bot1_response, bot2_response))

    yield display_history, "## 🧑‍⚖️ Judge is Deliberating...\n\n⏳ Analyzing arguments..."
    
    verdict_md = ""
    for partial in judge_gpt_stream(full_context):
        verdict_md = partial
        yield display_history, f"## 🧑‍⚖️ Live Judgment\n\n{verdict_md}"
    
    # Collapse accidental stacked headers (e.g. "### ### Title") before the final render
    verdict_md = re.sub(r'(#+)\s*(?=#)', '', verdict_md)
    yield display_history, f"## 🧑‍⚖️ Final Judgment\n\n{verdict_md.strip()}"
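
# stream_debate is a generator, so Gradio treats it as a streaming callback:
# every yield above pushes a fresh (chatbot, verdict_box) update to the UI.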

with gr.Blocks(title="🤖⚔️ Debate Arena", css="""
/* Mobile-first styles */
@media (max-width: 768px) {
    .mobile-stack {
        flex-direction: column !important;
        gap: 8px !important;
    }
    .mobile-full {
        width: 100% !important;
        min-width: unset !important;
    }
    .mobile-pad {
        padding: 8px !important;
    }
    .judge-box {
        max-height: 300px !important;
        font-size: 14px !important;
    }
    .chatbot {
        min-height: 300px !important;
        max-height: 50vh !important;
    }
    h1 {
        font-size: 24px !important;
        text-align: center;
    }
    button {
        width: 100% !important;
    }
}
.judge-box {
    border: 2px solid #4CAF50 !important;
    border-radius: 10px !important;
    padding: 15px !important;
    max-height: 400px !important;
    overflow-y: auto !important;
}
/* ... (keep other existing CSS rules) */
""") as demo:
    gr.Markdown("# 🤖⚔️🤖 AI Debate Arena\nInput any opinion for Bot 1 and Bot 2 and select the number of rounds. After the debate finishes, a third bot picks the winner. (You may have to scroll down to view on mobile devices.)", elem_classes=["mobile-pad"])
    
    with gr.Row(elem_classes=["mobile-stack"]):
        with gr.Column(scale=2, elem_classes=["mobile-full"]):
            bot1_input = gr.Textbox(label="Bot 1 Opinion", value=opinion_1, elem_classes=["mobile-full"])
            bot2_input = gr.Textbox(label="Bot 2 Opinion", value=opinion_2, elem_classes=["mobile-full"])
        with gr.Column(scale=1, elem_classes=["mobile-full"]):
            rounds = gr.Number(label="Number of Rounds", value=3, precision=0, minimum=1, maximum=500)
            start_btn = gr.Button("Start Debate!", variant="primary", elem_classes=["mobile-full"])
    
    with gr.Row(elem_classes=["mobile-stack"]):
        chatbot = gr.Chatbot(height=400, label="Live Debate", elem_classes=["mobile-full"])
    
    with gr.Row(elem_classes=["mobile-stack"]):
        verdict_box = gr.Markdown(
            "## πŸ§‘βš–οΈ Debate Judge\n\n*Awaiting debate conclusion...*",
            elem_classes=["judge-box", "mobile-full"],
            label="Judge's Verdict"
        )
    
    
    # Reset both outputs, then chain into the streaming debate generator
    start_btn.click(
        lambda: ([], "## 🧑‍⚖️ Debate Judge\n\n*Starting new debate...*"),
        outputs=[chatbot, verdict_box]
    ).then(
        stream_debate,
        inputs=[bot1_input, bot2_input, rounds],
        outputs=[chatbot, verdict_box]
    )


demo.queue().launch()