# Hugging Face Space: AI Debate Arena (page-status header "Spaces: Running" removed — scraper artifact, not code)
# --- Dependencies and client setup -------------------------------------------
import gradio as gr
import os
import re

from dotenv import load_dotenv
from huggingface_hub import InferenceClient
from openai import OpenAI

# Pull API credentials from a local .env file, overriding any shell values.
load_dotenv(override=True)

# Hugging Face Inference API client — drives the two debating bots.
hf_token = os.getenv("HF_TOKEN")
client = InferenceClient(token=hf_token)

# DeepSeek exposes an OpenAI-compatible endpoint, so the OpenAI SDK is reused
# for the judge model.
deepseek_api_key = os.getenv("DeepSeek_API_KEY")
openai = OpenAI(api_key=deepseek_api_key, base_url="https://api.deepseek.com/v1/")

# Default opinions pre-filled into the UI textboxes (empty by default).
opinion_1 = ''
opinion_2 = ''
def call_bots(bot_num, system, opinion, bot1_messages, bot2_messages, model="meta-llama/Meta-Llama-3-8B-Instruct"):
    """Stream one debating bot's next reply from the HF Inference API.

    Args:
        bot_num: 1 or 2 — selects which history is "own" vs the opponent's.
        system: System prompt for this bot.
        opinion: This bot's opinion (unused here; kept for signature compatibility).
        bot1_messages / bot2_messages: Full message histories, each starting
            with that bot's opening opinion at index 0.
        model: HF model id used for text generation.

    Yields:
        (partial_response, []) tuples as tokens stream in; the final yield
        carries the stripped, stop-marker-free response.
    """
    # Decide which history is ours and which belongs to the opponent.
    if bot_num == 1:
        own_messages, opponent_messages = bot1_messages, bot2_messages
    else:
        own_messages, opponent_messages = bot2_messages, bot1_messages

    # Reconstruct the alternating user/assistant transcript: each opponent
    # message is a "user" turn, followed by our reply to it (if we have one —
    # own_messages[0] is our opening opinion, replies start at index 1).
    messages = [{"role": "system", "content": system}]
    for idx, opp_msg in enumerate(opponent_messages):
        messages.append({"role": "user", "content": opp_msg})
        if idx + 1 < len(own_messages):
            messages.append({"role": "assistant", "content": own_messages[idx + 1]})

    # Render the Llama-3 chat template by hand for the raw text endpoint.
    segments = ["<|begin_of_text|>"]
    for msg in messages:
        segments.append(f"<|start_header_id|>{msg['role']}<|end_header_id|>\n\n{msg['content']}<|eot_id|>")
    segments.append("<|start_header_id|>assistant<|end_header_id|>\n\n")
    prompt = "".join(segments)

    stream = client.text_generation(
        prompt,
        model=model,
        stream=True,
        max_new_tokens=300,
        temperature=0.7,
        repetition_penalty=1.2,
        stop_sequences=["<|eot_id|>", "<|end_of_text|>"],
    )

    response = ""
    for token in stream:
        # Stop markers may still leak into streamed tokens; scrub them out.
        response += token.replace("<|eot_id|>", "").replace("<|end_of_text|>", "")
        yield response, []

    # Defensive trim in case a stop marker arrived split across tokens.
    response = response.split("<|eot_id|>")[0].strip()
    yield response, []
def fix_judge_headers(text):
    """Upgrade a line beginning with a single '#' before one of the judge's
    section emojis (🎯 / 🔥 / 🏆) to a level-3 '###' header.

    Anchored at line start so lines that are already '### 🎯' are untouched.
    (The previous str.replace("# 🎯", "### 🎯") also matched the substring
    inside '### 🎯', mangling correct headers into '##### 🎯'.)
    """
    return re.sub(r'^#(?= (?:🎯|🔥|🏆))', '###', text, flags=re.MULTILINE)


def judge_gpt_stream(context):
    """Stream the judge model's verdict over the full debate transcript.

    Args:
        context: Markdown transcript of the whole debate.

    Yields:
        The progressively growing verdict markdown (full text so far),
        re-yielded after each complete line and once for the final tail.
    """
    judge_prompt = [
        {'role': 'system', 'content': judge_system},
        {'role': 'user', 'content': f"**Debate Transcript:**\n{context}"},
    ]
    full_verdict = ""
    buffer = ""
    stream = openai.chat.completions.create(
        model='deepseek-reasoner',
        messages=judge_prompt,
        # NOTE(review): 2 is the extreme end of the sampling range — presumably
        # intentional for maximum snark; confirm the model honors it.
        temperature=2,
        stream=True
    )
    for chunk in stream:
        part = chunk.choices[0].delta.content
        if part:
            buffer += part
            # Emit only complete lines so the markdown fixes always see a
            # whole line at a time.
            while '\n' in buffer:
                line, buffer = buffer.split('\n', 1)
                line = fix_judge_headers(line + '\n')
                # Ensure a space between the header emoji and following text.
                line = re.sub(r'(### \S+)(\S)', r'\1 \2', line)
                full_verdict += line
                yield full_verdict
    # Flush the remaining tail (a final line with no trailing newline).
    if buffer:
        full_verdict += fix_judge_headers(buffer)
        yield full_verdict
# System prompt for the judge model: pins the exact markdown layout the
# streaming formatter in judge_gpt_stream expects.
judge_system = """**You are a snarky debate judge. Format your verdict EXACTLY like this:**
```markdown
### 🎯 Key Strengths
- [Bot 1/2]: Concise bullet points
- [Bot 1/2]: Another strength
### 🔥 Critical Weaknesses
- [Bot 1/2]: Main flaws here
- [Bot 1/2]: Additional weaknesses
### 🏆 Final Decision
**Winner:** [Bot 1/Bot 2]
**Why:** 1-2 snarky sentences with emojis
Follow these RULES:
1. Use exactly three '#' for headers
2. Add empty line after headers
3. Space between emojis and text
4. No markdown in mid-sentence
```"""
def stream_debate(opinion1, opinion2, num_rounds):
    """Run a full debate between two bots, then stream the judge's verdict.

    Args:
        opinion1: Opening opinion defended by Bot 1.
        opinion2: Opening opinion defended by Bot 2.
        num_rounds: Number of back-and-forth rounds (coerced to int).

    Yields:
        (chat_history, verdict_markdown) tuples for Gradio's streaming
        outputs — chat_history is a list of (bot1_msg, bot2_msg) pairs.
    """
    system_prompt_1 = f"""You are snarky and argumentative. Keep responses short and under 3 sentences.
Support {opinion1} and oppose {opinion2}. Use logical arguments. No civility. Argue in the style of online arguments"""
    system_prompt_2 = f"""You are snarky and argumentative. Keep responses short and under 3 sentences.
Support {opinion2} and oppose {opinion1}. Use logical arguments. No civility. Argue in the style of online arguments"""

    # Each bot's history starts with its own opinion at index 0; call_bots
    # treats the opponent's history as the "user" turns.
    bot1_history = [opinion1]
    bot2_history = [opinion2]
    display_history = []
    full_context = f"### Bot 1 Opinion\n{opinion1}\n\n### Bot 2 Opinion\n{opinion2}"
    display_history.append((opinion1, opinion2))
    yield display_history.copy(), "# 🧑⚖️ Judgment Pending..."

    for _ in range(int(num_rounds)):
        # Bot 1's turn: forward each streamed partial into the chat display.
        bot1_response = ""
        for chunk, _unused in call_bots(1, system_prompt_1, opinion1, bot1_history, bot2_history):
            bot1_response = chunk
            yield display_history + [(bot1_response, None)], ""
        bot1_history.append(bot1_response)
        full_context += f"\n\n**Bot 1:** {bot1_response}"

        # Bot 2's turn, replying to the now-updated Bot 1 history.
        bot2_response = ""
        for chunk, _unused in call_bots(2, system_prompt_2, opinion2, bot1_history, bot2_history):
            bot2_response = chunk
            yield display_history + [(bot1_response, bot2_response)], ""
        bot2_history.append(bot2_response)
        full_context += f"\n\n**Bot 2:** {bot2_response}"

        display_history.append((bot1_response, bot2_response))

    yield display_history, "## 🧑⚖️ Judge is Deliberating...\n\n⏳ Analyzing arguments..."

    verdict_md = ""
    for partial in judge_gpt_stream(full_context):
        verdict_md = partial
        yield display_history, f"## 🧑⚖️ Live Judgment\n\n{verdict_md}"

    # Normalize any over-deep headers (#### or more) back to '###'. The old
    # pattern r'(#+)\s*(?=#)' backtracked and also collapsed legitimate '###'
    # headers down to a single '#', wrecking valid judge output.
    verdict_md = re.sub(r'#{4,}', '###', verdict_md)
    yield display_history, f"## 🧑⚖️ Final Judgment\n\n{verdict_md.strip()}"
# --- Gradio UI ----------------------------------------------------------------

# Responsive styling: mobile-first overrides plus the judge-panel frame.
_CSS = """
/* Mobile-first styles */
@media (max-width: 768px) {
    .mobile-stack {
        flex-direction: column !important;
        gap: 8px !important;
    }
    .mobile-full {
        width: 100% !important;
        min-width: unset !important;
    }
    .mobile-pad {
        padding: 8px !important;
    }
    .judge-box {
        max-height: 300px !important;
        font-size: 14px !important;
    }
    .chatbot {
        min-height: 300px !important;
        max-height: 50vh !important;
    }
    h1 {
        font-size: 24px !important;
        text-align: center;
    }
    button {
        width: 100% !important;
    }
}
.judge-box {
    border: 2px solid #4CAF50 !important;
    border-radius: 10px !important;
    padding: 15px !important;
    max-height: 400px !important;
    overflow-y: auto !important;
}
/* ... (keep other existing CSS rules) */
"""

with gr.Blocks(title="🤖⚔️ Debate Arena", css=_CSS) as demo:
    # Title and usage instructions.
    gr.Markdown("# 🤖⚔️🤖 AI Debate Arena\nInput any opinion for Bot 1 and Bot 2 and select the number of rounds. After the argument is finished, a third Bot will pick the winner. (May have to scroll down to view on mobile devices.)", elem_classes=["mobile-pad"])

    # Opinion inputs (left) and round count / start controls (right).
    with gr.Row(elem_classes=["mobile-stack"]):
        with gr.Column(scale=2, elem_classes=["mobile-full"]):
            bot1_input = gr.Textbox(label="Bot 1 Opinion", value=opinion_1, elem_classes=["mobile-full"])
            bot2_input = gr.Textbox(label="Bot 2 Opinion", value=opinion_2, elem_classes=["mobile-full"])
        with gr.Column(scale=1, elem_classes=["mobile-full"]):
            rounds = gr.Number(label="Number of Rounds", value=3, precision=0, minimum=1, maximum=500)
            start_btn = gr.Button("Start Debate!", variant="primary", elem_classes=["mobile-full"])

    # Live debate transcript.
    with gr.Row(elem_classes=["mobile-stack"]):
        chatbot = gr.Chatbot(height=400, label="Live Debate", elem_classes=["mobile-full"])

    # Judge verdict panel, updated while the judge streams.
    with gr.Row(elem_classes=["mobile-stack"]):
        verdict_box = gr.Markdown(
            "## 🧑⚖️ Debate Judge\n\n*Awaiting debate conclusion...*",
            elem_classes=["judge-box", "mobile-full"],
            label="Judge's Verdict"
        )

    # On click: clear both panels first, then stream the debate into them.
    start_btn.click(
        lambda: ([], "## 🧑⚖️ Debate Judge\n\n*Starting new debate...*"),
        outputs=[chatbot, verdict_box]
    ).then(
        stream_debate,
        inputs=[bot1_input, bot2_input, rounds],
        outputs=[chatbot, verdict_box]
    )

demo.queue().launch()