"""Multi-agent debate simulator: three persona "agents" (optimist, pessimist,
neutral) argue a user-supplied topic using a single FLAN-T5-Base model."""

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
import gradio as gr

model_name = "google/flan-t5-base"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)

# New, more specific instructions
personas = {
    "🟢 Optimist": "Give a detailed, optimistic argument with at least two clear benefits and an example.",
    "🔴 Pessimist": "Give a detailed, critical argument highlighting at least two risks or drawbacks and an example.",
    "🟡 Neutral": "Provide a balanced perspective. Start by listing pros, then cons, and conclude with a neutral summary."
}


def generate_debate(topic):
    """Generate one markdown-formatted argument per persona for *topic*.

    Returns a single markdown string with a ``### <label>`` heading followed
    by the model's response for each persona, separated by blank lines.
    """
    results = []
    for label, instruction in personas.items():
        prompt = (
            f"You are an experienced debater.\n"
            f"Debate Topic: \"{topic}\"\n"
            f"{instruction}\n"
            f"Write at least 3–4 sentences."
        )
        response = pipe(
            prompt,
            max_new_tokens=180,  # increased from 120
            # BUG FIX: without do_sample=True the pipeline uses greedy
            # decoding and silently ignores `temperature`, so the intended
            # 0.7 sampling temperature had no effect.
            do_sample=True,
            temperature=0.7
        )[0]['generated_text'].strip()
        results.append(f"### {label}\n{response}")
    return "\n\n".join(results)


demo = gr.Interface(
    fn=generate_debate,
    inputs=gr.Textbox(label="Debate Topic"),
    outputs=gr.Markdown(),
    title="🎙️ Multi-Agent Debate Simulator",
    description="Debates with Optimist, Pessimist & Neutral perspectives using FLAN-T5-Base."
)

# Guard the launch so importing this module (e.g. for testing) does not
# start the web server as a side effect.
if __name__ == "__main__":
    demo.launch()