import gradio as gr
from transformers import pipeline

# Load three small, open-source models for a side-by-side comparison.
# GPT-2 and Falcon-RW-1B are decoder-only causal LMs; FLAN-T5 is an
# encoder-decoder, so it runs through the text2text-generation pipeline.
model_1 = pipeline("text-generation", model="gpt2")
# trust_remote_code should only be needed on older transformers releases
# that predate native Falcon support; it is harmless on newer ones.
model_2 = pipeline("text-generation", model="tiiuae/falcon-rw-1b", trust_remote_code=True)
model_3 = pipeline("text2text-generation", model="google/flan-t5-small")
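
# These pipelines default to CPU; if a GPU is available, each accepts a
# device argument, e.g. pipeline("text-generation", model="gpt2", device=0).
# Left unset here on the assumption that the demo should run anywhere.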


def compare_outputs(prompt):
    # max_new_tokens bounds only the generated continuation; max_length
    # would also count prompt tokens and can silently truncate the response
    # for longer prompts.
    out1 = model_1(prompt, max_new_tokens=50, do_sample=True, temperature=0.7)[0]["generated_text"]
    out2 = model_2(prompt, max_new_tokens=50, do_sample=True, temperature=0.7)[0]["generated_text"]
    # FLAN-T5 is instruction-tuned; greedy decoding keeps its answer deterministic.
    out3 = model_3(prompt, max_new_tokens=50)[0]["generated_text"]
    return out1.strip(), out2.strip(), out3.strip()
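
# A minimal sanity check, assuming the models above load on this machine;
# uncomment to print each model's answer once before starting the UI.
# for name, text in zip(("GPT-2", "Falcon-RW-1B", "FLAN-T5 Small"),
#                       compare_outputs("The capital of France is")):
#     print(f"--- {name} ---\n{text}\n")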


gr.Interface(
    fn=compare_outputs,
    inputs=gr.Textbox(lines=4, label="Your Prompt"),
    outputs=[
        gr.Textbox(label="GPT-2 Output"),
        gr.Textbox(label="Falcon-RW-1B Output"),
        gr.Textbox(label="FLAN-T5 Small Output"),
    ],
    title="🧪 LLM Prompt Behavior Explorer",
    description="Compare how small, open-source language models respond to the same prompt.",
).launch()
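# launch() serves the app locally (http://127.0.0.1:7860 by default);
# pass share=True for a temporary public Gradio link.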