import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch
# Load the model and tokenizer from Hugging Face Hub
model_name = "Shreshth16/My_PEGASUS_Model" # Replace with your model's repo name
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
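# Not strictly required for generate(), but switching to eval mode is common inference
# practice (it disables dropout in modules that use it)
model.eval()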
def summarize(text):
    # No task prefix is prepended here; add one only if the model was fine-tuned with a prefix
    input_text = text
    inputs = tokenizer.encode(input_text, return_tensors="pt", max_length=512, truncation=True).to(device)
    # Beam search (4 beams) with early stopping, capped at 150 generated tokens
    summary_ids = model.generate(inputs, max_length=150, num_beams=4, early_stopping=True)
    summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
    return summary
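# Minimal local sanity check (the sample text is purely illustrative); uncomment to try
# the function outside of the Gradio UI:
# print(summarize("PEGASUS is a transformer model pre-trained with a gap-sentence objective for abstractive summarization."))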
# Define Gradio interface
iface = gr.Interface(
    fn=summarize,
    inputs=gr.Textbox(lines=10, placeholder="Enter text to summarize..."),
    outputs=gr.Textbox(),
    title="PEGASUS Summarization",
    description="Enter text to generate a summary using a trained PEGASUS model.",
)
iface.launch()
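# launch() with no arguments is all a Hugging Face Space needs. For local testing,
# iface.launch(share=True) can be used instead to get a temporary public URL
# (a standard Gradio option, not something this app requires).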