import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import os

# Set the path to your local model directory
model_path = "./bart_samsum"

# Check if the model path exists
if not os.path.exists(model_path):
    st.error(f"The path {model_path} does not exist. Please check the path.")
else:
    # Load the tokenizer and model from the local directory
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_path)

    # Streamlit app UI
    st.title("BART Summarization Model")
    # Use a placeholder instead of a default value so the hint text is never
    # accidentally summarized when the user clicks the button without typing
    input_text = st.text_area("Input Text", placeholder="Enter text here...")

    if st.button("Generate Summary"):
        if not input_text.strip():
            st.warning("Please enter some text to summarize.")
        else:
            # Tokenize the input, truncating to BART's 1024-token limit,
            # and generate a summary with beam search
            inputs = tokenizer(
                input_text, return_tensors="pt", truncation=True, max_length=1024
            )
            summary_ids = model.generate(
                inputs["input_ids"],
                max_length=150,
                num_beams=4,
                early_stopping=True,
            )
            summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)

            # Display the summary
            st.subheader("Generated Summary")
            st.write(summary)

# Sidebar with model information
st.sidebar.title("Model Information")
st.sidebar.write("This app uses a fine-tuned BART model for summarization.")
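
# A possible refinement, not part of the original app: cache the model load so it
# is not repeated on every Streamlit rerun. This is a minimal sketch assuming
# Streamlit >= 1.18, which provides st.cache_resource; the function name
# load_summarizer is illustrative. To use it, replace the direct from_pretrained
# calls above with: tokenizer, model = load_summarizer(model_path)
@st.cache_resource
def load_summarizer(path):
    """Load the tokenizer and model once and reuse them across reruns."""
    cached_tokenizer = AutoTokenizer.from_pretrained(path)
    cached_model = AutoModelForSeq2SeqLM.from_pretrained(path)
    return cached_tokenizer, cached_model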