"""Streamlit app: summarize user-provided text with a fine-tuned BART model.

The model is pulled from the Hugging Face Hub. Loading is wrapped in
``st.cache_resource`` so Streamlit's rerun-on-every-interaction model does
not reload the weights on each button click.
"""

import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Replace with your model name on the Hugging Face Hub.
MODEL_NAME = "your-username/your-model-name"


@st.cache_resource
def load_model(model_name: str):
    """Load and cache the tokenizer and seq2seq model.

    Cached across Streamlit reruns; without this, every widget interaction
    would re-download/re-initialize the model.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    return tokenizer, model


tokenizer, model = load_model(MODEL_NAME)

# ---- Streamlit UI ----
st.title("BART Summarization Model")

input_text = st.text_area("Input Text", "Enter text here...")

if st.button("Generate Summary"):
    if not input_text.strip():
        st.warning("Please enter some text to summarize.")
    else:
        # Truncate to the model's maximum input length so long documents
        # don't overflow BART's position embeddings.
        inputs = tokenizer(
            input_text,
            return_tensors="pt",
            truncation=True,
            max_length=tokenizer.model_max_length,
        )
        summary_ids = model.generate(
            inputs["input_ids"],
            max_length=150,
            num_beams=4,
            early_stopping=True,
        )
        summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)

        # Display the summary
        st.subheader("Generated Summary")
        st.write(summary)

# Sidebar with model information.
st.sidebar.title("Model Information")
st.sidebar.write("This app uses a fine-tuned BART model for summarization.")