import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import os

# Set the path to your local model directory
model_path = "./bart_samsum"

# Cache the tokenizer and model so they are not reloaded on every Streamlit rerun
@st.cache_resource
def load_model(path):
    tokenizer = AutoTokenizer.from_pretrained(path)
    model = AutoModelForSeq2SeqLM.from_pretrained(path)
    return tokenizer, model

# Check if the model path exists
if not os.path.exists(model_path):
    st.error(f"The path {model_path} does not exist. Please check the path.")
else:
    # Load the tokenizer and model from the local directory
    tokenizer, model = load_model(model_path)

    # Streamlit app UI
    st.title("BART Summarization Model")

    input_text = st.text_area("Input Text", placeholder="Enter text here...")

    if st.button("Generate Summary"):
        if not input_text.strip():
            st.warning("Please enter some text to summarize.")
        else:
            # Tokenize the input (truncating to BART's 1024-token limit) and generate a summary
            inputs = tokenizer(input_text, return_tensors="pt", truncation=True, max_length=1024)
            summary_ids = model.generate(**inputs, max_length=150, num_beams=4, early_stopping=True)
            summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
            
            # Display the summary
            st.subheader("Generated Summary")
            st.write(summary)

# Optionally, display model information or statistics in the sidebar (a sketch follows below)
st.sidebar.title("Model Information")
st.sidebar.write("This app uses a fine-tuned BART model for summarization.")
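
# A possible extension (sketch, not part of the original app): report a basic
# statistic about the loaded model in the sidebar. This assumes the model was
# loaded successfully above; the parameter count is computed with PyTorch.
if os.path.exists(model_path):
    num_parameters = sum(p.numel() for p in model.parameters())
    st.sidebar.write(f"Parameters: {num_parameters:,}")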