Shuja007 committed
Commit 85de71b · verified · Parent: 712901d

Update app.py

Files changed (1): app.py (+23, -28)
app.py CHANGED
@@ -1,36 +1,31 @@
 import streamlit as st
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
-import os
 
-# Set the path to your Google Drive folder
-model_path = '/content/drive/My Drive/bart_samsum'
+# Replace with your model name on Hugging Face Hub
+model_name = "your-username/your-model-name"
 
-# Check if the model path exists
-if not os.path.exists(model_path):
-    st.error(f"The path {model_path} does not exist. Please check the path.")
-else:
-    # Load the tokenizer and model from Google Drive
-    tokenizer = AutoTokenizer.from_pretrained(model_path)
-    model = AutoModelForSeq2SeqLM.from_pretrained(model_path)
+# Load the tokenizer and model from Hugging Face Hub
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
 
-    # Streamlit app UI
-    st.title("BART Summarization Model")
+# Streamlit app UI
+st.title("BART Summarization Model")
 
-    input_text = st.text_area("Input Text", "Enter text here...")
+input_text = st.text_area("Input Text", "Enter text here...")
 
-    if st.button("Generate Summary"):
-        if not input_text.strip():
-            st.warning("Please enter some text to summarize.")
-        else:
-            # Tokenize and generate summary
-            inputs = tokenizer(input_text, return_tensors="pt")
-            summary_ids = model.generate(inputs["input_ids"], max_length=150, num_beams=4, early_stopping=True)
-            summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
-
-            # Display the summary
-            st.subheader("Generated Summary")
-            st.write(summary)
+if st.button("Generate Summary"):
+    if not input_text.strip():
+        st.warning("Please enter some text to summarize.")
+    else:
+        # Tokenize and generate summary
+        inputs = tokenizer(input_text, return_tensors="pt")
+        summary_ids = model.generate(inputs["input_ids"], max_length=150, num_beams=4, early_stopping=True)
+        summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
+
+        # Display the summary
+        st.subheader("Generated Summary")
+        st.write(summary)
 
-    # Optionally, you can add a section to display model information or statistics
-    st.sidebar.title("Model Information")
-    st.sidebar.write("This app uses a fine-tuned BART model for summarization.")
+# Optionally, you can add a section to display model information or statistics
+st.sidebar.title("Model Information")
+st.sidebar.write("This app uses a fine-tuned BART model for summarization.")