Shuja007 committed on
Commit
712901d
·
verified ·
1 Parent(s): 2b194d9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -29
app.py CHANGED
@@ -1,31 +1,36 @@
"""Streamlit app: summarize dialogues with a fine-tuned BART model.

Loads a seq2seq checkpoint from the Hugging Face Hub and exposes a
single text area + button UI for dialogue summarization.
"""
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# NOTE(review): placeholder repo id — replace with the real Hub repo
# before deploying, otherwise from_pretrained() will fail at startup.
MODEL_REPO = "your-huggingface-username/your-model-repo-name"

# Load tokenizer and model from the Hugging Face Hub.
# (Removed unused `import torch` and `import nltk`/`nltk.download('punkt')`:
# torch was never referenced and no sentence tokenization is performed,
# so the punkt download only slowed startup.)
tokenizer = AutoTokenizer.from_pretrained(MODEL_REPO)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_REPO)

st.title("Dialogue Summarization with BART")

# Input dialogue
dialogue = st.text_area("Enter dialogue:", height=200)

if st.button("Summarize"):
    # Tokenize input; truncate so long dialogues fit the encoder limit.
    inputs = tokenizer(dialogue, max_length=512, truncation=True, return_tensors="pt")

    # Generate summary with beam search.
    summary_ids = model.generate(inputs["input_ids"], max_length=128, num_beams=4, early_stopping=True)
    summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)

    # Display summary
    st.subheader("Summary:")
    st.write(summary)

st.markdown("---")
st.markdown("This app uses a fine-tuned BART model to summarize dialogues. The model was trained on the SAMSum dataset.")
 
 
 
 
 
 
"""Streamlit app: summarize text with a fine-tuned BART (SAMSum) model.

Loads the checkpoint from a local path and exposes a text area + button
UI that generates an abstractive summary with beam search.
"""
import os

import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Path to the fine-tuned model checkpoint.
# NOTE(review): this is a Colab-mounted Google Drive path; it will not
# exist on a deployed Space or any non-Colab host — confirm the intended
# runtime environment and consider loading from the Hub instead.
model_path = '/content/drive/My Drive/bart_samsum'


@st.cache_resource
def _load_model(path):
    """Load tokenizer + model once and reuse across Streamlit reruns.

    Without caching, the original code re-read both from disk on every
    rerun (i.e. every button click), which is very slow for a BART model.
    """
    tokenizer = AutoTokenizer.from_pretrained(path)
    model = AutoModelForSeq2SeqLM.from_pretrained(path)
    return tokenizer, model


# Check if the model path exists before attempting to load.
if not os.path.exists(model_path):
    st.error(f"The path {model_path} does not exist. Please check the path.")
else:
    tokenizer, model = _load_model(model_path)

    # Streamlit app UI
    st.title("BART Summarization Model")

    input_text = st.text_area("Input Text", "Enter text here...")

    if st.button("Generate Summary"):
        if not input_text.strip():
            st.warning("Please enter some text to summarize.")
        else:
            # Tokenize with truncation: the original call had no length
            # cap, so inputs longer than the encoder's maximum position
            # embeddings would crash model.generate().
            inputs = tokenizer(input_text, max_length=1024, truncation=True, return_tensors="pt")
            summary_ids = model.generate(inputs["input_ids"], max_length=150, num_beams=4, early_stopping=True)
            summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)

            # Display the summary
            st.subheader("Generated Summary")
            st.write(summary)

    # Optionally, you can add a section to display model information or statistics
    st.sidebar.title("Model Information")
    st.sidebar.write("This app uses a fine-tuned BART model for summarization.")