Shuja007 committed on
Commit 82f5587 · verified · 1 Parent(s): 8ba42f1

Update app.py

Files changed (1)
  1. app.py +53 -26
app.py CHANGED
@@ -1,50 +1,77 @@
 import streamlit as st
-from transformers import AutoTokenizer, AutoModelForSequenceClassification
+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 import torch
 import gdown
 import os
 
 # Set the title of the Streamlit app
-st.title("Text Classification with Hugging Face Transformers")
+st.title("Text Summarization with Fine-Tuned BART")
 
 # Function to download the model from Google Drive
 def download_model_from_drive(file_id, dest_path):
     url = f'https://drive.google.com/uc?id={file_id}'
-    gdown.download(url, dest_path, quiet=False)
+    try:
+        gdown.download(url, dest_path, quiet=False)
+        st.success(f"Downloaded {dest_path}")
+    except Exception as e:
+        st.error(f"Error downloading {dest_path}: {e}")
+
+# Ensure the model directory exists
+model_dir = 'model'
+if not os.path.exists(model_dir):
+    os.makedirs(model_dir)
+
+# File IDs for your model components
+file_ids = {
+    'model': '1-V2bEtPR9Y3iBXK9zOR-qM5y9hKiQUnF',
+    'config': '1-T2etSP_k_3j5LzunWq8viKGQCQ5RMr_',
+    'tokenizer': '1-cRYNPWqlNNGRxeztympRRfVuy3hWuMY',
+    'vocab': '1-t9AhomeH7YIIpAqCGTok8wjvl0tml0F',
+    'merges': '1-l77_KEdK7GBFjMX_6UXGE-ZTGDraaDm'
+}
 
 # Download the model files
 with st.spinner("Downloading model..."):
-    download_model_from_drive('1-V2bEtPR9Y3iBXK9zOR-qM5y9hKiQUnF', 'model/model.safetensors')
-    download_model_from_drive('1-T2etSP_k_3j5LzunWq8viKGQCQ5RMr_', 'model/config.json')
-    download_model_from_drive('1-cRYNPWqlNNGRxeztympRRfVuy3hWuMY', 'model/tokenizer.json')
-    download_model_from_drive('1-t9AhomeH7YIIpAqCGTok8wjvl0tml0F', 'model/vocab.json')
-    download_model_from_drive('1-l77_KEdK7GBFjMX_6UXGE-ZTGDraaDm', 'model/merges.txt')
+    download_model_from_drive(file_ids['model'], os.path.join(model_dir, 'pytorch_model.bin'))
+    download_model_from_drive(file_ids['config'], os.path.join(model_dir, 'config.json'))
+    download_model_from_drive(file_ids['tokenizer'], os.path.join(model_dir, 'tokenizer.json'))
+    download_model_from_drive(file_ids['vocab'], os.path.join(model_dir, 'vocab.json'))
+    download_model_from_drive(file_ids['merges'], os.path.join(model_dir, 'merges.txt'))
 
 # Load the model and tokenizer
 @st.cache(allow_output_mutation=True)
 def load_model_and_tokenizer():
-    tokenizer = AutoTokenizer.from_pretrained('model')
-    # For Safetensors, you might need a custom loading mechanism
-    model = AutoModelForSequenceClassification.from_pretrained('model', use_safetensors=True)  # Adjust if necessary
-    return tokenizer, model
+    try:
+        tokenizer = AutoTokenizer.from_pretrained(model_dir)
+        model = AutoModelForSeq2SeqLM.from_pretrained(model_dir)
+        return tokenizer, model
+    except Exception as e:
+        st.error(f"Error loading model or tokenizer: {e}")
+        return None, None
 
 tokenizer, model = load_model_and_tokenizer()
 
 # Input text from user
-input_text = st.text_area("Enter the text to classify:")
+input_text = st.text_area("Enter the text to summarize:")
 
-if st.button("Classify"):
+if st.button("Summarize"):
     if input_text:
-        # Tokenize the input text
-        inputs = tokenizer(input_text, return_tensors="pt")
-
-        # Perform classification
-        with torch.no_grad():
-            outputs = model(**inputs)
-
-        # Get the predicted class
-        predicted_class = torch.argmax(outputs.logits, dim=1).item()
-
-        st.write(f"Predicted Class: {predicted_class}")
+        if tokenizer and model:
+            try:
+                # Tokenize the input text
+                inputs = tokenizer(input_text, return_tensors="pt", max_length=512, truncation=True)
+
+                # Perform summarization
+                with torch.no_grad():
+                    summary_ids = model.generate(inputs['input_ids'], max_length=150, num_beams=4, early_stopping=True)
+
+                # Decode the summary
+                summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
+
+                st.write(f"Summary: {summary}")
+            except Exception as e:
+                st.error(f"Error during summarization: {e}")
+        else:
+            st.error("Model or tokenizer not loaded.")
     else:
-        st.write("Please enter some text to classify.")
+        st.write("Please enter some text to summarize.")