zaafirriaz committed on
Commit 94304ce · verified · 1 Parent(s): 88d7414

Update app.py

Files changed (1): app.py +1 -20
app.py CHANGED
@@ -1,27 +1,8 @@
1
- import os
2
- import gdown
3
  from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
4
  import gradio as gr
5
 
6
- # Define Google Drive file IDs and corresponding file names
7
- files = {
8
- 'vocab.json': '1b0lO8qDUE6N36yXZzokpCTCG-13GO8Nx',
9
- 'merges.txt': '1R2W6k7iHlkzoxLmd1pMQZ9Y7phHjaLFk',
10
- 'config.json': '1U5PzBzLyWhs5Z1smW5w7P-PsJZJ_JB1c',
11
- 'generation_config.json': '1GqkR8f_TMCN8Qls4YmK5E7YtP8V9IW27',
12
- 'special_tokens_map.json': '1R4MkInjvH_u6IrW_RR1vc_R5nUQV-J5S',
13
- 'tokenizer_config.json': '1l4OGiYwJ4GR9RMIvMX8f8xSkbMC_76v7',
14
- 'training_args.bin': '1YPDBwO6Q1cszH_K1bVwXkZn7BtI5G2Ex'
15
- }
16
-
17
- # Download files from Google Drive
18
- for file_name, file_id in files.items():
19
- url = f'https://drive.google.com/uc?id={file_id}'
20
- output = file_name
21
- gdown.download(url, output, quiet=False)
22
-
23
  # Load the model and tokenizer
24
- model_path = '.' # Path to the current directory where files are downloaded
25
 
26
  tokenizer = AutoTokenizer.from_pretrained(model_path)
27
  model = AutoModelForSeq2SeqLM.from_pretrained(model_path)
 
 
 
1
  from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
2
  import gradio as gr
3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  # Load the model and tokenizer
5
+ model_path = '.' # Path to the current directory where files are located
6
 
7
  tokenizer = AutoTokenizer.from_pretrained(model_path)
8
  model = AutoModelForSeq2SeqLM.from_pretrained(model_path)
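
The hunk ends after loading the tokenizer and model from the local directory; the Gradio interface part of app.py is not shown in this commit. A minimal sketch of how the imported pipeline and gradio objects are typically wired together is below; the task name, function, and interface layout are assumptions, not part of this diff.

# Hypothetical continuation of app.py (not shown in this commit's diff):
# expose the locally loaded seq2seq model through a simple Gradio text box.
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
import gradio as gr

model_path = '.'
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForSeq2SeqLM.from_pretrained(model_path)

# text2text-generation is the standard pipeline task for seq2seq models
generator = pipeline('text2text-generation', model=model, tokenizer=tokenizer)

def generate(text):
    # The pipeline returns a list of dicts; keep only the generated string
    return generator(text, max_length=128)[0]['generated_text']

demo = gr.Interface(fn=generate, inputs='text', outputs='text')
demo.launch()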