John Smith
committed on
Update app.py
app.py
CHANGED
@@ -1,12 +1,18 @@
 import gradio as gr
-from transformers import AutoTokenizer, AutoModelForCausalLM
+from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaTokenizer
 import torch
 
 # Load the model and tokenizer
 model_name = "cognitivecomputations/TinyDolphin-2.8-1.1b"
-tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 
+# Try to load the tokenizer, with a fallback option
+try:
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
+except ValueError:
+    print("Failed to load AutoTokenizer. Falling back to LlamaTokenizer.")
+    tokenizer = LlamaTokenizer.from_pretrained(model_name)
+
 # Move model to GPU if available
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model.to(device)
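After this change, app.py loads the model first and only then resolves the tokenizer, falling back to LlamaTokenizer when AutoTokenizer raises a ValueError. A quick way to check that whichever tokenizer was loaded actually works with the model is a short generation round-trip; the snippet below is a minimal sketch that reuses the variables defined in app.py (the prompt and max_new_tokens value are illustrative, not part of this commit):

# Minimal smoke test (illustrative, not part of the commit): verify the
# loaded tokenizer and model can round-trip a prompt.
prompt = "Hello, how are you?"
inputs = tokenizer(prompt, return_tensors="pt").to(device)
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))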