Update app.py
app.py CHANGED
@@ -3,8 +3,16 @@ from peft import PeftModel, PeftConfig
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 
+
+if torch.cuda.is_available():
+    device = torch.device("cuda")
+    print("GPU is available!")
+else:
+    device = torch.device("cpu")
+    print("GPU is not available, using CPU.")
+
 # Load the model and config when the script starts
-peft_model_id =
+peft_model_id = "phearion/bigbrain-v0.0.1"
 config = PeftConfig.from_pretrained(peft_model_id)
 model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, return_dict=True)
 tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
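Note that within this hunk the commit selects a device but never moves the model onto it, and the PeftModel import visible in the hunk header is never used to attach the adapter. A minimal sketch of how those remaining steps might look in app.py, assuming the variables defined above are in scope (the prompt string and generation settings are illustrative assumptions, not taken from this commit):

# Sketch only (not part of this commit): attach the LoRA adapter to the
# base model and run one generation on the selected device.
model = PeftModel.from_pretrained(model, peft_model_id)
model.to(device)
model.eval()

# Hypothetical prompt; the Space's real input handling is not shown in this diff.
inputs = tokenizer("Hello, world!", return_tensors="pt").to(device)
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=50)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))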