Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
import os

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel

# Read the Hugging Face API token from the environment.
# On Spaces this must be configured as a repository secret named HF_TOKEN.
hf_token = os.getenv("HF_TOKEN")
if hf_token is None:
    raise ValueError("HF_TOKEN is not set. Please check your secrets.")

# Base model and the LoRA adapter applied on top of it.
base_model_name = "togethercomputer/Mistral-7B-Instruct-v0.2"
lora_model_name = "TooKeen/neo-blockchain-assistant"

# NOTE: `use_auth_token` is deprecated in transformers (removed in v5);
# `token` is the supported keyword for authenticated Hub downloads.
tokenizer = AutoTokenizer.from_pretrained(base_model_name, token=hf_token)
base_model = AutoModelForCausalLM.from_pretrained(
    base_model_name,
    token=hf_token,
    device_map="auto",  # let accelerate place weights on available devices
)
# Pass the token here as well so a private adapter repo can be fetched.
model = PeftModel.from_pretrained(base_model, lora_model_name, token=hf_token)

# Define the prediction function