Spaces:
Sleeping
Sleeping
Gumelar Teja Sukma
committed on
Commit
·
b31e33d
1
Parent(s):
a72d296
bug fix
Browse files
app.py
CHANGED
@@ -19,7 +19,7 @@ model_name_or_path = "meta-llama/Llama-2-7b-chat-hf"
|
|
19 |
# model_name_or_path = "TheBloke/Mistral-7B-v0.1-GPTQ"
|
20 |
# model_name_or_path = "unsloth/DeepSeek-R1-0528-GGUF" # 3x lebih cepat dari Mistral-7B
|
21 |
# tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
|
22 |
-
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
|
23 |
|
24 |
model = AutoGPTQForCausalLM.from_quantized(
|
25 |
model_name_or_path,
|
|
|
19 |
# model_name_or_path = "TheBloke/Mistral-7B-v0.1-GPTQ"
|
20 |
# model_name_or_path = "unsloth/DeepSeek-R1-0528-GGUF" # 3x lebih cepat dari Mistral-7B
|
21 |
# tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
|
22 |
+
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
|
23 |
|
24 |
model = AutoGPTQForCausalLM.from_quantized(
|
25 |
model_name_or_path,
|