Spaces: Runtime error
Gumelar Teja Sukma
committed
Commit · a72d296
1 Parent(s): 9bf3993
bug fix
app.py CHANGED
@@ -8,13 +8,14 @@ os.environ["TOKENIZERS_PARALLELISM"] = "false"  # Avoid extra CPU load
 torch.set_num_threads(2)  # Per the free Spaces CPU limit
 
 # Load model & tokenizer
-# model_name_or_path = "TheBloke/Llama-2-7B-Chat-GPTQ"
 print("PyTorch Version", torch.__version__)  # PyTorch version
 print("Is GPU Available", torch.cuda.is_available())  # Is a GPU detected?
 print("CPU cores:", psutil.cpu_count())
 print("RAM (GB):", psutil.virtual_memory().total / (1024**3))
 
-model_name_or_path = "TheBloke/Llama-2-7B-Chat-
+# model_name_or_path = "TheBloke/Llama-2-7B-Chat-GPTQ"
+model_name_or_path = "meta-llama/Llama-2-7b-chat-hf"
+# model_name_or_path = "TheBloke/Llama-2-7B-Chat-GGUF"
 # model_name_or_path = "TheBloke/Mistral-7B-v0.1-GPTQ"
 # model_name_or_path = "unsloth/DeepSeek-R1-0528-GGUF"  # 3x faster than Mistral-7B
 # tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
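Since the commit leaves the tokenizer line commented out, here is a minimal sketch of how the newly selected model could be loaded and exercised on a CPU-only Space. The rest of app.py is not shown in this diff, so everything beyond the model_name_or_path assignment (the prompt, generation parameters, dtype choice) is an assumption, not the author's code. Note also that meta-llama/Llama-2-7b-chat-hf is a gated repository requiring an HF access token, and a 7B model is a tight fit in the roughly 16 GB RAM of a free CPU Space, which may relate to the Space's "Runtime error" status.

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name_or_path = "meta-llama/Llama-2-7b-chat-hf"  # gated repo; needs an HF token

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
# bfloat16 halves memory vs. float32 and has CPU kernels in modern PyTorch;
# even so, ~13 GB of weights is close to the free-tier RAM ceiling.
model = AutoModelForCausalLM.from_pretrained(
    model_name_or_path,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,  # avoid materializing a second full copy while loading
)
model.eval()

# Hypothetical usage: [INST] ... [/INST] is the Llama-2-chat prompt format.
inputs = tokenizer("[INST] Hello! [/INST]", return_tensors="pt")
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=32, do_sample=False)
print(tokenizer.decode(out[0], skip_special_tokens=True))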