Gumelar Teja Sukma committed
Commit 4e121c2
1 Parent(s): b31e33d
Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -13,8 +13,8 @@ print("Is GPU Available",torch.cuda.is_available())  # Is the GPU detected?
 print("CPU cores:", psutil.cpu_count())
 print("RAM (GB):", psutil.virtual_memory().total / (1024**3))
 
-# model_name_or_path = "TheBloke/Llama-2-7B-Chat-GPTQ"
-model_name_or_path = "meta-llama/Llama-2-7b-chat-hf"
+model_name_or_path = "TheBloke/Llama-2-7B-Chat-GPTQ"
+# model_name_or_path = "meta-llama/Llama-2-7b-chat-hf"
 # model_name_or_path = "TheBloke/Llama-2-7B-Chat-GGUF"
 # model_name_or_path = "TheBloke/Mistral-7B-v0.1-GPTQ"
 # model_name_or_path = "unsloth/DeepSeek-R1-0528-GGUF"  # 3x faster than Mistral-7B
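
For context, a minimal sketch of how the newly selected GPTQ checkpoint might be loaded. This assumes app.py uses the transformers AutoTokenizer / AutoModelForCausalLM API (not shown in this diff); loading GPTQ checkpoints additionally requires the optimum and auto-gptq packages. Only the model_name_or_path value comes from the diff; everything else is an illustrative assumption.

```python
# Hypothetical loading sketch; assumes transformers + optimum + auto-gptq are installed.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name_or_path = "TheBloke/Llama-2-7B-Chat-GPTQ"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
model = AutoModelForCausalLM.from_pretrained(
    model_name_or_path,
    device_map="auto",          # place the quantized weights on GPU if one is available
    torch_dtype=torch.float16,  # GPTQ checkpoints typically run in fp16 activations
)

# Quick smoke test: generate a short completion.
inputs = tokenizer("Hello, how are you?", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```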