hanghangaidoudou committed
Commit 775b7e5 · 1 Parent(s): 43f5ccd

Update README.md

Files changed (1)
  1. README.md +8 -2
README.md CHANGED
@@ -14,14 +14,18 @@ import platform
 import torch
 from transformers import AutoTokenizer, AutoModel
 
-
-model_path = "cntd/CNTDAI-6B"
+#current_dir = os.path.dirname(os.path.abspath(__file__))
+#model_path = os.path.join(current_dir, 'cntd','CNTDAI-6B')
+model_path = "cntd/CNTDAI-6B"
 print("CUDA available:", torch.cuda.is_available())  # check whether the GPU is usable
 print("GPU count:", torch.cuda.device_count())  # number of visible GPUs
 print("CUDA version (via torch):", torch.version.cuda)  # CUDA version reported by torch
 print("Current GPU index:", torch.cuda.current_device())  # index of the current GPU
 tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
 model = AutoModel.from_pretrained(model_path, trust_remote_code=True).half().cuda()
+# Multi-GPU support: use the following two lines instead of the line above, and set num_gpus to the number of GPUs you actually have
+# from utils import load_model_on_gpus
+# model = load_model_on_gpus(model_path, num_gpus=2)
 model = model.eval()
 os_name = platform.system()
 clear_command = 'cls' if os_name == 'Windows' else 'clear'
@@ -68,5 +72,7 @@ def main():
 if __name__ == "__main__":
     main()
 
+
+
 ```
 
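For readers applying this change, here is a minimal consolidated sketch of the loading code as the README reads after the commit. Assumptions: a CUDA-capable GPU is available, `load_model_on_gpus` is a multi-GPU helper shipped with the repo's demo scripts (the diff only references it in a comment), and `import os` is needed only for the commented-out local-path variant.

```python
import os  # only needed if you switch to the local-path variant below

import torch
from transformers import AutoTokenizer, AutoModel

# Load from the Hugging Face Hub by repo id. The two commented lines are the
# local-path variant this commit adds to the README.
model_path = "cntd/CNTDAI-6B"
# current_dir = os.path.dirname(os.path.abspath(__file__))
# model_path = os.path.join(current_dir, 'cntd', 'CNTDAI-6B')

print("CUDA available:", torch.cuda.is_available())
print("GPU count:", torch.cuda.device_count())

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

# Single-GPU path: fp16 weights on the default CUDA device.
model = AutoModel.from_pretrained(model_path, trust_remote_code=True).half().cuda()

# Multi-GPU alternative from the README comment; load_model_on_gpus is assumed
# to come from the repo's demo utilities. Set num_gpus to your actual card count.
# from utils import load_model_on_gpus
# model = load_model_on_gpus(model_path, num_gpus=2)

model = model.eval()
```

The fp16 `.half().cuda()` call is the single-GPU path; the commented `load_model_on_gpus` lines are the README's stated alternative for splitting the model across several cards.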