3v324v23 committed on
Commit 7774d63 · 1 Parent(s): 23bf651

specify low_cpu_mem_usage=True to halve loading requirements

Files changed (1):
  1. run_test.py  +2 -2
run_test.py CHANGED
@@ -17,7 +17,7 @@ os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
 torch.manual_seed(0)
 random.seed(0)
 np.random.seed(0)
-import accelerate, safetensors.torch, transformers, torch, tqdm
+import accelerate, safetensors.torch, transformers, tqdm
 
 config = transformers.AutoConfig.from_pretrained(model_id, revision=revision, trust_remote_code=True)
 tokenizer = transformers.AutoTokenizer.from_pretrained(model_id, revision=revision, trust_remote_code=True)
@@ -25,7 +25,7 @@ Model = transformers.AutoModelForCausalLM
 if config.model_type == 'deepseek_v3':
     #Model = transformers.DeepseekV3ForCausalLM
     pass
-model = Model.from_pretrained(model_id, revision=revision, trust_remote_code=True, torch_dtype=torch.float64, use_safetensors=True)
+model = Model.from_pretrained(model_id, revision=revision, trust_remote_code=True, torch_dtype=torch.float64, use_safetensors=True, low_cpu_mem_usage=True)
 if config.model_type == 'deepseek_v3':
     model._supports_cache_class = False
 #model = accelerate.cpu_offload(model, 'cuda:0', offload_buffers=True)
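
Note (not part of the commit): by default, from_pretrained first materializes the model with randomly initialized weights and then loads the checkpoint over it, so two full copies of the weights briefly coexist in host memory. With low_cpu_mem_usage=True (which requires accelerate to be installed), the model skeleton is created on the meta device and checkpoint tensors are streamed in directly, so the peak is roughly one copy of the weights, matching the commit message's "halve loading requirements". A minimal sketch of the two paths, using a hypothetical small stand-in model id:

import transformers

model_id = 'sshleifer/tiny-gpt2'  # hypothetical stand-in, not the model from this repo

# Default path: random init first, checkpoint loaded on top
# (peak host memory is roughly 2x the weight size).
model_eager = transformers.AutoModelForCausalLM.from_pretrained(model_id)

# Lazy path: skeleton built on the 'meta' device, checkpoint tensors
# streamed in directly (peak is roughly 1x; needs accelerate installed).
model_lazy = transformers.AutoModelForCausalLM.from_pretrained(
    model_id,
    low_cpu_mem_usage=True,
)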