3v324v23 committed on
Commit
54fe972
·
1 Parent(s): a74d750

added a variable to set device map, placed model kws in another variable and passed as metadata

Browse files
Files changed (1) hide show
  1. run_test.py +7 -7
run_test.py CHANGED
@@ -5,6 +5,7 @@ STORE_WEIGHTS = False
5
  FAKE_H100 = False
6
  TORCH_DTYPE = 'float64'
7
  USE_GPU = False
 
8
  model_id, revision = sys.argv[1:]
9
  user, model = model_id.split('/')
10
  prompt = 'Once upon a time,'
@@ -38,9 +39,10 @@ max_memory = accelerate.utils.get_max_memory()
38
  if not USE_GPU:
39
  max_memory = {'cpu': max_memory['cpu']}
40
  max_memory['cpu'] //= 3
41
- model = Model.from_pretrained(model_id, revision=revision, trust_remote_code=True,
42
  torch_dtype=getattr(torch, TORCH_DTYPE, TORCH_DTYPE), use_safetensors=True, low_cpu_mem_usage=True,
43
- device_map='auto', max_memory=max_memory, offload_state_dict=True, offload_buffers=True)
 
44
 
45
  if config.model_type == 'deepseek_v3':
46
  model._supports_cache_class = False
@@ -50,11 +52,9 @@ pipe = transformers.pipeline('text-generation', model=model, config=config, toke
50
  SafeTensors = WritingSafeTensors(
51
  fn,
52
  prompt = prompt,
53
- model_id = model_id,
54
- revision = revision,
55
- weights = STORE_WEIGHTS,
56
- torch_dtype = TORCH_DTYPE,
57
- gpu = USE_GPU,
58
  **{
59
  f'{hw}:{idx}':
60
  str(getattr(torch, hw).get_device_properties(idx))
 
5
  FAKE_H100 = False
6
  TORCH_DTYPE = 'float64'
7
  USE_GPU = False
8
+ DEVICE_MAP = 'auto'
9
  model_id, revision = sys.argv[1:]
10
  user, model = model_id.split('/')
11
  prompt = 'Once upon a time,'
 
39
  if not USE_GPU:
40
  max_memory = {'cpu': max_memory['cpu']}
41
  max_memory['cpu'] //= 3
42
+ model_kwparams = dict(pretrained_model_name_or_path=model_id, revision=revision, trust_remote_code=True,
43
  torch_dtype=getattr(torch, TORCH_DTYPE, TORCH_DTYPE), use_safetensors=True, low_cpu_mem_usage=True,
44
+ device_map=DEVICE_MAP, max_memory=max_memory, offload_state_dict=True, offload_buffers=True)
45
+ model = Model.from_pretrained(**model_kwparams)
46
 
47
  if config.model_type == 'deepseek_v3':
48
  model._supports_cache_class = False
 
52
  SafeTensors = WritingSafeTensors(
53
  fn,
54
  prompt = prompt,
55
+ store_weights = STORE_WEIGHTS,
56
+ use_gpu = USE_GPU,
57
+ **model_kwparams,
 
 
58
  **{
59
  f'{hw}:{idx}':
60
  str(getattr(torch, hw).get_device_properties(idx))