3v324v23 committed
Commit 9461170 · 1 Parent(s): f4dd190

notes from deepseek attempt

Files changed (1)
run_test.py +6 -2
run_test.py CHANGED
@@ -5,9 +5,13 @@ STORE_WEIGHTS = False
 DEDUPLICATE_SAFETENSORS = True
 SAVE_ON_CRASH = False
 FAKE_H100 = False
+# notes transformers 4.49.0:
+# - a model with unexpected weights must not have 'disk' anywhere in device_map
+# - a model with dtypes unsupported by numpy must have offload_state_dict = False
+# - dtype=float64 can overflow, no dtype specified at transformers/modeling_attn_mask_utils.py(158)_make_causal_mask() defaulted to float32
 TORCH_DTYPE = 'float64'
 USE_GPU = False
-DEVICE_MAP = 'auto' # 'cuda' for deepseek
+DEVICE_MAP = 'auto'
 model_id, revision = sys.argv[1:]
 user, model = model_id.split('/')
 prompt = 'Once upon a time,'
@@ -64,7 +68,7 @@ if config.model_type == 'deepseek_v3':
 max_memory = accelerate.utils.get_max_memory()
 if not USE_GPU:
     max_memory = {'cpu': max_memory['cpu']}
-    max_memory['cpu'] //= 3
+    #max_memory['cpu'] //= 3
 model_kwparams = dict(pretrained_model_name_or_path=model_id, revision=revision, trust_remote_code=True,
     torch_dtype=getattr(torch, TORCH_DTYPE, TORCH_DTYPE), use_safetensors=True, low_cpu_mem_usage=True,
     device_map=DEVICE_MAP, max_memory=max_memory, offload_state_dict=True, offload_buffers=True)
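For context, a minimal sketch of how these kwargs are presumably consumed: the diff never shows the loader call, so passing model_kwparams to transformers.AutoModelForCausalLM.from_pretrained is an assumption, and the dtype_unsupported_by_numpy flag below is a hypothetical placeholder (not in run_test.py) illustrating how the second 4.49.0 note could be acted on.

# Sketch only, under the assumptions stated above.
import sys

import accelerate.utils
import torch
import transformers

TORCH_DTYPE = 'float64'
USE_GPU = False
DEVICE_MAP = 'auto'

model_id, revision = sys.argv[1:]

# Restrict placement to CPU when no GPU is used, as in the hunk above.
max_memory = accelerate.utils.get_max_memory()
if not USE_GPU:
    max_memory = {'cpu': max_memory['cpu']}

model_kwparams = dict(
    pretrained_model_name_or_path=model_id, revision=revision, trust_remote_code=True,
    torch_dtype=getattr(torch, TORCH_DTYPE, TORCH_DTYPE), use_safetensors=True,
    low_cpu_mem_usage=True, device_map=DEVICE_MAP, max_memory=max_memory,
    offload_state_dict=True, offload_buffers=True)

# Hypothetical switch reflecting the commit note: a model with dtypes numpy
# cannot represent would need offload_state_dict=False.
dtype_unsupported_by_numpy = False  # placeholder flag, not part of the original script
if dtype_unsupported_by_numpy:
    model_kwparams['offload_state_dict'] = False

model = transformers.AutoModelForCausalLM.from_pretrained(**model_kwparams)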