Your Name committed on
Commit 0b22441 · 1 Parent(s): e7df8ec

untested run_test.py code, committing to try on another system

Files changed (4)
  1. .gitattributes +1 -0
  2. _safetensors.py +78 -0
  3. compare_safetensors.py +6 -1
  4. run_test.py +44 -13
.gitattributes CHANGED
@@ -58,3 +58,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
 *.logits.safetensors filter=lfs diff=lfs merge=lfs -text
+*.logits-and-weights.safetensors filter=lfs diff=lfs merge=lfs -text
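Quick check (illustrative, not part of the commit; assumes it runs inside a checkout of this repo): git check-attr should now report the lfs filter for filenames matching the new pattern. The sample filename below is invented.

import subprocess
out = subprocess.run(
    ['git', 'check-attr', 'filter', '--', 'user_model_rev.logits-and-weights.safetensors'],
    capture_output=True, text=True, check=True,
).stdout
print(out)  # expect '... filter: lfs'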
_safetensors.py ADDED
@@ -0,0 +1,78 @@
+# ran into memory issues with safetensors. this code works around them by
+# streaming each tensor straight into a memory-mapped file.
+import json, mmap, os
+import torch
+
+class WritingSafeTensors:
+    def __init__(self, filename, **metadata):
+        self.filename = filename
+        self.fd = os.open(self.filename, os.O_RDWR | os.O_CREAT)
+        self.size = 0
+        self.capacity = 0
+        self.mmapview = None
+        self.header = {'__metadata__': metadata}
+    def _reserve(self, length):
+        # grow the backing file geometrically, rounded up to whole pages
+        if self.size + length > self.capacity:
+            new_capacity = self.size * 2
+            if new_capacity < self.size + length:
+                new_capacity = (((self.size + length)*2 - 1) // mmap.PAGESIZE + 1) * mmap.PAGESIZE
+            os.truncate(self.filename, new_capacity)
+            self.mmapview = memoryview(mmap.mmap(self.fd, new_capacity))
+            self.capacity = new_capacity
+    def add(self, name, tensor):
+        print(name, '...')
+        length = tensor.numel() * tensor.dtype.itemsize
+        self._reserve(length)
+        # copy the tensor's bytes directly into the mapped region
+        torch.frombuffer(
+            self.mmapview[self.size:self.size+length],
+            dtype=tensor.dtype, count=tensor.numel(),
+        ).view(tensor.shape)[:] = tensor
+        end = self.size + length
+        self.header[name] = {
+            'dtype':
+                # e.g. torch.float32 -> 'F32', torch.bfloat16 -> 'BF16'
+                str(tensor.dtype).rsplit('.',1)[-1]
+                .replace('float','F')
+                .replace('uint','U')
+                .replace('int','I')
+                .removesuffix('fn')
+                .upper(),
+            'shape':
+                list(tensor.shape),
+            'data_offsets':
+                [self.size, end],
+        }
+        self.size = end
+    def finalize(self):
+        print(self.filename, '...')
+        header = json.dumps(self.header).encode()
+        insert = len(header) + 8
+        self._reserve(insert)
+        # shift the data forward to make room for the header, copying
+        # highest chunks first so overlapping regions are never clobbered
+        CHUNK = 1 << 24
+        for off in range((self.size - 1) // CHUNK * CHUNK, -1, -CHUNK):
+            chunk = bytes(self.mmapview[off:min(off + CHUNK, self.size)])
+            self.mmapview[insert + off:insert + off + len(chunk)] = chunk
+        self.size += insert
+        self.mmapview[:8] = len(header).to_bytes(8, 'little')
+        self.mmapview[8:insert] = header
+        del self.header
+        del self.mmapview
+        os.close(self.fd)
+        os.truncate(self.filename, self.size)
+    def delete(self):
+        print('deleting', self.filename, '...')
+        del self.header
+        del self.mmapview
+        os.close(self.fd)
+        os.unlink(self.filename)
+    def __enter__(self):
+        return self
+    def __exit__(self, Exc, exc, tb):
+        if Exc is None:
+            self.finalize()
+        else:
+            self.delete()
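As a sanity check (illustrative, not part of the commit): how WritingSafeTensors is meant to be used, and how to read the result back following the layout it writes: an 8-byte little-endian header length, then the JSON header, then raw tensor data. The filename demo.safetensors and the note metadata key are invented for the example.

import json, struct
import torch
from _safetensors import WritingSafeTensors

# write one tensor; finalize() runs on clean exit from the with-block
with WritingSafeTensors('demo.safetensors', note='toy example') as st:
    st.add('weight', torch.arange(6, dtype=torch.float32).reshape(2, 3))

# read the header back: u64 little-endian length, then that many JSON bytes
with open('demo.safetensors', 'rb') as f:
    (header_len,) = struct.unpack('<Q', f.read(8))
    header = json.loads(f.read(header_len))
print(header['weight'])  # {'dtype': 'F32', 'shape': [2, 3], 'data_offsets': [0, 24]}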
compare_safetensors.py CHANGED
@@ -11,6 +11,7 @@ def compare(*fns):
     print('dtypes ...')
     dtypes = {k: [files[0].get_slice(k)[0].dtype,files[1].get_slice(k)[0].dtype] for k in files[0].keys()}
     dtypes = {k: [min(dts, key=lambda dt: dt.itemsize),max(dts, key=lambda dt: dt.itemsize)] for k, dts in dtypes.items()}
+    mismatching_dtypes = [k for k, dts in dtypes.items() if dts[0] is not dts[1]]
     print('midpoints ...')
     avgs = {k:((files[0].get_tensor(k) + files[1].get_tensor(k))/2).to(dtypes[k][0]) for k in files[0].keys()}
     print('dists ...')
@@ -20,6 +21,7 @@ def compare(*fns):
     mismatching_keys = [k for k, d in dists.items() if (d!=0).any()]

     print(f'{len(mismatching_keys)/len(files[0].keys())*100:.2f}% keys mismatch')
+    print(f'{len(mismatching_dtypes)/len(files[0].keys())*100:.2f}% dtypes mismatch')

     #errs = {k:(dists[k] / avgs[k]).nan_to_num() for k in files[0].keys()}

@@ -45,7 +47,10 @@ def compare(*fns):
     print('.. ', end='')
     for idx in range_:
         for token in range(head_tokens.values.shape[-2]):
-            print(f'{head_tokens.indices[token,idx]}({head_tokens.values[token,idx]*100:.2f}%) ',end='')
+            if range_idx == 0:
+                print(f'{head_tokens.indices[token][idx]}({head_tokens.values[token][idx]*100:.3f}%) ',end='')
+            else: # the % format changes from f to e to show smaller values
+                print(f'{head_tokens.indices[token][idx]}({head_tokens.values[token][idx]*100:.3e}%) ',end='')
     print()

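Illustrative aside (not the commit's code): the comparison works by averaging each pair of tensors and measuring distance from that midpoint, so any nonzero distance flags a mismatching key. A toy sketch of that idea on invented tensors:

import torch

a = {'w': torch.tensor([1.0, 2.0, 3.0])}  # stands in for files[0]
b = {'w': torch.tensor([1.0, 2.5, 3.0])}  # stands in for files[1]

avgs  = {k: (a[k] + b[k]) / 2 for k in a}       # midpoints
dists = {k: (a[k] - avgs[k]).abs() for k in a}  # distance from midpoint
mismatching_keys = [k for k, d in dists.items() if (d != 0).any()]
print(f'{len(mismatching_keys)/len(a)*100:.2f}% keys mismatch')  # 100.00%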
run_test.py CHANGED
@@ -1,9 +1,13 @@
 #!/usr/bin/env python3
-import os, sys
+import mmap, os, sys

 STORE_WEIGHTS = False
+FAKE_H100 = False
+TORCH_DTYPE = 'float64'
+USE_GPU = False
 model_id, revision = sys.argv[1:]
 user, model = model_id.split('/')
+prompt = 'Once upon a time,'
 fn = f'{user}_{model}_{revision}.{"logits-and-weights" if STORE_WEIGHTS else "logits"}.safetensors'

 import torch, numpy as np, random
@@ -17,7 +21,11 @@ os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
 torch.manual_seed(0)
 random.seed(0)
 np.random.seed(0)
+if FAKE_H100:
+    torch.cuda.is_available = lambda: True
+    torch.cuda.get_device_capability = lambda: [9,0]
 import accelerate, safetensors.torch, transformers, tqdm
+from _safetensors import WritingSafeTensors

 config = transformers.AutoConfig.from_pretrained(model_id, revision=revision, trust_remote_code=True)
 tokenizer = transformers.AutoTokenizer.from_pretrained(model_id, revision=revision, trust_remote_code=True)
@@ -26,18 +34,37 @@ if config.model_type == 'deepseek_v3':
     #Model = transformers.DeepseekV3ForCausalLM
     pass

-model = Model.from_pretrained(model_id, revision=revision, trust_remote_code=True, torch_dtype=torch.float64, use_safetensors=True, low_cpu_mem_usage=True,
-                              device_map='auto', max_memory={'cpu':accelerate.utils.get_max_memory()['cpu']}, offload_state_dict=True, offload_buffers=True)
+max_memory = accelerate.utils.get_max_memory()
+if not USE_GPU:
+    max_memory = {'cpu': max_memory['cpu']}
+max_memory['cpu'] //= 3
+model = Model.from_pretrained(model_id, revision=revision, trust_remote_code=True,
+                              torch_dtype=getattr(torch, TORCH_DTYPE, TORCH_DTYPE), use_safetensors=True, low_cpu_mem_usage=True,
+                              device_map='auto', max_memory=max_memory, offload_state_dict=True, offload_buffers=True)

 if config.model_type == 'deepseek_v3':
     model._supports_cache_class = False
-#model = accelerate.cpu_offload(model, 'cuda:0', offload_buffers=True)

 pipe = transformers.pipeline('text-generation', model=model, config=config, tokenizer=tokenizer)

-tensors = {}
-def store_tensor(descr, tensor):
-    tensors[descr] = tensor.cpu().detach().contiguous().clone()
+SafeTensors = WritingSafeTensors(
+    fn,
+    prompt = prompt,
+    model_id = model_id,
+    revision = revision,
+    weights = STORE_WEIGHTS,
+    torch_dtype = TORCH_DTYPE,
+    gpu = USE_GPU,
+    **{
+        f'{hw}:{idx}':
+            str(getattr(torch, hw).get_device_properties(idx))
+        for hw in ['npu','mlu','musa','xpu','cuda']
+        if hasattr(torch, hw)
+        for idx in range(getattr(torch,hw).device_count())
+    }
+)
+# the hooks below still call store_tensor; route it into the incremental writer
+store_tensor = SafeTensors.add

 IDX = 0 # IDX is unused
 module_names = {mod:name for name, mod in pipe.model.named_modules()}
@@ -45,25 +72,29 @@ tensors = {}
 def hook(module, inputs, outputs):
     global IDX
     name = module_names[module]
+    HAS_HF_HOOK = hasattr(module, '_hf_hook')
+    if HAS_HF_HOOK:
+        inputs, _kwargs = module._hf_hook.pre_forward(module, *inputs)  # pre_forward returns (args, kwargs)
     for idx, input in enumerate(inputs):
         if isinstance(input, torch.Tensor):
             store_tensor(f'{name}.input.{idx}', input)
+    if STORE_WEIGHTS and not list(module.children()):
+        for wtname, wt in list(module.named_parameters()) + list(module.named_buffers()):
+            store_tensor(f'{name}.{wtname}', wt)
+    if HAS_HF_HOOK:
+        outputs = module._hf_hook.post_forward(module, outputs)
     if isinstance(outputs, torch.Tensor):
         store_tensor(f'{name}.output', outputs)
     else:
         for idx, output in enumerate(outputs):
             if isinstance(output, torch.Tensor):
                 store_tensor(f'{name}.output.{idx}', output)
-    if STORE_WEIGHTS and not list(module.children()):
-        for wtname, wt in list(module.named_parameters()) + list(module.named_buffers()):
-            store_tensor(f'{name}.{wtname}', wt)
     IDX += 1

 for module in pipe.model.modules():
     module.register_forward_hook(hook)

-prompt = 'Once upon a time,'
-output = pipe(prompt, do_sample=False, max_new_tokens=1, temperature=1.0, top_p=1.0)
-safetensors.torch.save_file(tensors, fn, dict(prompt=prompt))
+with SafeTensors:
+    output = pipe(prompt, do_sample=False, max_new_tokens=1, temperature=1.0, top_p=1.0)
 print()
 print(output)
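Illustrative aside (not the commit's code): the capture pattern above (register one forward hook on every module, then record tensor inputs and outputs under the module's dotted name) in a self-contained toy form. The model and names here are invented.

import torch

model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU())
module_names = {mod: name for name, mod in model.named_modules()}
captured = {}

def hook(module, inputs, outputs):
    name = module_names[module]
    for idx, input in enumerate(inputs):
        if isinstance(input, torch.Tensor):
            captured[f'{name}.input.{idx}'] = input.detach().clone()
    if isinstance(outputs, torch.Tensor):
        captured[f'{name}.output'] = outputs.detach().clone()

for module in model.modules():
    module.register_forward_hook(hook)

model(torch.randn(1, 4))
print(sorted(captured))
# ['.input.0', '.output', '0.input.0', '0.output', '1.input.0', '1.output']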