Your Name committed on
Commit a74d750 · Parent: 459fb48

runs again

Files changed (2):
  1. _safetensors.py +20 -10
  2. run_test.py +5 -5
_safetensors.py CHANGED
@@ -1,20 +1,31 @@
 # ran into memory issues with safetensors. this code moves by them.
-import contextlib, os
+import contextlib, json, mmap, os
+
+import torch
+
+# huggingface has a size limit on individual files
+# this should shard its files
+# the filename would be some kind of base name that would turn into the index name
+# presently state is tracked assuming one file
+# an organized approach would be to separate it into two classes
+
+# what would the internal interface provided to the outer class look like?
+
 
 class WritingSafeTensors:
-    def __init__(self, filename, **metadata):
+    def __init__(self, filename, max_size=16*1024*1024*1024, **metadata):
         self.filename = filename
         self.fd = os.open(self.filename, os.O_RDWR | os.O_CREAT)
         self.size = 0
         self.capacity = 0
         self.mmapview = None
-        self.header = {'__metadata__': metadata}
+        self.header = {'__metadata__': {k:str(v) for k,v in metadata.items()}}
     def _reserve(self, length):
         if self.size + length > self.capacity:
             new_capacity = self.size * 2
             if new_capacity < self.size + length:
                 new_capacity = (((self.size + length)*2 - 1) // mmap.PAGESIZE + 1) * mmap.PAGESIZE
-            os.truncate(self.fn, new_capacity)
+            os.truncate(self.filename, new_capacity)
             self.mmapview = memoryview(mmap.mmap(self.fd, new_capacity))
             self.capacity = new_capacity
     def add(self, name, tensor):
@@ -23,10 +34,10 @@ class WritingSafeTensors:
         self._reserve(length)
         torch.frombuffer(
             self.mmapview[self.size:self.size+length],
-            dtype=tensor.dtype, count=tensor.numel,
+            dtype=tensor.dtype, count=tensor.numel(),
         ).view(tensor.shape)[:] = tensor
         end = self.size + length
-        self.header[descr] = {
+        self.header[name] = {
             'dtype':
                 str(tensor.dtype).rsplit('.',1)[-1]
                 .replace('float','F')
@@ -42,14 +53,13 @@ class WritingSafeTensors:
         self.size = end
     def finalize(self):
         print(self.filename, '...')
-        import pdb; pdb.set_trace()
-        header = json.dumps(self.header).encode()
+        header = json.dumps(self.header, separators=[',',':']).encode()
         insert = len(header) + 8
         self._reserve(insert)
         self.mmapview[insert:insert+self.size] = self.mmapview[:self.size]
         self.size += insert
-        self[:8] = len(header).to_bytes(8, 'little')
-        self[8:insert] = header
+        self.mmapview[:8] = len(header).to_bytes(8, 'little')
+        self.mmapview[8:insert] = header
         del self.header
         del self.mmapview
         os.close(self.fd)
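
For reference, finalize() above lays the file out in the standard safetensors format: an 8-byte little-endian header length, then the JSON header, then the raw tensor bytes. A minimal sketch (not part of this commit) of reading that header back:

    import json, struct

    def read_safetensors_header(path):
        with open(path, 'rb') as f:
            # first 8 bytes: little-endian length of the JSON header
            (header_len,) = struct.unpack('<Q', f.read(8))
            # the header maps each tensor name to dtype, shape, and
            # data_offsets (byte ranges relative to the end of the header);
            # '__metadata__' holds the stringified kwargs from __init__
            return json.loads(f.read(header_len))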
run_test.py CHANGED
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-import mmap, os, sys
+import os, sys
 
 STORE_WEIGHTS = False
 FAKE_H100 = False
@@ -75,18 +75,18 @@ def hook(module, inputs, outputs):
         inputs = module._hf_hook.pre_forward(module, *inputs)
     for idx, input in enumerate(inputs):
         if isinstance(input, torch.Tensor):
-            store_tensor(f'{name}.input.{idx}', input);
+            SafeTensors.add(f'{name}.input.{idx}', input);
     if STORE_WEIGHTS and not list(module.children()):
         for wtname, wt in list(module.named_parameters()) + list(module.named_buffers()):
-            store_tensor(f'{name}.{wtname}', wt)
+            SafeTensors.add(f'{name}.{wtname}', wt)
     if HAS_HF_HOOK:
         outputs = module._hf_hook.post_forward(module, outputs)
     if isinstance(outputs, torch.Tensor):
-        store_tensor(f'{name}.output', outputs);
+        SafeTensors.add(f'{name}.output', outputs);
     else:
         for idx, output in enumerate(outputs):
             if isinstance(output, torch.Tensor):
-                store_tensor(f'{name}.output.{idx}', output);
+                SafeTensors.add(f'{name}.output.{idx}', output);
     IDX += 1
 
 for module in pipe.model.modules():
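
Taken together, the two files now cooperate as in this minimal usage sketch, assuming run_test.py builds a module-level WritingSafeTensors instance named SafeTensors before registering the hooks (the filename and metadata below are illustrative, not from the commit):

    import torch
    from _safetensors import WritingSafeTensors

    # metadata keyword arguments are stringified into '__metadata__'
    SafeTensors = WritingSafeTensors('activations.safetensors', model='example')

    # the forward hooks call .add() for each tensor they see
    SafeTensors.add('layer.0.input.0', torch.zeros(2, 3))
    SafeTensors.add('layer.0.output', torch.ones(2, 3))

    # finalize() shifts the data forward, prepends the JSON header,
    # and closes the file
    SafeTensors.finalize()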