karl committed
Commit · 5d3e4b5
Parent(s): 49049b7

added more input/output data and drafted deduplicating tensor hash code. may not run; have run into issues deterring testing again.
Browse files
- _bighash.py +91 -0
- _safetensors.py +40 -18
- run_test.py +24 -12
_bighash.py ADDED
@@ -0,0 +1,91 @@
+import torch
+
+# xxhash primes converted to int64
+XXPRIME_1 = -(11400714785074694791 ^ 0xffffffffffffffff) - 1
+XXPRIME_2 = -(14029467366897019727 ^ 0xffffffffffffffff) - 1
+XXPRIME_5 = 2870177450012600261
+# python tuple value
+XXPRIME_5_3527539 = XXPRIME_5 ^ 3527539
+
+def tensor_python_tuple_hash(items, out_or_in_place):
+    # https://github.com/python/cpython/blob/v3.13.2/Objects/tupleobject.c#L321
+    # this is apparently a simplified & modified form of xxhash
+    # learning xxhash could likely improve the code in this file
+
+    len_ = len(items)
+    # first iteration pulled out to provide storage placement
+    if out_or_in_place is None:
+        # in place
+        acc = torch.add(XXPRIME_5, items[0], alpha=XXPRIME_2, out=items[0])
+    else:
+        # place in out_or_in_place
+        acc = torch.add(XXPRIME_5, items[0], alpha=XXPRIME_2, out=out_or_in_place)
+    # bitwise rotation
+    upshift = acc << 31
+    acc >>= 33
+    acc &= 0x7fffffff  # mask int64 sign extension
+    acc |= upshift
+
+    acc *= XXPRIME_1
+
+    for i in range(1, len_):
+        # acc += x * prime2
+        acc.add_(items[i], alpha=XXPRIME_2)
+
+        # bitwise rotation
+        upshift = acc << 31
+        acc >>= 33
+        acc &= 0x7fffffff  # mask int64 sign extension
+        acc |= upshift
+
+        acc *= XXPRIME_1
+
+    acc += (len_ ^ XXPRIME_5_3527539)
+    return acc
+
+def hash(buffer, *incoming_unhashed_ints):
+    if len(buffer) < 16:
+        # bytearrays and writable memoryviews are not hashable; hash an immutable copy
+        return bytes(buffer).__hash__()
+
+    # first pass
+    # - allocate storage
+    # - place unhashed ints
+
+    words = len(buffer) // 8
+    dwords = words // 2
+    incoming_data = torch.frombuffer(buffer, count=words, dtype=torch.int64)
+    # trailing bytes that do not fill a whole word join the unhashed ints
+    incoming_unhashed_ints = [int.from_bytes(buffer[words*8:], 'little')] + list(incoming_unhashed_ints)
+
+    incoming_hashable_length = words & ~1
+    incoming_unhashed_length = words & 1
+    incoming_unhashed_int_length = len(incoming_unhashed_ints)
+    incoming_hashable_data = incoming_data[:incoming_hashable_length].view(2,-1)
+    incoming_unhashed_data = incoming_data[incoming_hashable_length:]
+
+    storage = torch.empty([dwords + incoming_unhashed_length + incoming_unhashed_int_length], dtype=torch.int64)
+    hashed_data = tensor_python_tuple_hash(incoming_hashable_data, out_or_in_place=storage[:dwords])
+    storage[dwords:dwords+incoming_unhashed_length] = incoming_unhashed_data
+    # the unhashed ints go after the leftover odd word, not over it
+    for idx in range(incoming_unhashed_int_length):
+        storage[dwords+incoming_unhashed_length+idx] = incoming_unhashed_ints[idx]
+
+    incoming_data = storage
+    words = len(incoming_data)
+    dwords = words // 2
+
+    # iterative passes
+    while words > 1:
+        incoming_hashable_length = words & ~1
+        incoming_unhashed_length = words & 1
+        incoming_hashable_data = incoming_data[:incoming_hashable_length].view(2,-1)
+        incoming_unhashed_tensor_data = incoming_data[incoming_hashable_length:]
+
+        hashed_data = tensor_python_tuple_hash(incoming_hashable_data, out_or_in_place=None)
+        words = dwords + incoming_unhashed_length
+        storage[dwords:words] = incoming_unhashed_tensor_data
+        incoming_data = storage[:words]
+        dwords = words // 2
+
+    return incoming_data[0].item()
+
+if __name__ == '__main__':
+    print(hash(bytearray(b'the quick brown fox jumped over the lazy dog')))
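The kernel above tracks CPython's tuple hash lane by lane, so for small non-negative integers, where hash(x) == x, the tensorized result should agree with the interpreter's own tuple hash on a 64-bit CPython (3.8+). A minimal sanity-check sketch, assuming _bighash.py is on the import path:

import torch
from _bighash import tensor_python_tuple_hash

# lanes play the role of tuple items; hash() of a small non-negative int is itself
lanes = [torch.tensor([3], dtype=torch.int64),
         torch.tensor([7], dtype=torch.int64)]
# out_or_in_place=None accumulates in place, clobbering lanes[0]
result = tensor_python_tuple_hash(lanes, out_or_in_place=None)
assert result.item() == hash((3, 7))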
_safetensors.py CHANGED
@@ -3,6 +3,8 @@ import contextlib, json, mmap, os
 
 import torch
 
+from _bighash import hash
+
 class WritingSafeTensors:
     def __init__(self, name, file_size=16*1024*1024*1024, **metadata):
         self.name = name.removesuffix('.safetensors')
@@ -10,36 +12,41 @@
         self.file = self.File(self.name + '.safetensors')
         self.files = {self.file.filename:self.file}
         self.file_size = file_size
-        self.total_size = 0
         self.weight_map = {}
+        self.hash_map = {}
     def add(self, name, tensor):
         print(name, '...')
-
-        self.file.add(name, tensor)
-        if self.file.size >= self.file_size:
+        tensor_hash = self.file.add(name, tensor)
+        image_of = self.hash_map.get(tensor_hash)
+        if image_of is not None:
+            # identical bytes already stored: drop this copy and reference the original
+            self.file.undo(name, tensor)
+            imaged_hash = self.weight_map[image_of].add(name, tensor, image_of)
+            assert imaged_hash == tensor_hash
+            self.weight_map[name] = self.weight_map[image_of]
+            return
+        elif self.file.size >= self.file_size:
             self.file.undo(name, tensor)
             ct = len(self.files)
             if len(self.files) == 1:
                 self.file.rename(f'{self.name}-{ct:05}.safetensors')
                 self.file.set_metadata(index = str(ct))
                 self.files = {self.file.filename:self.file}
-            self.file.finalize()
-            self.total_size += self.file.size
             ct += 1
             self.file = self.File(f'{self.name}-{ct:05}.safetensors', index = ct)
             self.files[self.file.filename] = self.file
             self.file.add(name, tensor)
+        # remember which name first carried these bytes
+        self.hash_map[tensor_hash] = name
         self.weight_map[name] = self.file
     def finalize(self):
-        self.file.finalize()
-        self.total_size += self.file.size
-        if len(self.files) > 1:
+        if len(self.files) == 1:
+            self.file.finalize()
+        else:
             index_name = self.name + '.safetensors.index.json'
             print(index_name, '...')
+            total_size = 0
             tot = len(self.files)
             for ct, file in enumerate(self.files.values()):
                 ct += 1
                 file.rename(f'{self.name}-{ct:05}-of-{tot:06}.safetensors')
+                file.finalize()
+                total_size += file.size
             with open(index_name, 'w') as fh:
                 json.dump(
                     {
@@ -82,6 +89,7 @@ class WritingSafeTensors:
 
     class File:
         def __init__(self, filename, **metadata):
+            print(filename, '...')
             self.filename = filename
             self.fd = os.open(self.filename, os.O_RDWR | os.O_CREAT)
             self.size = 0
@@ -97,28 +105,43 @@ class WritingSafeTensors:
             os.truncate(self.filename, new_capacity)
             self.mmapview = memoryview(mmap.mmap(self.fd, new_capacity))
             self.capacity = new_capacity
-        def add(self, name, tensor):
+        def add(self, name, tensor, image_of=None):
             length = tensor.numel() * tensor.dtype.itemsize
-            self._reserve(length)
-            start, end = self.size, self.size + length
-            torch.frombuffer(
-                self.mmapview[start : end],
-                dtype=tensor.dtype, count=tensor.numel(),
-            ).view(tensor.shape or [1])[:] = tensor
+            if image_of is None:
+                self._reserve(length)
+                start, end = self.size, self.size + length
+                torch.frombuffer(
+                    self.mmapview[start : end],
+                    dtype=tensor.dtype, count=tensor.numel(),
+                ).view(tensor.shape or [1])[:] = tensor
+            else:
+                # deduplicated: reuse the byte range already written for image_of
+                image = self.header[image_of]
+                start, end = image['data_offsets']
+                assert end - start == length
+                assert (tensor == torch.frombuffer(
+                    self.mmapview[start : end],
+                    dtype=tensor.dtype, count=tensor.numel(),
+                ).view(tensor.shape or [1])[:]).all()
+
+            tensor.flatten()
+            tensor_hash = hash(self.mmapview[start : end])
+
             self.header[name] = {
                 'dtype':
                     str(tensor.dtype).rsplit('.',1)[-1]
                     .replace('float','F')
                     .replace('uint','U')
                     .replace('int','I')
+                    .removesuffix('uz')
                     .removesuffix('fn')
                     .upper(),
                 'shape':
                     list(tensor.shape),
                 'data_offsets':
                     [start, end],
             }
             self.size = end
+            return tensor_hash
         def undo(self, name, tensor):
             length = tensor.numel() * tensor.dtype.itemsize
             del self.header[name]
@@ -153,4 +176,3 @@ class WritingSafeTensors:
             os.close(self.fd)
             self.finalized = True
             os.unlink(self.filename)
-
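With File.add returning the content hash of the bytes it just wrote, and hash_map remembering the first name that carried each hash, a second tensor with identical bytes should be undone and re-recorded against the original's data_offsets rather than stored twice. A rough usage sketch (names are illustrative; untested, per the commit message):

import torch
from _safetensors import WritingSafeTensors

out = WritingSafeTensors('demo')
weight = torch.arange(4096, dtype=torch.int64)
out.add('layer.0.weight', weight)           # bytes written, hash recorded
out.add('layer.1.weight', weight.clone())   # same bytes: deduplicated against layer.0.weight
out.finalize()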
run_test.py CHANGED
@@ -67,33 +67,45 @@ SafeTensors = WritingSafeTensors(
 IDX = 0 # IDX is unused
 module_prefixes = {mod : name + '.' if name else '' for name, mod in pipe.model.named_modules()}
 tensors = {}
+def add_if_tensor(name, tensor):
+    if not isinstance(tensor, torch.Tensor):
+        try:
+            tensor = torch.tensor(tensor)
+        except:
+            # not convertible as a whole; recurse in case it is a sequence of tensors
+            try:
+                for idx, subtensor in enumerate(tensor):
+                    add_if_tensor(f'{name}.{idx}', subtensor)
+            except:
+                pass
+            return
+    SafeTensors.add(name, tensor);
 def hook(module, inputs, kwinputs, outputs):
     global IDX
     prefix = module_prefixes[module]
+    if not prefix:
+        import pdb; pdb.set_trace()
     HAS_HF_HOOK = hasattr(module, '_hf_hook')
     if HAS_HF_HOOK:
         inputs, kwinputs = module._hf_hook.pre_forward(module, *inputs, **kwinputs)
     for idx, input in enumerate(inputs):
-        if isinstance(input, torch.Tensor):
-            SafeTensors.add(f'{prefix}input.{idx}', input);
+        add_if_tensor(f'{prefix}input.{idx}', input);
     for key, input in kwinputs.items():
-        if isinstance(input, torch.Tensor):
-            SafeTensors.add(f'{prefix}input.{key}', input);
+        add_if_tensor(f'{prefix}input.{key}', input);
+    if STORE_WEIGHTS:
+        for wtname, wt in module.named_buffers(recurse=False):
+            add_if_tensor(f'{prefix}{wtname}', wt)
+        for wtname, wt in module.named_parameters(recurse=False):
+            add_if_tensor(f'{prefix}{wtname}', wt)
     if HAS_HF_HOOK:
         outputs = module._hf_hook.post_forward(module, outputs)
     if isinstance(outputs, torch.Tensor):
-        SafeTensors.add(f'{prefix}output', outputs);
+        add_if_tensor(f'{prefix}output', outputs);
     elif isinstance(outputs, dict):
         for key, output in outputs.items():
-            if isinstance(output, torch.Tensor):
-                SafeTensors.add(f'{prefix}output.{key}', output);
+            add_if_tensor(f'{prefix}output.{key}', output);
     else:
         for idx, output in enumerate(outputs):
-            if isinstance(output, torch.Tensor):
-                SafeTensors.add(f'{prefix}output.{idx}', output);
+            add_if_tensor(f'{prefix}output.{idx}', output);
     IDX += 1
 
 for module in pipe.model.modules():
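The hook's (module, inputs, kwinputs, outputs) signature matches torch's kwargs-aware forward hooks, so registration presumably uses with_kwargs=True (available since torch 2.0). A minimal sketch of that registration:

for module in pipe.model.modules():
    module.register_forward_hook(hook, with_kwargs=True)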