llm_logits / compare_safetensors.py
#!/usr/bin/env python3
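# Compares two safetensors activation dumps tensor-by-tensor: reports the share of
# tensor names and dtypes that disagree, the absolute distance between the logged
# embedding and lm_head outputs, and each file's top/bottom softmax tokens.
# Usage: python3 compare_safetensors.py FILE_A.safetensors FILE_B.safetensors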
import safetensors
def compare(*fns):
    global files, mismatching_keys, avgs, dists, errs
    # open both checkpoints lazily; framework='pt' makes get_tensor return torch tensors
    files = [safetensors.safe_open(fn, framework='pt') for fn in fns]
    # both files must contain the same tensor names
    assert set(files[0].keys()) == set(files[1].keys())
    print('dtypes ...')
    # read each tensor's dtype from a one-row slice so the full data is not loaded
    dtypes = {k: [files[0].get_slice(k)[0].dtype, files[1].get_slice(k)[0].dtype] for k in files[0].keys()}
    # order each pair so dtypes[k][0] is the narrower (smaller itemsize) dtype
    dtypes = {k: [min(dts, key=lambda dt: dt.itemsize), max(dts, key=lambda dt: dt.itemsize)] for k, dts in dtypes.items()}
    mismatching_dtypes = [k for k, dts in dtypes.items() if dts[0] is not dts[1]]
    print('midpoints ...')
    # elementwise midpoint of each tensor pair, cast to the narrower dtype
    avgs = {k: ((files[0].get_tensor(k) + files[1].get_tensor(k)) / 2).to(dtypes[k][0]) for k in files[0].keys()}
    print('dists ...')
    # absolute elementwise difference, with both tensors cast to the narrower dtype first
    dists = {k: (files[0].get_tensor(k).to(dtypes[k][0]) - files[1].get_tensor(k).to(dtypes[k][0])).abs() for k in files[0].keys()}
    print('keys ...')
    mismatching_keys = [k for k, d in dists.items() if (d != 0).any()]
    print(f'{len(mismatching_keys)/len(files[0].keys())*100:.2f}% keys mismatch')
    print(f'{len(mismatching_dtypes)/len(files[0].keys())*100:.2f}% dtypes mismatch')
    #errs = {k: (dists[k] / avgs[k]).nan_to_num() for k in files[0].keys()}
    #print('greatest scalar error:', max([e.max().item() for e in errs.values()])*100, '%')
    #print('cumulative scalar error:', sum([e.sum().item() for e in errs.values()])*100, '%')
    #print('total error:', (sum([d.sum() for d in dists.values()]) / sum([a.sum() for a in avgs.values()])).item()*100, '%')
    # locate the logged embedding and head activations by candidate module name
    embed_name = [x for x in ['model.embed_tokens'] if x + '.output' in dtypes][0]
    head_name = [x for x in ['lm_head'] if x + '.output' in dtypes][0]
    print('input embed dist:', dists[embed_name + '.output'].sum().item())
    #print('input embed error sum:', errs[embed_name + '.output'].sum())
    print('output head dist:', dists[head_name + '.output'].sum().item())
    #print('output head error sum:', errs[head_name + '.output'].sum())
    for file_idx in range(2):
        # per-position probability distribution over the vocabulary, sorted descending
        head_tokens = files[file_idx].get_tensor(head_name + '.output')[0].softmax(dim=-1).sort(dim=-1, descending=True)
        print('file', file_idx, 'tokens: ', end='')
        # show the 3 most and 3 least likely token ids at every position
        for range_idx, range_ in enumerate([range(3), range(-3, 0)]):
            if range_idx > 0:
                print('.. ', end='')
            for rank in range_:
                for token in range(head_tokens.values.shape[-2]):
                    if range_idx == 0:
                        print(f'{head_tokens.indices[token][rank]}({head_tokens.values[token][rank]*100:.3f}%) ', end='')
                    else:  # the % format changes from f to e to show the smaller tail values
                        print(f'{head_tokens.indices[token][rank]}({head_tokens.values[token][rank]*100:.3e}%) ', end='')
        print()
if __name__ == '__main__':
    import sys
    # expects exactly two .safetensors paths on the command line
    assert len(sys.argv[1:]) == 2
    compare(*sys.argv[1:])
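
# The '.output' keys above imply these files hold module activations captured during a
# forward pass. A minimal sketch of how such a file could be produced with forward
# hooks, assuming a loaded Hugging Face-style model and input_ids in scope; the module
# names and output path are illustrative assumptions, not taken from this repo:
#
#   import safetensors.torch
#   logged = {}
#   for name, module in model.named_modules():
#       if name in ('model.embed_tokens', 'lm_head'):
#           module.register_forward_hook(
#               lambda mod, args, output, name=name:
#                   logged.update({name + '.output': output.detach().cpu().contiguous()}))
#   model(input_ids)
#   safetensors.torch.save_file(logged, 'activations.safetensors')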