llm_logits / run_test.py
#!/usr/bin/env python3
import os, sys
import accelerate, safetensors.torch, transformers, torch, tqdm
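# Capture every module's input/output tensors during a single greedy generation
# step and save them to a .safetensors file named after the model and revision.
# Usage: run_test.py <user>/<model> <revision>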
model_id, revision = sys.argv[1:]
user, model_name = model_id.split('/')
fn = f'{user}_{model_name}_{revision}.logits.safetensors'
config = transformers.AutoConfig.from_pretrained(model_id, revision=revision, trust_remote_code=True)
tokenizer = transformers.AutoTokenizer.from_pretrained(model_id, revision=revision, trust_remote_code=True)
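# Default to AutoModelForCausalLM; a dedicated DeepseekV3 class could be
# swapped in here instead (see the commented-out line in the block below).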
Model = transformers.AutoModelForCausalLM
if config.model_type == 'deepseek_v3':
    #Model = transformers.DeepseekV3ForCausalLM
    pass
model = Model.from_pretrained(model_id, revision=revision, trust_remote_code=True, torch_dtype='auto', device_map='cpu')
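# For deepseek_v3 remote code, flag the model as not supporting the new Cache
# classes; weights stay on CPU and are streamed to cuda:0 via accelerate.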
if config.model_type == 'deepseek_v3':
    model._supports_cache_class = False
model = accelerate.cpu_offload(model, 'cuda:0', offload_buffers=True)
pipe = transformers.pipeline('text-generation', model=model, config=config, tokenizer=tokenizer)
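# Hooked tensors are collected here; store_tensor detaches each one and copies
# it to CPU so it can be serialized with safetensors.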
tensors = {}
def store_tensor(descr, tensor):
    tensors[descr] = tensor.cpu().detach().contiguous()
IDX = 0
module_names = {mod: name for name, mod in pipe.model.named_modules()}
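# Forward hook: record each module's tensor inputs and outputs under the
# module's qualified name from named_modules(). IDX counts hook invocations
# but is not otherwise used in the stored names.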
def hook(module, inputs, outputs):
    global IDX
    name = module_names[module]
    for idx, input in enumerate(inputs):
        if isinstance(input, torch.Tensor):
            store_tensor(f'{name}.input.{idx}', input)
    if isinstance(outputs, torch.Tensor):
        store_tensor(f'{name}.output', outputs)
    else:
        for idx, output in enumerate(outputs):
            if isinstance(output, torch.Tensor):
                store_tensor(f'{name}.output.{idx}', output)
    IDX += 1
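# Register the hook on every module so the full forward pass is captured.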
for module in pipe.model.modules():
    module.register_forward_hook(hook)
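# Run a single deterministic (greedy) generation step over the prompt.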
prompt = 'Once upon a time,'
output = pipe(prompt, do_sample=False, max_new_tokens=1, temperature=1.0, top_p=1.0)
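# Save all captured tensors, with the prompt stored in the file's metadata.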
safetensors.torch.save_file(tensors, fn, dict(prompt=prompt))
print()
print(output)
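# A minimal sketch (not executed here) of how a saved file could be inspected
# afterwards; the tensor keys depend on the model's module names:
#   loaded = safetensors.torch.load_file(fn)   # name -> tensor
#   for name, tensor in loaded.items():
#       print(name, tuple(tensor.shape), tensor.dtype)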