#!/usr/bin/env python3
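# Run one greedy decoding step of a causal LM with forward hooks on every module,
# recording each module's inputs and outputs (and optionally weights) to a
# safetensors file so runs can be compared tensor-by-tensor.
# Usage: run_test.py <user>/<model> <revision>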
import contextlib, os, sys
STORE_WEIGHTS = False
DEDUPLICATE_SAFETENSORS = True
SAVE_ON_CRASH = False
FAKE_H100 = False
# notes transformers 4.49.0:
# - a model with unexpected weights must not have 'disk' anywhere in device_map
# - a model with dtypes unsupported by numpy must have offload_state_dict = False
# - dtype=float64 can overflow: with no dtype specified, transformers/modeling_attn_mask_utils.py(158) _make_causal_mask() defaulted to float32
TORCH_DTYPE = 'float64'
USE_GPU = False
DEVICE_MAP = 'auto'
model_id, revision = sys.argv[1:]
user, model = model_id.split('/')
prompt = 'Once upon a time,'
fn = f'{user}_{model}_{revision}.{"logits-and-weights" if STORE_WEIGHTS else "logits"}.{"DEDUPLICATED.safetensors" if DEDUPLICATE_SAFETENSORS else "safetensors"}'
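# e.g. 'deepseek-ai_DeepSeek-V3_main.logits.DEDUPLICATED.safetensors' with the defaults above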
## Increase determinism, from torch docs
import torch, numpy as np, random
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
torch.backends.cudnn.allow_tf32 = False
torch.backends.cudnn.benchmark = False
torch.use_deterministic_algorithms(True)
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' # torch docs specify ':4096:8' or ':16:8' for deterministic cuBLAS
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
torch.manual_seed(0)
random.seed(0)
np.random.seed(0)
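# Optionally spoof an H100: compute capability (9, 0) is sm_90 (Hopper), so
# capability checks in model code take the same branches as on real H100 hardware.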
if FAKE_H100:
    torch.cuda.is_available = lambda: True
    torch.cuda.get_device_capability = lambda *params: (9, 0) # return a tuple like the real API, so comparisons against (X, Y) work
import accelerate, safetensors.torch, transformers, tqdm
from _safetensors import WritingSafeTensors
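# WritingSafeTensors comes from the local _safetensors helper next to this script
# (not the safetensors package); it collects named tensors plus header metadata
# and serializes them when the `with SafeTensors:` block below exits.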
## Show progress for slow internal functions (model layer enumerations)
import builtins
builtin_range = range
def range_progress(*params):
    return tqdm.tqdm(builtin_range(*params), leave=False,
                     desc=range_progress.desc + ' range(' + ','.join([repr(p) for p in params]) + ')')
@contextlib.contextmanager
def range_progress_description(desc):
    range_progress.desc = desc
    try:
        yield
    finally:
        range_progress.desc = ''
range_progress.desc = ''
builtins.range = range_progress
# hand tqdm and the regex parser the real range back, so the wrapper neither
# recurses into tqdm's own internals nor slows down every re.compile()
tqdm.std.range = builtin_range
import sre_parse; sre_parse.range = builtin_range
config = transformers.AutoConfig.from_pretrained(model_id, revision=revision, trust_remote_code=True)
tokenizer = transformers.AutoTokenizer.from_pretrained(model_id, revision=revision, trust_remote_code=True)
Model = transformers.AutoModelForCausalLM
if config.model_type == 'deepseek_v3':
    #Model = transformers.DeepseekV3ForCausalLM
    pass
max_memory = accelerate.utils.get_max_memory()
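# get_max_memory() reports a per-device memory budget for accelerate's device-map
# planner; when GPUs are disabled, keep only the CPU entry so nothing lands on them.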
if not USE_GPU:
    max_memory = {'cpu': max_memory['cpu']}
    #max_memory['cpu'] //= 3
model_kwparams = dict(pretrained_model_name_or_path=model_id, revision=revision, trust_remote_code=True,
                      torch_dtype=getattr(torch, TORCH_DTYPE, TORCH_DTYPE), use_safetensors=True, low_cpu_mem_usage=True,
                      device_map=DEVICE_MAP, max_memory=max_memory, offload_state_dict=True, offload_buffers=True)
with range_progress_description('constructing model'):
    model = Model.from_pretrained(**model_kwparams)
if config.model_type == 'deepseek_v3':
    model._supports_cache_class = False # workaround from the deepseek attempt: opt out of transformers' Cache classes
with range_progress_description('constructing pipeline'):
    pipe = transformers.pipeline('text-generation', model=model, config=config, tokenizer=tokenizer)
SafeTensors = WritingSafeTensors(
    fn,
    deduplicate = DEDUPLICATE_SAFETENSORS,
    save_on_crash = SAVE_ON_CRASH,
    prompt = prompt,
    store_weights = STORE_WEIGHTS,
    use_gpu = USE_GPU,
    **model_kwparams,
    **{  # record the properties of every visible accelerator alongside the run parameters
        f'{hw}:{idx}': str(getattr(torch, hw).get_device_properties(idx))
        for hw in ['npu', 'mlu', 'musa', 'xpu', 'cuda']
        if hasattr(torch, hw)
        for idx in range(getattr(torch, hw).device_count())
    },
)
IDX = 0 # incremented per hook call below, but its value is never read
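# Map each submodule to its dotted attribute path ('' for the root model) so the
# hook below can give every recorded tensor a stable, fully-qualified name.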
module_prefixes = {mod : name + '.' if name else '' for name, mod in pipe.model.named_modules()}
tensors = {} # unused
def add_if_tensor(name, tensor):
    # Coerce non-tensor values (ints, floats, lists) to tensors where possible;
    # otherwise recurse into iterables, naming the elements '<name>.<idx>'.
    if not isinstance(tensor, torch.Tensor):
        try:
            tensor = torch.tensor(tensor)
        except Exception:
            try:
                for idx, subtensor in enumerate(tensor):
                    add_if_tensor(f'{name}.{idx}', subtensor)
            except Exception:
                pass
            return
    SafeTensors.add(name, tensor)
def hook(module, inputs, kwinputs, outputs):
    global IDX
    prefix = module_prefixes[module]
    HAS_HF_HOOK = hasattr(module, '_hf_hook')
    if HAS_HF_HOOK:
        # rerun accelerate's pre_forward so the recorded inputs sit on the device the module actually used
        inputs, kwinputs = module._hf_hook.pre_forward(module, *inputs, **kwinputs)
    for idx, input in enumerate(inputs):
        add_if_tensor(f'{prefix}input.{idx}', input)
    for key, input in kwinputs.items():
        add_if_tensor(f'{prefix}input.{key}', input)
    if STORE_WEIGHTS:
        for wtname, wt in module.named_buffers(recurse=False):
            add_if_tensor(f'{prefix}{wtname}', wt)
        for wtname, wt in module.named_parameters(recurse=False):
            add_if_tensor(f'{prefix}{wtname}', wt)
    if HAS_HF_HOOK:
        outputs = module._hf_hook.post_forward(module, outputs)
    if isinstance(outputs, torch.Tensor):
        add_if_tensor(f'{prefix}output', outputs)
    elif isinstance(outputs, dict):
        for key, output in outputs.items():
            add_if_tensor(f'{prefix}output.{key}', output)
    else:
        for idx, output in enumerate(outputs):
            add_if_tensor(f'{prefix}output.{idx}', output)
    IDX += 1
for module in pipe.model.modules():
    module.register_forward_hook(hook, with_kwargs=True) # with_kwargs=True also delivers keyword inputs (torch >= 2.0)
with SafeTensors:
    output = pipe(prompt, do_sample=False, max_new_tokens=1, temperature=1.0, top_p=1.0)
print()
print(output)
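# A sketch for inspecting the captured file afterwards with the standard safetensors
# API (keys follow the '<module path>.input.<idx>' / '<module path>.output' scheme above):
#   import safetensors
#   with safetensors.safe_open(fn, framework='pt') as f:
#       for key in f.keys():
#           print(key, f.get_tensor(key).shape)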