#!/usr/bin/env python3
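# Run one greedy decoding step of a Hugging Face causal LM and record every
# module's inputs and outputs (and optionally its weights) into a safetensors file.
# Usage: python3 <this file> <hub-user>/<model-name> <revision>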
import contextlib, os, sys
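# Behaviour toggles: STORE_WEIGHTS also captures each module's parameters and buffers;
# DEDUPLICATE_SAFETENSORS and SAVE_ON_CRASH are forwarded to WritingSafeTensors;
# FAKE_H100 pretends a compute-capability-9.0 GPU is present.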
STORE_WEIGHTS = False
DEDUPLICATE_SAFETENSORS = True
SAVE_ON_CRASH = False
FAKE_H100 = False
# Notes for transformers 4.49.0:
# - a model with unexpected weights must not have 'disk' anywhere in device_map
# - a model with dtypes unsupported by numpy must have offload_state_dict=False
# - dtype=float64 can overflow: _make_causal_mask() at transformers/modeling_attn_mask_utils.py:158
#   specifies no dtype and defaults to float32
TORCH_DTYPE = 'float64'
USE_GPU = False
DEVICE_MAP = 'auto'
model_id, revision = sys.argv[1:]
user, model = model_id.split('/')
prompt = 'Once upon a time,'
fn = f'{user}_{model}_{revision}.{"logits-and-weights" if STORE_WEIGHTS else "logits"}.{"DEDUPLICATED.safetensors" if DEDUPLICATE_SAFETENSORS else "safetensors"}'
## Increase determinism, from torch docs
import torch, numpy as np, random
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
torch.backends.cudnn.allow_tf32 = False
torch.backends.cudnn.benchmark = False
torch.use_deterministic_algorithms(True)
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:2'
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
torch.manual_seed(0)
random.seed(0)
np.random.seed(0)
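# Pretend CUDA is available with an H100-class (compute capability 9.0) device,
# so GPU-only code paths can be exercised without a GPU attached.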
if FAKE_H100:
torch.cuda.is_available = lambda: True
torch.cuda.get_device_capability = lambda *params: [9,0]
import accelerate, safetensors.torch, transformers, tqdm
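# WritingSafeTensors comes from the local _safetensors helper (not the safetensors
# package); it collects named tensors via .add() plus string metadata, and writes the
# output file when used as a context manager.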
from _safetensors import WritingSafeTensors
## Show progress for slow internal functions (model layer enumerations)
import builtins
builtin_range = range
def range_progress(*params):
return tqdm.tqdm(builtin_range(*params), leave=False, desc=range_progress.desc + ' range('+','.join([repr(p)for p in params])+')')
@contextlib.contextmanager
def range_progress_description(desc):
    range_progress.desc = desc
    try:
        yield
    finally:
        range_progress.desc = ''
range_progress.desc = ''
builtins.range = range_progress  # patch range globally so model-internal loops show progress bars
tqdm.std.range = builtin_range  # but give tqdm itself the real range back
import sre_parse; sre_parse.range = builtin_range  # likewise for the regex parser
config = transformers.AutoConfig.from_pretrained(model_id, revision=revision, trust_remote_code=True)
tokenizer = transformers.AutoTokenizer.from_pretrained(model_id, revision=revision, trust_remote_code=True)
Model = transformers.AutoModelForCausalLM
if config.model_type == 'deepseek_v3':
#Model = transformers.DeepseekV3ForCausalLM
pass
max_memory = accelerate.utils.get_max_memory()
if not USE_GPU:
max_memory = {'cpu': max_memory['cpu']}
#max_memory['cpu'] //= 3
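# Load from safetensors only, with accelerate's low-memory init, automatic device
# placement, and offloading of the state dict and buffers.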
model_kwparams = dict(pretrained_model_name_or_path=model_id, revision=revision, trust_remote_code=True,
torch_dtype=getattr(torch, TORCH_DTYPE, TORCH_DTYPE), use_safetensors=True, low_cpu_mem_usage=True,
device_map=DEVICE_MAP, max_memory=max_memory, offload_state_dict=True, offload_buffers=True)
with range_progress_description('constructing model'):
model = Model.from_pretrained(**model_kwparams)
if config.model_type == 'deepseek_v3':
model._supports_cache_class = False
with range_progress_description('constructing pipeline'):
pipe = transformers.pipeline('text-generation', model=model, config=config, tokenizer=tokenizer)
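# Record the run configuration, plus properties of any accelerator devices present,
# as metadata in the output file.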
SafeTensors = WritingSafeTensors(
fn,
deduplicate = DEDUPLICATE_SAFETENSORS,
save_on_crash = SAVE_ON_CRASH,
prompt = prompt,
store_weights = STORE_WEIGHTS,
use_gpu = USE_GPU,
**model_kwparams,
**{
f'{hw}:{idx}':
str(getattr(torch, hw).get_device_properties(idx))
for hw in ['npu','mlu','musa','xpu','cuda']
if hasattr(torch, hw)
for idx in range(getattr(torch,hw).device_count())
}
)
IDX = 0  # incremented in hook() below but its value is never read
# map each module object to its dotted-name prefix so captured tensors get fully qualified names
module_prefixes = {mod: name + '.' if name else '' for name, mod in pipe.model.named_modules()}
tensors = {}
def add_if_tensor(name, tensor):
    if not isinstance(tensor, torch.Tensor):
        # non-tensor values: try converting directly, otherwise recurse into iterables
        try:
            tensor = torch.tensor(tensor)
        except Exception:
            try:
                for idx, subtensor in enumerate(tensor):
                    add_if_tensor(f'{name}.{idx}', subtensor)
            except Exception:
                pass
            return
    SafeTensors.add(name, tensor)
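# Forward hook: record each module's positional and keyword inputs, optionally its
# parameters and buffers, and its outputs. If accelerate attached an _hf_hook, its
# pre_forward/post_forward are re-applied so the captured tensors reflect the values
# the offloaded module actually computed with.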
def hook(module, inputs, kwinputs, outputs):
    global IDX
    prefix = module_prefixes[module]
    HAS_HF_HOOK = hasattr(module, '_hf_hook')
    if HAS_HF_HOOK:
        inputs, kwinputs = module._hf_hook.pre_forward(module, *inputs, **kwinputs)
    for idx, input in enumerate(inputs):
        add_if_tensor(f'{prefix}input.{idx}', input)
    for key, input in kwinputs.items():
        add_if_tensor(f'{prefix}input.{key}', input)
    if STORE_WEIGHTS:
        for wtname, wt in module.named_buffers(recurse=False):
            add_if_tensor(f'{prefix}{wtname}', wt)
        for wtname, wt in module.named_parameters(recurse=False):
            add_if_tensor(f'{prefix}{wtname}', wt)
    if HAS_HF_HOOK:
        outputs = module._hf_hook.post_forward(module, outputs)
    if isinstance(outputs, torch.Tensor):
        add_if_tensor(f'{prefix}output', outputs)
    elif isinstance(outputs, dict):
        for key, output in outputs.items():
            add_if_tensor(f'{prefix}output.{key}', output)
    else:
        for idx, output in enumerate(outputs):
            add_if_tensor(f'{prefix}output.{idx}', output)
    IDX += 1
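# Register the hook on every module; with_kwargs=True so keyword inputs reach the hook.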
for module in pipe.model.modules():
module.register_forward_hook(hook, with_kwargs=True)
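# Run a single greedy decoding step inside the WritingSafeTensors context so every
# hook's captured tensors end up in the output file.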
with SafeTensors:
output = pipe(prompt, do_sample=False, max_new_tokens=1, temperature=1.0, top_p=1.0)
print()
print(output)