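"""Capture the tensors flowing through every module of a Hugging Face causal
LM during one deterministic greedy decoding step, recording them (and,
optionally, the weights) in a .safetensors file for later comparison.

Invoke with two arguments: a model id of the form 'user/model' and a
revision (a branch name or commit hash).
"""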
import os, sys

# run-configuration flags
STORE_WEIGHTS = False    # also record every leaf module's parameters and buffers
FAKE_H100 = False        # monkeypatch torch.cuda to report an sm_90 (H100-class) device
TORCH_DTYPE = 'float64'  # resolved via getattr(torch, ...); non-attribute names pass through as strings
USE_GPU = False          # when False, restrict the accelerate device map to CPU only
DEVICE_MAP = 'auto'      # device-placement strategy handed to from_pretrained

model_id, revision = sys.argv[1:]
user, model_name = model_id.split('/')  # avoid shadowing the model object loaded below
prompt = 'Once upon a time,'
fn = f'{user}_{model_name}_{revision}.{"logits-and-weights" if STORE_WEIGHTS else "logits"}.safetensors'
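# e.g. a hypothetical run on 'openai-community/gpt2' at revision 'main' would
# write 'openai-community_gpt2_main.logits.safetensors'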

import torch, numpy as np, random

# pin every source of nondeterminism so repeat runs are bit-identical
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
torch.backends.cudnn.allow_tf32 = False
torch.backends.cudnn.benchmark = False
torch.use_deterministic_algorithms(True)
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'  # deterministic cuBLAS accepts only ':4096:8' or ':16:8'
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
torch.manual_seed(0)
random.seed(0)
np.random.seed(0)

if FAKE_H100:
    # spoof CUDA presence and an sm_90 compute capability so capability-gated
    # code paths run without the hardware; mirror the real API, which takes an
    # optional device argument and returns a tuple
    torch.cuda.is_available = lambda: True
    torch.cuda.get_device_capability = lambda device=None: (9, 0)

import accelerate, safetensors.torch, transformers, tqdm
from _safetensors import WritingSafeTensors  # local helper that writes named tensors into a .safetensors file

config = transformers.AutoConfig.from_pretrained(model_id, revision=revision, trust_remote_code=True)
tokenizer = transformers.AutoTokenizer.from_pretrained(model_id, revision=revision, trust_remote_code=True)
Model = transformers.AutoModelForCausalLM
if config.model_type == 'deepseek_v3':
    # no config-time special casing currently; the cache workaround happens after loading
    pass

# size the accelerate device map; without a GPU, keep every shard on the CPU
max_memory = accelerate.utils.get_max_memory()
if not USE_GPU:
    max_memory = {'cpu': max_memory['cpu']}
max_memory['cpu'] //= 3  # claim only a third of system RAM, leaving headroom for activations and offload
model_kwparams = dict(
    pretrained_model_name_or_path=model_id, revision=revision, trust_remote_code=True,
    torch_dtype=getattr(torch, TORCH_DTYPE, TORCH_DTYPE), use_safetensors=True, low_cpu_mem_usage=True,
    device_map=DEVICE_MAP, max_memory=max_memory, offload_state_dict=True, offload_buffers=True)
model = Model.from_pretrained(**model_kwparams)

if config.model_type == 'deepseek_v3':
    model._supports_cache_class = False  # presumably forces the legacy tuple KV cache for this remote-code model

pipe = transformers.pipeline('text-generation', model=model, config=config, tokenizer=tokenizer)

# open the output file; the keyword arguments record run provenance, including
# a description of every visible accelerator on this machine
SafeTensors = WritingSafeTensors(
    fn,
    prompt=prompt,
    store_weights=STORE_WEIGHTS,
    use_gpu=USE_GPU,
    **model_kwparams,
    **{
        f'{hw}:{idx}': str(getattr(torch, hw).get_device_properties(idx))
        for hw in ['npu', 'mlu', 'musa', 'xpu', 'cuda']
        if hasattr(torch, hw)
        for idx in range(getattr(torch, hw).device_count())
    },
)
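# Reading a capture back later might look like this (a sketch, assuming the
# helper writes a standard safetensors file with string metadata):
#   from safetensors import safe_open
#   with safe_open(fn, framework='pt') as f:
#       print(f.metadata())             # the provenance recorded above
#       for key in f.keys():
#           tensor = f.get_tensor(key)  # e.g. some '<module>.output' entry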

IDX = 0  # running count of module forward calls, in execution order
module_names = {mod: name for name, mod in pipe.model.named_modules()}

def hook(module, inputs, outputs):
    # record every tensor entering and leaving this module, keyed by its
    # named_modules() path
    global IDX
    name = module_names[module]
    HAS_HF_HOOK = hasattr(module, '_hf_hook')
    if HAS_HF_HOOK:
        # accelerate's ModelHook.pre_forward returns (args, kwargs); realign
        # offloaded inputs onto the execution device before recording them
        inputs, _kwargs = module._hf_hook.pre_forward(module, *inputs)
    for idx, input in enumerate(inputs):
        if isinstance(input, torch.Tensor):
            SafeTensors.add(f'{name}.input.{idx}', input)
    if STORE_WEIGHTS and not list(module.children()):
        # leaf modules only, so each parameter and buffer is stored exactly once
        for wtname, wt in list(module.named_parameters()) + list(module.named_buffers()):
            SafeTensors.add(f'{name}.{wtname}', wt)
    if HAS_HF_HOOK:
        outputs = module._hf_hook.post_forward(module, outputs)
    if isinstance(outputs, torch.Tensor):
        SafeTensors.add(f'{name}.output', outputs)
    else:
        for idx, output in enumerate(outputs):
            if isinstance(output, torch.Tensor):
                SafeTensors.add(f'{name}.output.{idx}', output)
    IDX += 1
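
# Stored keys mirror named_modules() paths; a Llama-style model, for instance,
# would produce entries like 'model.layers.0.self_attn.q_proj.input.0' and
# 'model.layers.0.self_attn.q_proj.output' (illustrative names).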

# observe every submodule (including the root) during the forward pass
for module in pipe.model.modules():
    module.register_forward_hook(hook)
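# register_forward_hook returns a RemovableHandle; a longer-lived script could
# keep them to detach the hooks afterwards, e.g.:
#   handles = [m.register_forward_hook(hook) for m in pipe.model.modules()]
#   for h in handles: h.remove()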

# one greedy token is enough to drive a full forward pass through every module
with SafeTensors:
    output = pipe(prompt, do_sample=False, max_new_tokens=1, temperature=1.0, top_p=1.0)
print()
print(output)
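# the pipeline result is a list of dicts, e.g.:
#   [{'generated_text': 'Once upon a time, ...'}]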