#!/usr/bin/env python3
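# Capture every module's input/output tensors (and, optionally, its weights)
# during one deterministic greedy decoding step of a Hugging Face causal LM,
# writing them into a .safetensors file (e.g. for comparing runs across
# hardware or library versions).
#
# Usage: <this script> <user/model> <revision>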
import os, sys

STORE_WEIGHTS = False    # also capture every leaf module's parameters and buffers
FAKE_H100 = False        # monkeypatch torch.cuda to report an H100-class device
TORCH_DTYPE = 'float64'  # high-precision dtype for a reference capture
USE_GPU = False          # restrict the accelerate device map to CPU when False
DEVICE_MAP = 'auto'      # forwarded to transformers/accelerate
model_id, revision = sys.argv[1:]
user, model_name = model_id.split('/')
prompt = 'Once upon a time,'
fn = f'{user}_{model_name}_{revision}.{"logits-and-weights" if STORE_WEIGHTS else "logits"}.safetensors'

import torch, numpy as np, random
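# Pin every documented source of nondeterminism so that repeated runs on the
# same stack produce bit-identical tensors.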
torch.backends.cuda.matmul.allow_tf32 = False  # assumed omission: TF32 matmul is also a reduced-precision path
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
torch.backends.cudnn.allow_tf32 = False
torch.backends.cudnn.benchmark = False
torch.use_deterministic_algorithms(True)
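# These generally must be set before the first CUDA context is created in
# order to take effect.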
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:2'
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
torch.manual_seed(0)
random.seed(0)
np.random.seed(0)
if FAKE_H100:
    torch.cuda.is_available = lambda: True
    # Accept the optional device argument and return a tuple, matching the
    # real torch.cuda.get_device_capability signature.
    torch.cuda.get_device_capability = lambda device=None: (9, 0)
import accelerate, safetensors.torch, transformers, tqdm
from _safetensors import WritingSafeTensors
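# WritingSafeTensors is a local helper (assumed to live in _safetensors.py
# alongside this script) used below to accumulate named tensors into the
# output file.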

config = transformers.AutoConfig.from_pretrained(model_id, revision=revision, trust_remote_code=True)
tokenizer = transformers.AutoTokenizer.from_pretrained(model_id, revision=revision, trust_remote_code=True)
Model = transformers.AutoModelForCausalLM
if config.model_type == 'deepseek_v3':
    # Placeholder: swap in a dedicated model class if transformers ships one.
    #Model = transformers.DeepseekV3ForCausalLM
    pass

max_memory = accelerate.utils.get_max_memory()
if not USE_GPU:
    max_memory = {'cpu': max_memory['cpu']}
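# Claim only a third of system RAM, leaving headroom for offloaded weights
# and the tensors buffered for capture.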
max_memory['cpu'] //= 3
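# These load kwargs are reused verbatim below as metadata for the capture
# file, so every recording documents how its model was instantiated.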
model_kwparams = dict(pretrained_model_name_or_path=model_id, revision=revision, trust_remote_code=True,
    torch_dtype=getattr(torch, TORCH_DTYPE, TORCH_DTYPE), use_safetensors=True, low_cpu_mem_usage=True,
    device_map=DEVICE_MAP, max_memory=max_memory, offload_state_dict=True, offload_buffers=True)
model = Model.from_pretrained(**model_kwparams)

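# The deepseek_v3 remote code appears to predate transformers' newer Cache
# classes (assumption), so force the legacy key/value cache path.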
if config.model_type == 'deepseek_v3':
    model._supports_cache_class = False

pipe = transformers.pipeline('text-generation', model=model, config=config, tokenizer=tokenizer)

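# The extra keyword arguments (prompt, load parameters, and a description of
# every visible accelerator device) are presumably recorded as metadata in
# the output file, making each capture self-describing.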
SafeTensors = WritingSafeTensors(
        fn,
        prompt = prompt,
        store_weights = STORE_WEIGHTS,
        use_gpu = USE_GPU,
        **model_kwparams,
        **{
            f'{hw}:{idx}':
                str(getattr(torch, hw).get_device_properties(idx))
            for hw in ['npu','mlu','musa','xpu','cuda']
                if hasattr(torch, hw)
                    for idx in range(getattr(torch,hw).device_count())
        }
    )

IDX = 0  # counts hook invocations; written below but never read
module_names = {mod: name for name, mod in pipe.model.named_modules()}
tensors = {}  # unused
def hook(module, inputs, outputs):
    global IDX
    name = module_names[module]
    HAS_HF_HOOK = hasattr(module, '_hf_hook')
    if HAS_HF_HOOK:
        # accelerate's pre_forward returns an (args, kwargs) pair; re-running
        # it also moves any offloaded weights back onto the execution device
        # so they can be captured below.
        inputs, _kwargs = module._hf_hook.pre_forward(module, *inputs)
    for idx, input in enumerate(inputs):
        if isinstance(input, torch.Tensor):
            SafeTensors.add(f'{name}.input.{idx}', input)
    if STORE_WEIGHTS and not list(module.children()):
        # leaf modules only, so each weight is recorded exactly once
        for wtname, wt in list(module.named_parameters()) + list(module.named_buffers()):
            SafeTensors.add(f'{name}.{wtname}', wt)
    if HAS_HF_HOOK:
        outputs = module._hf_hook.post_forward(module, outputs)
    if isinstance(outputs, torch.Tensor):
        SafeTensors.add(f'{name}.output', outputs)
    else:
        for idx, output in enumerate(outputs):
            if isinstance(output, torch.Tensor):
                SafeTensors.add(f'{name}.output.{idx}', output)
    IDX += 1

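# Attach the capture hook to every module in the model, including the root.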
for module in pipe.model.modules():
    module.register_forward_hook(hook)

with SafeTensors:
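    # One greedy step: do_sample=False makes decoding deterministic, and
    # temperature/top_p are pinned in case the model's generation config
    # overrides the defaults.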
    output = pipe(prompt, do_sample=False, max_new_tokens=1, temperature=1.0, top_p=1.0)
print()
print(output)