Your Name committed on
Commit
e7df8ec
·
1 Parent(s): 7774d63

float64: meta-llama_Llama-3.1-405B_b906e4dc842aa489c962f9db26554dcfdde901fe

Browse files

I updated run_test.py to generate this, but kept modifying it for the
next one I am trying; it is presently in further flux. Pending commit.

meta-llama_Llama-3.1-405B_b906e4dc842aa489c962f9db26554dcfdde901fe.logits.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b890bc71f9c3441fa80df948922592544d69710f23e42d89f6d0855311987d7a
3
- size 852994056
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9903b0666a60a6c8af94cedda9fb452ed5384767f5f7c91188e63a2ac11a7669
3
+ size 3410907536
run_test.py CHANGED
@@ -25,12 +25,15 @@ Model = transformers.AutoModelForCausalLM
25
  if config.model_type == 'deepseek_v3':
26
  #Model = transformers.DeepseekV3ForCausalLM
27
  pass
28
- model = Model.from_pretrained(model_id, revision=revision, trust_remote_code=True, torch_dtype=torch.float64, use_safetensors=True, low_cpu_mem_usage=True)
 
 
 
29
  if config.model_type == 'deepseek_v3':
30
  model._supports_cache_class = False
31
  #model = accelerate.cpu_offload(model, 'cuda:0', offload_buffers=True)
32
 
33
- pipe = transformers.pipeline('text-generation', model=model, config=config, tokenizer=tokenizer, device='cpu')
34
 
35
  tensors = {}
36
  def store_tensor(descr, tensor):
 
25
  if config.model_type == 'deepseek_v3':
26
  #Model = transformers.DeepseekV3ForCausalLM
27
  pass
28
+
29
+ model = Model.from_pretrained(model_id, revision=revision, trust_remote_code=True, torch_dtype=torch.float64, use_safetensors=True, low_cpu_mem_usage=True,
30
+ device_map='auto', max_memory={'cpu':accelerate.utils.get_max_memory()['cpu']}, offload_state_dict=True, offload_buffers=True)
31
+
32
  if config.model_type == 'deepseek_v3':
33
  model._supports_cache_class = False
34
  #model = accelerate.cpu_offload(model, 'cuda:0', offload_buffers=True)
35
 
36
+ pipe = transformers.pipeline('text-generation', model=model, config=config, tokenizer=tokenizer)
37
 
38
  tensors = {}
39
  def store_tensor(descr, tensor):