karl committed
Commit 59b00ec · 1 Parent(s): 4aa812d

trying to increase accuracy and determinism

Files changed (1):
  1. run_test.py +14 -3
run_test.py CHANGED
@@ -1,19 +1,30 @@
 #!/usr/bin/env python3
 import os, sys
-import accelerate, safetensors.torch, transformers, torch, tqdm
 
 model_id, revision = sys.argv[1:]
 user, model = model_id.split('/')
-
 fn = f'{user}_{model}_{revision}.logits.safetensors'
 
+import torch, numpy as np, random
+torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
+torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
+torch.backends.cudnn.allow_tf32 = False
+torch.backends.cudnn.benchmark = False
+torch.use_deterministic_algorithms(True)
+os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:2'
+os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
+torch.manual_seed(0)
+random.seed(0)
+np.random.seed(0)
+import accelerate, safetensors.torch, transformers, torch, tqdm
+
 config = transformers.AutoConfig.from_pretrained(model_id, revision=revision, trust_remote_code=True)
 tokenizer = transformers.AutoTokenizer.from_pretrained(model_id, revision=revision, trust_remote_code=True)
 Model = transformers.AutoModelForCausalLM
 if config.model_type == 'deepseek_v3':
     #Model = transformers.DeepseekV3ForCausalLM
     pass
-model = Model.from_pretrained(model_id, revision=revision, trust_remote_code=True, torch_dtype='auto', device_map='cpu')
+model = Model.from_pretrained(model_id, revision=revision, trust_remote_code=True, torch_dtype=torch.float64, use_safetensors=True)
 if config.model_type == 'deepseek_v3':
     model._supports_cache_class = False
     model = accelerate.cpu_offload(model, 'cuda:0', offload_buffers=True)
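
A hedged sketch, not part of the commit: one way to check that these flags actually buy bit-reproducibility is to run the same input through the model twice and compare the logits bitwise (torch.equal, not torch.allclose). The model id and prompt below are placeholders; the flag settings mirror the ones the commit adds to run_test.py.

#!/usr/bin/env python3
import os
# cuBLAS reads this at handle creation; required by use_deterministic_algorithms on CUDA
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:2'

import torch, transformers

# same knobs as the commit: no reduced-precision reductions, no TF32, no autotuned kernels
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
torch.backends.cudnn.allow_tf32 = False
torch.backends.cudnn.benchmark = False
torch.use_deterministic_algorithms(True)
torch.manual_seed(0)

model_id = 'gpt2'  # placeholder model, not the one run_test.py receives on argv
tokenizer = transformers.AutoTokenizer.from_pretrained(model_id)
model = transformers.AutoModelForCausalLM.from_pretrained(model_id).eval()

inputs = tokenizer('hello world', return_tensors='pt')
with torch.no_grad():
    a = model(**inputs).logits
    b = model(**inputs).logits

# determinism means identical bits, so use exact equality rather than a tolerance
assert torch.equal(a, b), 'forward pass is not deterministic'
print('deterministic')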
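
The other half of the change swaps torch_dtype='auto' on CPU for float64 weights, with accelerate moving parameters onto cuda:0 for the forward pass. A minimal sketch of that load path in isolation, assuming a CUDA device is available (model id again a placeholder):

import accelerate, torch, transformers

model_id = 'gpt2'  # placeholder; run_test.py takes model_id and revision from sys.argv
model = transformers.AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float64,  # fp64 weights trade speed for accumulation accuracy
    use_safetensors=True,       # insist on safetensors checkpoints
)
# weights stay in CPU RAM; accelerate copies each submodule to cuda:0 only when it runs
model = accelerate.cpu_offload(model, 'cuda:0', offload_buffers=True)

Note that fp64 matmuls are drastically slower than fp16/bf16 on most GPUs, so this is a choice for reference-quality logits rather than throughput.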