LG-AI-EXAONE committed
Commit 169cd5b · 1 Parent(s): 76c60f1

Fix quantized weights

Files changed (2):
  1. config.json +3 -4
  2. model.safetensors +2 -2
config.json CHANGED
@@ -27,8 +27,7 @@
     "bits": 4,
     "group_size": 128,
     "modules_to_not_convert": [
-      "lm_head",
-      "gate"
+      "lm_head"
     ],
     "quant_method": "awq",
     "version": "gemm",
@@ -43,8 +42,8 @@
   },
   "rope_theta": 1000000,
   "tie_word_embeddings": true,
-  "torch_dtype": "bfloat16",
-  "transformers_version": "4.48.3",
+  "torch_dtype": "float16",
+  "transformers_version": "4.43.1",
   "use_cache": false,
   "vocab_size": 102400
 }
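For context, the updated quantization_config above describes a 4-bit AWQ checkpoint (GEMM kernels, group size 128) with only lm_head left unquantized, now stored as float16. Below is a minimal loading sketch under those assumptions; the repository id is a placeholder (the model name is not part of this diff), and loading AWQ weights through transformers also assumes the autoawq and accelerate packages plus a CUDA device are available.

# Hedged sketch: loading the AWQ-quantized checkpoint with transformers.
# The repo id is a placeholder; the actual model name is not shown in this diff.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "LG-AI-EXAONE/<quantized-model-name>"  # placeholder, not from the diff

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.float16,  # matches the updated "torch_dtype" in config.json
    device_map="auto",          # requires accelerate
    trust_remote_code=True,     # may be needed if the repo ships custom modeling code
)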
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a68cb9c85c1d871b1b384938fe178b270c7498bff79ebcf64962c24e0cb6944b
-size 1638048680
+oid sha256:736fa24988823b07e43792d1eb010b6ee3d1aefaddb2dbeaac6a3be9d7c9817a
+size 1638048576
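Because only the Git LFS pointer changed here, previously downloaded copies of model.safetensors still hold the old weights. The sketch below verifies a re-downloaded file against the new pointer, assuming it sits in the current directory; the expected digest and size are taken from the "+" lines above.

# Hedged sketch: check a local model.safetensors against the updated LFS pointer.
import hashlib
import os

path = "model.safetensors"  # assumed local path to the downloaded file
expected_sha256 = "736fa24988823b07e43792d1eb010b6ee3d1aefaddb2dbeaac6a3be9d7c9817a"
expected_size = 1638048576

assert os.path.getsize(path) == expected_size, "size does not match the LFS pointer"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == expected_sha256, "sha256 does not match the LFS pointer"
print("model.safetensors matches the updated LFS pointer")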