mgoin committed 0e96445 (verified) · Parent(s): b1220d3

Create README.md

Files changed (1):
1. README.md +33 -0
README.md ADDED

---
tags:
- fp8
---

# Qwen2-72B-Instruct-FP8

Quantized with [AutoFP8](https://github.com/neuralmagic/autofp8) on 8xA100 using the following script:

```python
from datasets import load_dataset
from transformers import AutoTokenizer

from auto_fp8 import AutoFP8ForCausalLM, BaseQuantizeConfig

pretrained_model_dir = "Qwen/Qwen2-72B-Instruct"
quantized_model_dir = "Qwen2-72B-Instruct-FP8"

tokenizer = AutoTokenizer.from_pretrained(pretrained_model_dir, use_fast=True)
tokenizer.pad_token = tokenizer.eos_token

# Build 512 calibration samples from UltraChat, rendered through the model's
# chat template, to derive static activation scales.
ds = load_dataset("mgoin/ultrachat_2k", split="train_sft").select(range(512))
examples = [tokenizer.apply_chat_template(batch["messages"], tokenize=False) for batch in ds]
examples = tokenizer(examples, padding=True, truncation=True, return_tensors="pt").to("cuda")

# FP8 weights with static (calibration-derived) activation scales.
quantize_config = BaseQuantizeConfig(quant_method="fp8", activation_scheme="static")

model = AutoFP8ForCausalLM.from_pretrained(
    pretrained_model_dir, quantize_config=quantize_config
)
model.quantize(examples)
model.save_quantized(quantized_model_dir)
```
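
The saved checkpoint can then be served with an FP8-aware runtime such as vLLM. A minimal sketch, assuming a recent vLLM build with FP8 support and the local output directory from the script above (the prompt and sampling settings are illustrative, not part of the original README):

```python
from vllm import LLM, SamplingParams

# Load the FP8 checkpoint produced above; vLLM picks up the quantization
# config that AutoFP8 saved alongside the weights. tensor_parallel_size=8
# mirrors the 8xA100 setup used for quantization.
llm = LLM(model="Qwen2-72B-Instruct-FP8", tensor_parallel_size=8)

sampling = SamplingParams(temperature=0.7, max_tokens=256)
outputs = llm.generate(["Give me a short introduction to large language models."], sampling)
print(outputs[0].outputs[0].text)
```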