Ngit committed on
Commit a869d2e · 1 Parent(s): 809757b

Update README.md

Files changed (1): README.md +4 -4
README.md CHANGED
@@ -5,7 +5,7 @@ language:
 
 # Text Classification GoEmotions
 
-This model is a fine-tuned version of [nreimers/MiniLMv2-L6-H384-distilled-from-BERT-Large](https://huggingface.co/nreimers/MiniLMv2-L6-H384-distilled-from-BERT-Large) on the [Jigsaw 1st Kaggle competition](https://www.kaggle.com/competitions/jigsaw-toxic-comment-classification-challenge) dataset, using [unitary/toxic-bert](https://huggingface.co/unitary/toxic-bert) as the teacher model.
+This is a quantized ONNX model, a fine-tuned version of [nreimers/MiniLMv2-L6-H384-distilled-from-BERT-Large](https://huggingface.co/nreimers/MiniLMv2-L6-H384-distilled-from-BERT-Large) on the [Jigsaw 1st Kaggle competition](https://www.kaggle.com/competitions/jigsaw-toxic-comment-classification-challenge) dataset, using [unitary/toxic-bert](https://huggingface.co/unitary/toxic-bert) as the teacher model.
 
 # Load the Model
 
@@ -18,9 +18,9 @@ from tokenizers import Tokenizer
 from onnxruntime import InferenceSession
 
 
-# !git clone https://huggingface.co/Ngit/MiniLM-L6-toxic-all-labels
+# !git clone https://huggingface.co/Ngit/MiniLM-L6-toxic-all-labels-onnx
 
-model_name = "Ngit/MiniLM-L6-toxic-all-labels"
+model_name = "Ngit/MiniLM-L6-toxic-all-labels-onnx"
 tokenizer = Tokenizer.from_pretrained(model_name)
 tokenizer.enable_padding(
     pad_token="<pad>",
@@ -31,7 +31,7 @@ batch_size = 16
 
 texts = ["This is pure trash",]
 outputs = []
-model = InferenceSession("MiniLM-L6-toxic-all-labels-onnx/model_optimized.onnx", providers=['CUDAExecutionProvider'])
+model = InferenceSession("MiniLM-L6-toxic-all-labels-onnx/model_optimized_quantized.onnx", providers=['CUDAExecutionProvider'])
 
 with open(os.path.join("MiniLM-L6-toxic-all-labels-onnx", "config.json"), "r") as f:
     config = json.load(f)
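
The hunks above end before the README's inference step. As a point of reference, here is a minimal end-to-end sketch of how the loaded tokenizer, quantized ONNX session, and config fit together. It assumes the exported graph takes `input_ids`/`attention_mask` (plus `token_type_ids` if the graph declares it), that the logits are multi-label and should go through a sigmoid, and that `config.json` carries an `id2label` map; those details, the `pad_id` continuation of `enable_padding`, and the `CPUExecutionProvider` fallback are assumptions, not part of this commit.

```python
import json
import os

import numpy as np
from onnxruntime import InferenceSession
from tokenizers import Tokenizer

model_name = "Ngit/MiniLM-L6-toxic-all-labels-onnx"
model_dir = "MiniLM-L6-toxic-all-labels-onnx"  # local clone, as in the README's git clone step

# Tokenizer with padding, following the diff; the pad_id argument is an assumed continuation
tokenizer = Tokenizer.from_pretrained(model_name)
tokenizer.enable_padding(
    pad_token="<pad>",
    pad_id=tokenizer.token_to_id("<pad>"),
)

# Quantized ONNX session; CPUExecutionProvider is a safe default here
# (the README uses CUDAExecutionProvider on GPU)
session = InferenceSession(
    os.path.join(model_dir, "model_optimized_quantized.onnx"),
    providers=["CPUExecutionProvider"],
)

with open(os.path.join(model_dir, "config.json"), "r") as f:
    config = json.load(f)

texts = ["This is pure trash"]
encodings = tokenizer.encode_batch(texts)

# Assumed ONNX input names; some BERT-style exports also expect token_type_ids
ort_inputs = {
    "input_ids": np.array([e.ids for e in encodings], dtype=np.int64),
    "attention_mask": np.array([e.attention_mask for e in encodings], dtype=np.int64),
}
if "token_type_ids" in {i.name for i in session.get_inputs()}:
    ort_inputs["token_type_ids"] = np.zeros_like(ort_inputs["input_ids"])

logits = session.run(None, ort_inputs)[0]

# Multi-label toxicity head (assumption): sigmoid per label, names taken from config["id2label"]
probs = 1.0 / (1.0 + np.exp(-logits))
id2label = config.get("id2label", {})
for text, row in zip(texts, probs):
    scores = {id2label.get(str(i), str(i)): round(float(p), 4) for i, p in enumerate(row)}
    print(text, scores)
```

Swap the providers list back to `['CUDAExecutionProvider']` to match the README snippet when running on GPU.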