Update README.md

README.md (changed, @@ -26,5 +26,25 @@ — section: "See the paper [Tomayto, Tomahto. Beyond Token-level Answer Equivalence for Question Answering]")

# Example use

```python
|
30 |
+
from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
31 |
+
from torch.nn import functional as F
|
32 |
+
|
33 |
+
tokenizer = AutoTokenizer.from_pretrained("kortukov/answer-equivalence-bem")
|
34 |
+
model = AutoModelForSequenceClassification.from_pretrained("kortukov/answer-equivalence-bem")
|
35 |
+
|
36 |
+
question = "What does Ban Bossy encourage?"
|
37 |
+
reference = "leadership in girls"
|
38 |
+
candidate = "positions of power"
|
39 |
+
|
40 |
+
def tokenize_function(question, reference, candidate):
|
41 |
+
text = f"[CLS] {candidate} [SEP]"
|
42 |
+
text_pair = f"{reference} [SEP] {question} [SEP]"
|
43 |
+
return tokenizer(text=text, text_pair=text_pair, add_special_tokens=False, padding='max_length', truncation=True, return_tensors='pt')
|
44 |
+
|
45 |
+
inputs = tokenize_function(question, reference, candidate)
|
46 |
+
out = model(**inputs)
|
47 |
+
|
48 |
+
prediction = F.softmax(out.logits, dim=-1).argmax().item()
|
49 |
+
```
|
50 |
|