import os

import numpy as np
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Tokenizer comes from the base DeBERTa-v3 checkpoint; the fine-tuned classifier
# weights are loaded from a local checkpoint located relative to this script.
tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v3-base")
model = AutoModelForSequenceClassification.from_pretrained(
    os.path.realpath(os.path.join(__file__, "..", "./outputs/v2-deberta-100-max-71%-sep/checkpoint-1000/")),
    local_files_only=True,
)
# Example inputs: "<topic> [SEP] <statement>" pairs, plus one neutral statement without a topic.
text_against = "ai [SEP] I think ai is a waste of time. I don't understand why everyone is so obsessed with this subject, it makes no sense?"
text_for = "flowers [SEP] I think flowers are very useful and will become essential to society"
text_neutral = "Ai is a tool used by researchers and scientists to approximate functions"

# Only text_for is encoded here; swap in the other examples to compare outputs.
encoded = tokenizer(text_for.lower(), max_length=100, padding="max_length", truncation=True, return_tensors="pt")
def normalize(arr: np.ndarray) -> np.ndarray:
    """Shift values so the minimum is zero, then scale them to sum to 1."""
    arr = arr - arr.min()
    return arr / arr.sum()
# Run the classifier and print the raw logits alongside the normalized scores.
output = model(**encoded)
logits = output.logits.detach().numpy()[0]
print(logits)
print(normalize(logits))
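
# A softmax is the standard way to turn logits into class probabilities; shown
# here as a rough NumPy sketch for comparison with the min-shift normalize() above.
def softmax(arr: np.ndarray) -> np.ndarray:
    exps = np.exp(arr - arr.max())  # subtract the max for numerical stability
    return exps / exps.sum()

print(softmax(logits))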