Medissa committed on
Commit
4d4a47e
·
verified ·
1 Parent(s): 3804c64

Update tasks/text.py

Browse files
Files changed (1) hide show
  1. tasks/text.py +3 -4
tasks/text.py CHANGED
@@ -100,12 +100,13 @@ async def evaluate_text(request: TextEvaluationRequest):
100
  tokenizer = AutoTokenizer.from_pretrained(BINARY_MODEL)
101
  print('Loaded Tokenizer')
102
  model = AutoModelForSequenceClassification.from_pretrained(BINARY_MODEL)
103
- print('Loaded Model')
104
  model.to(device)
105
  model.eval()
106
-
107
  predictions = []
108
  for i,text in tqdm(enumerate(test_dataset["quote"])):
 
109
  with torch.no_grad():
110
  tokenized_text = tokenizer(text, truncation=True, padding='max_length', return_tensors = "pt")
111
  inputt = {k:v.to(device) for k,v in tokenized_text.items()}
@@ -116,8 +117,6 @@ async def evaluate_text(request: TextEvaluationRequest):
116
 
117
  prediction = "0_not_relevant" if binary_prediction==0 else 1
118
  predictions.append(prediction)
119
- if i%10:
120
- print(f'iteration: {i}')
121
 
122
  gc.collect()
123
 
 
100
  tokenizer = AutoTokenizer.from_pretrained(BINARY_MODEL)
101
  print('Loaded Tokenizer')
102
  model = AutoModelForSequenceClassification.from_pretrained(BINARY_MODEL)
103
+ print(device)
104
  model.to(device)
105
  model.eval()
106
+ print('Loaded Model')
107
  predictions = []
108
  for i,text in tqdm(enumerate(test_dataset["quote"])):
109
+ print(i)
110
  with torch.no_grad():
111
  tokenized_text = tokenizer(text, truncation=True, padding='max_length', return_tensors = "pt")
112
  inputt = {k:v.to(device) for k,v in tokenized_text.items()}
 
117
 
118
  prediction = "0_not_relevant" if binary_prediction==0 else 1
119
  predictions.append(prediction)
 
 
120
 
121
  gc.collect()
122