Medissa committed on
Commit
a99fef1
·
verified ·
1 Parent(s): aacb60a

Update tasks/text.py

Browse files
Files changed (1) hide show
  1. tasks/text.py +9 -9
tasks/text.py CHANGED
@@ -95,12 +95,12 @@ async def evaluate_text(request: TextEvaluationRequest):
95
  # YOUR MODEL INFERENCE CODE HERE
96
  # Update the code below to replace the random baseline by your model inference within the inference pass where the energy consumption and emissions are tracked.
97
  #--------------------------------------------------------------------------------------------
98
- print('Start Binary')
99
  # Binary Model
100
- tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_BINARY)
101
  print('Loaded Tokenizer')
102
  model = AutoModelForSequenceClassification.from_pretrained(BINARY_MODEL)
103
- print('Loaded Model')
104
  model.to(device)
105
  model.eval()
106
 
@@ -117,18 +117,18 @@ async def evaluate_text(request: TextEvaluationRequest):
117
  prediction = "0_not_relevant" if binary_prediction==0 else 1
118
  predictions.append(prediction)
119
  if i%10:
120
- print(f'iteration: {i}')
121
 
122
  gc.collect()
123
 
124
  ## 2. Taxonomy Model
125
- print('Start Multi')
126
- tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_MULTI_CLASS)
127
- print('Loaded Tokenizer')
128
  model = AutoModelForSequenceClassification.from_pretrained(MULTI_CLASS_MODEL)
129
  model.to(device)
130
  model.eval()
131
- print('Loaded Model')
132
  for i,text in tqdm(enumerate(test_dataset["quote"])):
133
  if isinstance(predictions[i], str):
134
  continue
@@ -142,7 +142,7 @@ async def evaluate_text(request: TextEvaluationRequest):
142
  prediction = ID2LABEL[taxonomy_prediction]
143
  predictions[i] = prediction
144
  if i%10:
145
- print(f'iteration: {i}')
146
  predictions = [LABEL_MAPPING[pred] for pred in predictions]
147
  #--------------------------------------------------------------------------------------------
148
  # YOUR MODEL INFERENCE STOPS HERE
 
95
  # YOUR MODEL INFERENCE CODE HERE
96
  # Update the code below to replace the random baseline by your model inference within the inference pass where the energy consumption and emissions are tracked.
97
  #--------------------------------------------------------------------------------------------
98
+ logger.info('Start Binary')
99
  # Binary Model
100
+ tokenizer = AutoTokenizer.from_pretrained(BINARY_MODEL)
101
  print('Loaded Tokenizer')
102
  model = AutoModelForSequenceClassification.from_pretrained(BINARY_MODEL)
103
+ logger.info('Loaded Model')
104
  model.to(device)
105
  model.eval()
106
 
 
117
  prediction = "0_not_relevant" if binary_prediction==0 else 1
118
  predictions.append(prediction)
119
  if i%10:
120
+ logger.info(f'iteration: {i}')
121
 
122
  gc.collect()
123
 
124
  ## 2. Taxonomy Model
125
+ logger.info('Start Multi')
126
+ tokenizer = AutoTokenizer.from_pretrained(MULTI_CLASS_MODEL)
127
+ logger.info('Loaded Tokenizer')
128
  model = AutoModelForSequenceClassification.from_pretrained(MULTI_CLASS_MODEL)
129
  model.to(device)
130
  model.eval()
131
+ logger.info('Loaded Model')
132
  for i,text in tqdm(enumerate(test_dataset["quote"])):
133
  if isinstance(predictions[i], str):
134
  continue
 
142
  prediction = ID2LABEL[taxonomy_prediction]
143
  predictions[i] = prediction
144
  if i%10:
145
+ logger.info(f'iteration: {i}')
146
  predictions = [LABEL_MAPPING[pred] for pred in predictions]
147
  #--------------------------------------------------------------------------------------------
148
  # YOUR MODEL INFERENCE STOPS HERE