Spaces:
Sleeping
Sleeping
File size: 1,273 Bytes
1eba40c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 |
"""Streamlit demo: score Spanish text with a suicide-risk regression model.

Pipeline: sentence-transformer embeddings -> pickled ridge regression chain
-> task-D probabilities -> derived task labels rendered as a table.
"""
import streamlit as st
import pickle
import numpy as np
import os, glob, json, sys
import pandas as pd

from sentence_transformers import SentenceTransformer

from src import data, utils
from src.embeddings import EmbeddingsRegressor

# Path of the serialized regression head trained on top of the embeddings.
MODEL_PATH = 'models/2d_ridge_roberta-suicide-regchain-pca-final.pkl'

# NOTE(review): pickle.load is only safe because this file ships with the
# repo — never point this at an untrusted/downloaded pickle.
with open(MODEL_PATH, 'rb') as f:
    regressor = pickle.load(f)

# Embedding backbone: RoBERTa fine-tuned for Spanish suicide-risk detection.
model_name = 'hackathon-somos-nlp-2023/roberta-base-bne-finetuned-suicide-es'
tokenizer = SentenceTransformer(model_name)
model = EmbeddingsRegressor(tokenizer, regressor, normalize_output=True)
predict = utils.make_predict(model.predict)

text_input = st.text_input('Enter your text here:')
if text_input:
    # Round-trip through tolist()/np.array coerces the predictor's output
    # into a plain (n, 4) float array before normalization.
    prediction = predict([text_input]).tolist()
    prediction = np.array(prediction).reshape(-1, 4)
    prediction = utils.normalize(prediction)

    # Derive task labels from the task-D scores; rename the d-columns to
    # identifier-safe names (e.g. 'a+b' -> 'd_a_b', 'a|b' -> 'd_a_b').
    preds_df = data.make_task_labels_from_d(prediction, include_d=True).rename(
        columns={c: 'd_' + c.replace('+', '_').replace('|', '_') for c in data.task_d_cols}
    )
    # b_label is probability-like; clamp to the valid [0, 1] range.
    preds_df['b_label'] = np.clip(preds_df['b_label'], 0, 1)

    # Render the predictions.
    st.table(preds_df)
|