# AQASH / app.py
import streamlit as st
# Sentiment-analysis demo, kept commented out for reference:
# from transformers import pipeline
# pipe = pipeline('sentiment-analysis')
# text = st.text_area('enter some text!')
# if text:
#     out = pipe(text)
#     st.json(out)
from transformers import pipeline
model_name = "deepset/xlm-roberta-large-squad2"
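# device=0 assumes a GPU is available; use device=-1 to run on CPU only.
# Note: Streamlit re-runs this script on each interaction, so the model is reloaded
# every time; caching the pipeline (e.g. with st.cache_resource) would avoid that,
# assuming a recent Streamlit version.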
qa_pl = pipeline('question-answering', model=model_name, tokenizer=model_name, device=0)
# predictions = []   # used by the commented-out batch loop at the bottom
# Batched inference might be faster than calling the pipeline per example.
ctx = st.text_area('Enter the context')
q = st.text_area('Enter your question')
# Run the QA pipeline once both fields have input and display the extracted answer.
if ctx and q:
    result = qa_pl(context=ctx, question=q)
    st.write(result["answer"])
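# Offline batch evaluation over a test dataframe, kept commented out for reference
# (test_df is assumed to have "context" and "question" columns):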
# for ctx, q in test_df[["context", "question"]].to_numpy():
#     result = qa_pl(context=ctx, question=q)
#     predictions.append(result["answer"])
# model = AutoModelForQuestionAnswering.from_pretrained(model_name)
# tokenizer = AutoTokenizer.from_pretrained(model_name)