|
import gradio as gr |
|
from transformers import RobertaForQuestionAnswering |
|
from transformers import BertForQuestionAnswering |
|
from transformers import AutoTokenizer |
|
from transformers import pipeline |
|
|
|
|
|
# Load the Persian XLM-RoBERTa question-answering checkpoint and wrap it
# in a transformers QA pipeline. `roberta_large` is used by Q_A below.
_MODEL_ID = "pedramyazdipoor/persian_xlm_roberta_large"

qa_model = RobertaForQuestionAnswering.from_pretrained(_MODEL_ID)
qa_tokenizer = AutoTokenizer.from_pretrained(_MODEL_ID)

roberta_large = pipeline(
    task='question-answering',
    model=qa_model,
    tokenizer=qa_tokenizer,
)
|
|
|
def Q_A(question, context):
    """Answer `question` from `context` using the RoBERTa QA pipeline.

    Returns the extracted answer span as a string (the pipeline's
    'answer' field).
    """
    result = roberta_large({"question": question, "context": context})
    return result['answer']
|
|
|
|
|
|
|
|
|
# UI metadata shown by the Gradio interface.
# Fix: the original title read "develop by nima asl toghiri" — corrected
# grammar and capitalization of the author's name.
title = "Question and answer based on RoBERTa model, developed by Nima Asl Toghiri"

# Persian: "Question-and-answer language processing system"
description = "سیستم پردازش زبانی پرسش و پاسخ"

# Persian: "Trained with the RoBERTa language model"
article = "آموزش داده شده با مدل زبانی روبرتا"
|
|
|
|
|
# Build the Gradio UI: two right-aligned Persian text inputs feeding Q_A,
# with a copyable text output, then launch with a public share link.
question_box = gr.Textbox(
    label='پرسش خود را وارد کنید:',
    show_label=True,
    text_align='right',
    lines=2,
)
context_box = gr.Textbox(
    label='متن منبع خود را وارد کنید',
    show_label=True,
    text_align='right',
    lines=8,
)

demo = gr.Interface(
    fn=Q_A,
    inputs=[question_box, context_box],
    outputs=gr.Text(show_copy_button=True),
    title=title,
    description=description,
    article=article,
)

demo.launch(share=True)