File size: 1,667 Bytes
1687c0f
 
 
 
a86eedf
f417b2c
 
1687c0f
 
 
f417b2c
1687c0f
 
5bcf8de
 
f417b2c
 
1687c0f
 
 
 
cd8db40
1687c0f
 
 
 
 
cd8db40
 
 
1687c0f
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
import gradio as gr 
from transformers import RobertaForQuestionAnswering
from transformers import BertForQuestionAnswering
from transformers import AutoTokenizer
from transformers import pipeline


# Load the Persian XLM-RoBERTa-large QA checkpoint and its tokenizer from the
# HuggingFace Hub. NOTE(review): this downloads the model at import time
# (network side effect) — no local caching/offline fallback is configured.
model1 = RobertaForQuestionAnswering.from_pretrained("pedramyazdipoor/persian_xlm_roberta_large")
tokenizer1 = AutoTokenizer.from_pretrained("pedramyazdipoor/persian_xlm_roberta_large")


# Wrap model + tokenizer in an extractive question-answering pipeline;
# `roberta_large` is the callable used by Q_A below.
roberta_large = pipeline(task='question-answering', model=model1, tokenizer=tokenizer1)

def Q_A(question, context):
    """Answer *question* from *context* using the RoBERTa QA pipeline.

    Returns the extracted answer span (string) chosen by the pipeline.
    """
    qa_input = {"question": question, "context": context}
    result = roberta_large(qa_input)
    return result["answer"]



# UI strings for the Gradio interface.
# Fix: the original title had a grammatical error ("develop by nima asl toghiri").
title = "Question and answer based on Roberta model developed by Nima Asl Toghiri"
# Persian: "Question-and-answer language processing system"
description = "سیستم پردازش زبانی پرسش و پاسخ"
# Persian: "Trained with the RoBERTa language model"
article = "آموزش داده شده با مدل زبانی روبرتا"


# Build the Gradio UI: two right-aligned Persian text inputs (question and
# source context) routed through Q_A to a single copyable text output.
question_box = gr.Textbox(label='پرسش خود را وارد کنید:', show_label=True, text_align='right', lines=2)
context_box = gr.Textbox(label='متن منبع خود را وارد کنید', show_label=True, text_align='right', lines=8)

demo = gr.Interface(
    fn=Q_A,
    inputs=[question_box, context_box],
    outputs=gr.Text(show_copy_button=True),
    title=title,
    description=description,
    article=article,
)

# Launch the app with a public share link.
demo.launch(share=True)