|
import gradio as gr |
|
import re |
|
import spacy |
|
from transformers import pipeline |
|
|
|
|
|
# Small English spaCy pipeline; used only for tokenisation in preprocess_text().
# NOTE(review): model is English-only while the QA model below is multilingual —
# non-English uploads will still be tokenised with English rules.
nlp = spacy.load("en_core_web_sm")
|
|
|
def preprocess_text(text):
    """Lower-case *text*, tokenise it with spaCy, and drop punctuation tokens.

    Args:
        text: Raw input string.

    Returns:
        list[str]: The non-punctuation token texts, in document order.
    """
    tokens = []
    for token in nlp(text.lower()):
        if token.is_punct:
            continue
        tokens.append(token.text)
    return tokens
|
|
|
|
|
# Extractive question-answering pipeline: XLM-RoBERTa large fine-tuned on
# SQuAD 2.0, so it handles questions/contexts in many languages.
qa_model = pipeline("question-answering", model="deepset/xlm-roberta-large-squad2")
|
|
|
|
|
def answer_question(question, context):
    """Extract an answer to *question* from *context* with the QA pipeline.

    Args:
        question: Natural-language question string.
        context: Raw document text to search for the answer.

    Returns:
        str: The extracted answer span, or an "Error: ..." message on failure
        (callers display the return value directly, so errors are returned,
        not raised).
    """
    # Guard clauses: the pipeline raises unhelpful errors on empty input.
    if not question or not question.strip():
        return "Error: question is empty"
    if not context or not context.strip():
        return "Error: context is empty"

    try:
        # Pass the RAW context. The previous version lowercased the text and
        # stripped punctuation first, but extractive QA models (cased
        # XLM-RoBERTa trained on natural text) depend on casing and
        # punctuation — preprocessing degraded answer quality and misaligned
        # the predicted spans against the original document.
        result = qa_model(question=question, context=context)
        return result['answer']
    except Exception as e:
        # Best-effort UI: surface the failure as text rather than crashing.
        return f"Error: {str(e)}"
|
|
|
|
|
def qa_app(text_file, question):
    """Gradio handler: read the uploaded text file and answer *question*.

    Args:
        text_file: Upload from gr.File — either a tempfile-like object with a
            ``.name`` attribute or (newer Gradio versions) a plain path string.
        question: Question to answer from the file's contents.

    Returns:
        str: The answer, or an "Error reading file: ..." message.
    """
    # Gradio passes None when the user submits without uploading a file;
    # the old code crashed on `None.name` and showed a confusing message.
    if text_file is None:
        return "Error reading file: no file uploaded"

    # Accept both a tempfile-like object (.name) and a bare path string.
    path = getattr(text_file, "name", text_file)

    try:
        # Explicit encoding: without it, open() uses the platform default,
        # which breaks non-ASCII uploads on some systems.
        with open(path, 'r', encoding='utf-8') as file:
            context = file.read()
    except Exception as e:
        return f"Error reading file: {str(e)}"

    # Keep the try block minimal: only file I/O is handled here; QA errors
    # are reported by answer_question itself.
    return answer_question(question, context)
|
|
|
|
|
# Web UI: a file-upload widget plus a question textbox, wired to qa_app;
# the handler's string return value is shown in a plain text output.
iface = gr.Interface(
    fn=qa_app,
    inputs=[gr.File(label="Upload your text file"), gr.Textbox(label="Enter your question")],
    outputs="text",
    title="Multilingual Question Answering",
    description="Upload a text file and ask a question based on its content."
)
|
|
|
|
|
# Launch the server only when run as a script, so importing this module
# (e.g. for testing or reuse of qa_app) does not start a web server.
if __name__ == "__main__":
    iface.launch()
|
|
|
|