import logging

import gradio as gr
from transformers import pipeline

logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

# Load the TAPAS model fine-tuned on WikiTableQuestions once at startup.
# For each query the pipeline returns a dict with "answer", "coordinates",
# "cells", and "aggregator" keys; only "answer" is surfaced to the UI.
qa_pipeline = pipeline("table-question-answering", model="google/tapas-large-finetuned-wtq")


def ask_table(data, question):
    try:
        logging.info(f"Received table: {data}, question: {question}")
        if not data or not question:
            return "Please provide both a table and a question."
        # TAPAS expects text-only cells. The Dataframe's "array" type passes raw
        # cell values without headers, so columns are labelled col_0, col_1, ...
        # and every value is cast to str.
        processed_table = [{f"col_{i}": str(val) for i, val in enumerate(row)} for row in data]
        answers = qa_pipeline(table=processed_table, query=question)
        logging.info(f"Answer: {answers}")
        return answers.get("answer", "No answer found.")
    except Exception as e:
        logging.error(f"Error: {e}")
        return f"Error processing your request: {e}"
# Build the Gradio UI: an editable table input plus a free-text question box.
iface = gr.Interface(
    fn=ask_table,
    inputs=[
        gr.Dataframe(
            headers=["Column 1", "Column 2", "Column 3"],
            row_count=(2, "dynamic"),
            col_count=(3, "dynamic"),
            type="array",
            label="Input Table",
        ),
        gr.Textbox(
            lines=2,
            placeholder="Enter your question about the table here...",
            label="Ask a Question",
        ),
    ],
    outputs="text",
    title="Table Question Answering",
    description="Provide a table with up to 50 rows and 20 columns. Ask clear and concise questions for best results.",
)
# share=True prints a temporary public link in addition to the local URL.
iface.launch(share=True)
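# Optional: launch() also accepts arguments such as server_name and server_port
# if a fixed host/port is preferred over the default local address.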