# Kathar / app.py — Gradio demo: table question answering with google/tapas-large-finetuned-wtq.
# NOTE: Hugging Face Space page chrome ("raw / history / blame / 1.6 kB") removed so the
# file is valid Python. Original upload by SunilMahi, revision 3a0e699.
import gradio as gr
from transformers import pipeline
import logging
# Set up logging: timestamped, level-tagged messages to stderr for the whole app.
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
# Load the Tapas model once at startup (module import time).
# NOTE(review): this downloads/loads a large checkpoint; first start can take minutes
# and the process needs enough RAM for tapas-large — confirm the Space hardware fits.
qa_pipeline = pipeline("table-question-answering", model="google/tapas-large-finetuned-wtq")
def ask_table(data, question):
    """Answer a natural-language question about a user-supplied table.

    Args:
        data: 2-D table from the Gradio ``Dataframe`` component with
            ``type="array"`` — a list of rows, each row a list of cell
            values (cells may arrive as numbers, not just strings).
        question: Free-text question about the table.

    Returns:
        The answer string extracted by the TAPAS pipeline, or a
        human-readable usage/error message.
    """
    try:
        logging.info(f"Received table: {data}, question: {question}")
        if not data or not question:
            return "Please provide both a table and a question."
        # TAPAS requires every cell to be a string; numeric cells from the
        # Dataframe component would otherwise make the pipeline raise
        # ValueError ("Table must be of type str"), so stringify each value.
        processed_table = [
            {"col_" + str(i): str(val) for i, val in enumerate(row)} for row in data
        ]
        answers = qa_pipeline(table=processed_table, query=question)
        logging.info(f"Answer: {answers}")
        return answers.get("answer", "No answer found.")
    except Exception as e:
        # Top-level UI boundary: report the failure to the user instead of
        # letting the Gradio worker crash.
        logging.error(f"Error: {str(e)}")
        return f"Error processing your request: {str(e)}"
# Define the Gradio interface: an editable table plus a question box, wired to ask_table.
iface = gr.Interface(
    fn=ask_table,
    inputs=[
        gr.Dataframe(
            headers=["Column 1", "Column 2", "Column 3"],
            row_count=(2, "dynamic"),
            # Start with 3 columns so the initial grid matches the 3 declared
            # headers (the original used 2, leaving one header label unused);
            # "dynamic" still lets the user add/remove columns.
            col_count=(3, "dynamic"),
            type="array",  # deliver the table as a list of row lists to ask_table
            label="Input Table"
        ),
        gr.Textbox(
            lines=2,
            placeholder="Enter your question about the table here...",
            label="Ask a Question"
        )
    ],
    outputs="text",
    title="Table Question Answering",
    description="Provide a table with up to 50 rows and 20 columns. Ask clear and concise questions for best results.",
)
# Launch the Gradio app; share=True also exposes a temporary public URL.
iface.launch(share=True)