Update app.py
app.py
CHANGED
@@ -93,7 +93,7 @@ model = OCRVQAModel({
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 model = model.to(device)
 
-def get_answer(image, question) -> str:
+def get_answer(image, url, question) -> str:
     global model, device
 
     result = model.inference(image, question, device)
@@ -112,27 +112,17 @@ with gr.Blocks() as demo:
         """
     )
 
-
-
-
-        print("Loading image from URL...")
-        image = load_image_from_URL(image_url)
-    else:
-        # Or upload from your computer
-        print("Loading uploaded image...")
-        image = gr.Image(shape=(224, 224), type="pil")
-        # image = transforms.ToTensor()(image)
-        # image = transforms.ToPILImage()(image)
-
-    print(type(image))
+    image = gr.Image(shape=(224, 224), type="pil")
+    image_url = gr.Textbox(lines=1, label="Image URL", placeholder="Or, paste the image URL here")
+    question = gr.Textbox(lines=5, label="Question")
 
-
+    with gr.Column():
+        ask = gr.Button(label="Get the answer")
 
     with gr.Column():
-
+
         answer = gr.Label(label="Answer")
-        ask = gr.Button(label="Get the answer")
 
-    ask.click(get_answer, inputs=[image, question], outputs=[answer])
+    ask.click(get_answer, inputs=[image, image_url, question], outputs=[answer])
 
-demo.launch()
+demo.launch(shared = True)