import gradio as gr
import torch
from PIL import Image
from transformers import AutoProcessor

DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")

# The checkpoint is assumed to be a fully pickled BLIP-2 model saved via
# torch.save(model, path); map_location keeps the load working on CPU-only
# machines. (On PyTorch >= 2.6, torch.load additionally needs
# weights_only=False to unpickle full modules.)
model = torch.load("../finetunned_blipv2_epoch_5_loss_0.4936.pth", map_location=DEVICE)
model.eval()
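
# Note: the .pth above is assumed to contain the entire fine-tuned model. If
# only LoRA adapter weights had been saved, loading would instead go through
# peft, roughly (hypothetical adapter path):
#
#   from peft import PeftModel
#   from transformers import Blip2ForConditionalGeneration
#   base = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b")
#   model = PeftModel.from_pretrained(base, "path/to/lora_adapter").to(DEVICE)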
|
|
def caption_image(image: Image.Image) -> str:
    """
    Takes in an image and returns its caption using the fine-tuned model.
    """
    # BLIP-2 expects three-channel input; Gradio uploads may be RGBA or grayscale.
    image = image.convert("RGB")
    inputs = processor(images=image, return_tensors="pt").to(DEVICE)
    pixel_values = inputs.pixel_values

    # Inference only, so skip gradient tracking.
    with torch.no_grad():
        generated_ids = model.generate(
            pixel_values=pixel_values,
            max_length=256,
        )

    generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return generated_caption
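
# Quick sanity check outside the UI (hypothetical image path):
#
#   print(caption_image(Image.open("sample.jpg")))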
|
|
interface = gr.Interface(
    fn=caption_image,
    inputs=gr.Image(type="pil"),
    outputs=gr.Textbox(),
    title="Image Captioning with BLIP-2 and LoRA",
    description=(
        "<div style='text-align: center; padding: 10px; border: 2px solid #FFC107; border-radius: 10px;'>"
        "<p>Welcome to our <strong>state-of-the-art</strong> image captioning tool!</p>"
        "<p>We combine the strengths of the <em>BLIP-2</em> model with <em>LoRA</em> to provide precise image captions.</p>"
        "<p>Our rich dataset has been labeled using multi-modal models. Upload an image to see its caption!</p></div>"
    ),
    article=(
        "<div style='text-align: center; padding: 10px; background-color: #E3F2FD; border-radius: 10px;'>"
        "<a href='https://diegobonilla98.github.io/PixLore/' style='color: #1976D2; font-weight: bold;'>GitHub Project</a></div>"
    ),
    # live=True re-runs caption_image whenever the input image changes.
    live=True,
)
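
# Generation can take several seconds per image (especially on CPU); calling
# interface.queue() before launch() is one way to keep long live=True requests
# from timing out.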
|
|
if __name__ == '__main__':
    interface.launch()
|
|