import gradio as gr
from PIL import Image
import torch
from transformers import AutoProcessor

# Model and processor initialization
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = torch.load("../finetunned_blipv2_epoch_5_loss_0.4936.pth").to(DEVICE)
model.eval()


def caption_image(image: Image.Image) -> str:
    """
    Takes in an image and returns its caption using the trained model.
    """
    image = image.convert("RGB")
    inputs = processor(images=image, return_tensors="pt").to(DEVICE)
    pixel_values = inputs.pixel_values

    with torch.no_grad():
        generated_ids = model.generate(
            pixel_values=pixel_values,
            max_length=256
        )
    generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return generated_caption


# Gradio interface
interface = gr.Interface(
    fn=caption_image,             # function to call
    inputs=gr.Image(type="pil"),  # Image input
    outputs=gr.Textbox(),         # Textbox output
    title="Image Captioning with BLIP-2 and LoRa",
    description=("<br>Welcome to our state-of-the-art image captioning tool!<br>"
                 "We combine the strengths of the BLIP-2 model with LoRa to provide precise image captions.<br>"
                 "Our rich dataset has been labeled using multi-modal models. Upload an image to see its caption!