import gradio as gr
from transformers import pipeline

# Load the BLIP image-captioning model as a Hugging Face pipeline.
pipe = pipeline("image-to-text",
                model="Salesforce/blip-image-captioning-base")
def captioner(image):
    """Return a caption for a PIL image using the BLIP pipeline."""
    out = pipe(image)
    return out[0]['generated_text']
# Build the Gradio UI: image upload in, caption text out.
demo = gr.Interface(fn=captioner,
                    inputs=[gr.Image(label="Upload image", type="pil")],
                    outputs=[gr.Textbox(label="Caption")],
                    title="Image Captioning with BLIP",
                    description="Caption any image using the BLIP model",
                    allow_flagging="never",
                    examples=["christmas_dog.jpg"])
demo.launch()
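
# A minimal sketch of testing captioner() directly, without the UI
# (assumes a local "christmas_dog.jpg"; any PIL-readable image works):
#   from PIL import Image
#   print(captioner(Image.open("christmas_dog.jpg")))
# Tip: pass share=True to launch() to get a temporary public URL.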