import gradio as gr
import torch
import clip
from PIL import Image

# Load the CLIP model once at start-up and run on the GPU when available.
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)

def process_image_and_text(image, text):
    # The text box holds comma-separated candidate labels for the image.
    labels = [label.strip() for label in text.split(",")]

    # Gradio hands the image over as a numpy array; convert it to PIL,
    # apply CLIP's preprocessing, and add a batch dimension.
    image = Image.fromarray(image)
    image = preprocess(image).unsqueeze(0).to(device)

    text_tokens = clip.tokenize(labels).to(device)

    with torch.no_grad():
        # Encode the inputs; the image feature shape is printed only for
        # inspection, since the forward pass below re-encodes both inputs.
        image_features = model.encode_image(image)
        print(image_features.size())
        text_features = model.encode_text(text_tokens)

        # Similarity logits between the image and each candidate label,
        # turned into a probability distribution over the labels.
        logits_per_image, logits_per_text = model(image, text_tokens)
        probs = logits_per_image.softmax(dim=-1)

    return probs.cpu().numpy()[0]

demo = gr.Interface(fn=process_image_and_text, inputs=["image", "text"], outputs="text")
demo.launch()
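
# One possible refinement (a sketch, not part of the demo above): returning a
# {label: probability} dict lets Gradio's Label component show ranked
# confidences instead of a raw array rendered as text. The `classify` wrapper
# name below is hypothetical; it reuses process_image_and_text unchanged.
#
# def classify(image, text):
#     labels = [label.strip() for label in text.split(",")]
#     probs = process_image_and_text(image, text)
#     return {label: float(p) for label, p in zip(labels, probs)}
#
# demo = gr.Interface(fn=classify, inputs=["image", "text"],
#                     outputs=gr.Label(num_top_classes=3))
# demo.launch()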