|
import torch
import torch.nn.functional as F

import gradio as gr
from PIL import Image
from torchvision import transforms as T

from model import FoundModel
from misc import load_config

# ImageNet normalization statistics, matching the DINO pre-training
NORMALIZE = T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
# Pre-compute the outputs for the Gradio examples at start-up
CACHE = True


def blend_images(bg, fg, alpha=0.5):
    """Alpha-blend fg over bg. Both images are converted to RGBA first,
    since Image.blend requires matching modes and sizes."""
    bg = bg.convert("RGBA")
    fg = fg.convert("RGBA")
    return Image.blend(bg, fg, alpha=alpha)
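
# Usage: blend_images(image, mask_image) returns a translucent overlay of
# the mask on the image; predict() below uses it for the final output.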
|
|
|
|
|
def predict(img_input):
    config_path = "configs/found_DUTS-TR.yaml"
    model_weights = "data/weights/decoder_weights.pt"
    config = load_config(config_path)

    # FOUND: a frozen self-supervised ViT backbone topped by a single
    # 1 x 1 conv decoder; only the decoder weights are loaded from disk
    model = FoundModel(
        vit_model=config.model["pre_training"],
        vit_arch=config.model["arch"],
        vit_patch_size=config.model["patch_size"],
        enc_type_feats=config.found["feats"],
        bkg_type_feats=config.found["feats"],
        bkg_th=config.found["bkg_th"],
    )
    model.decoder_load_weights(model_weights)
    model.eval()
    print(f"Model {model_weights} loaded correctly.")
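    # NB: the model is rebuilt on every call; a long-running demo could
    # construct it once at module scope and reuse it across requests.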
|
|
|
|
|
    # Load the image, keeping the original around for the final blend
    img_pil = Image.open(img_input)
    img = img_pil.convert("RGB")

    # To tensor, ImageNet normalization, and a leading batch dimension
    t = T.Compose([T.ToTensor(), NORMALIZE])
    img_t = t(img)[None, :, :, :]
    inputs = img_t
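    # inputs now has shape (1, 3, H, W) with ImageNet-normalized values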
|
|
|
|
|
    # Forward pass; no gradients are needed at inference time
    with torch.no_grad():
        preds, _, _, _ = model.forward_step(inputs, for_eval=True)

    # Predictions come out at the ViT patch resolution: upsample by the
    # patch size, crop to the input size, and binarize at 0.5
    h, w = img_t.shape[-2:]
    preds_up = F.interpolate(
        preds, scale_factor=model.vit_patch_size, mode="bilinear", align_corners=False
    )[..., :h, :w]
    preds_up = (torch.sigmoid(preds_up) > 0.5).squeeze(0).float()

    return blend_images(img_pil, T.ToPILImage()(preds_up))
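
# Standalone usage sketch (the path below is one of the bundled examples):
#   out = predict("data/examples/VOC_000030.jpg")  # PIL image with overlay
#   out.save("overlay.png")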
|
|
|
|
|
title = 'FOUND - unsupervised object localization'
description = 'Gradio demo for our CVPR 2023 paper "Unsupervised Object Localization: Observing the Background to Discover Objects".\n \
    The app is <i>running on CPU</i>, so inference times are longer than on GPU (80 FPS on a V100 GPU).\n \
    Please see below for more details.'

article = """
<h1 align="center">Unsupervised Object Localization: Observing the Background to Discover Objects</h1>

## Highlights
- A single **conv 1 x 1** layer trained to extract object information from DINO [1] features.
- **No supervision**.
- Trained for only **2 epochs** on the DUTS-TR dataset.
- Inference runs at **80 FPS** on a V100 GPU.
- No post-processing is applied to the results shown here.

<i>The images provided are taken from VOC07 [2], ECSSD [3] and DUT-OMRON [4].</i>

## Citation
```
@inproceedings{simeoni2023found,
  author    = {Siméoni, Oriane and Sekkat, Chloé and Puy, Gilles and Vobecký, Antonín and Zablocki, Éloi and Pérez, Patrick},
  title     = {Unsupervised Object Localization: Observing the Background to Discover Objects},
  booktitle = {{IEEE} Conference on Computer Vision and Pattern Recognition, {CVPR}},
  year      = {2023},
}
```

### References

[1] M. Caron et al. Emerging Properties in Self-Supervised Vision Transformers. ICCV 2021.
[2] M. Everingham et al. The PASCAL Visual Object Classes Challenge 2007 (VOC2007) Results.
[3] J. Shi et al. Hierarchical Image Saliency Detection on Extended CSSD. IEEE TPAMI 2016.
[4] C. Yang et al. Saliency Detection via Graph-Based Manifold Ranking. CVPR 2013.
"""
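
# For intuition: the "single conv 1 x 1 layer" highlighted above amounts to
# a linear head over DINO patch features (a minimal sketch, not the exact
# FoundModel internals; d is the feature dimension, p the patch size):
#   head = torch.nn.Conv2d(d, 1, kernel_size=1)
#   logits = head(feats)               # feats: (B, d, H/p, W/p)
#   mask = torch.sigmoid(logits) > 0.5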
|
|
|
examples = [
    "data/examples/VOC_000030.jpg",
    "data/examples/ECSSD_0010.png",
    "data/examples/VOC07_000038.jpg",
    "data/examples/VOC07_000075.jpg",
    "data/examples/DUT-OMRON_im103.png",
]
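
# With cache_examples=True, Gradio runs predict() on every example above at
# start-up and caches the results, so clicking an example returns instantly.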
|
|
|
|
|
iface = gr.Interface(
    fn=predict,
    title=title,
    description=description,
    article=article,
    inputs=gr.Image(type="filepath"),
    outputs=gr.Image(label="Unsupervised object localization", type="pil"),
    examples=examples,
    cache_examples=CACHE,
)

iface.launch(
    show_error=True,
    enable_queue=True,
    inline=True,
)
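
# Run the demo locally with `python app.py`. Note that enable_queue was
# dropped in recent Gradio releases, which configure queuing via
# iface.queue() instead.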