Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
|
@@ -56,6 +56,7 @@ def siglip_detector(image, texts):
|
|
| 56 |
outputs = siglip_model(**inputs)
|
| 57 |
logits_per_image = outputs.logits_per_image
|
| 58 |
probs = torch.sigmoid(logits_per_image)
|
|
|
|
| 59 |
|
| 60 |
return probs
|
| 61 |
|
|
@@ -81,7 +82,7 @@ def infer(image, labels):
|
|
| 81 |
|
| 82 |
with gr.Blocks() as demo:
|
| 83 |
gr.Markdown("# Compare Multilingual Zero-shot Image Classification")
|
| 84 |
-
gr.Markdown("Compare the performance of SigLIP and other models on zero-shot classification in this Space
|
| 85 |
with gr.Row():
|
| 86 |
with gr.Column():
|
| 87 |
image_input = gr.Image(type="pil")
|
|
|
|
| 56 |
outputs = siglip_model(**inputs)
|
| 57 |
logits_per_image = outputs.logits_per_image
|
| 58 |
probs = torch.sigmoid(logits_per_image)
|
| 59 |
+
probs = normalize_tensor(probs)
|
| 60 |
|
| 61 |
return probs
|
| 62 |
|
|
|
|
| 82 |
|
| 83 |
with gr.Blocks() as demo:
|
| 84 |
gr.Markdown("# Compare Multilingual Zero-shot Image Classification")
|
| 85 |
+
gr.Markdown("Compare the performance of SigLIP and other models on zero-shot classification in this Space. Three models are compared: CLIP-ViT, NLLB-CLIP and SigLIP-Multilingual. Note that SigLIP outputs are normalized for visualization purposes.")
|
| 86 |
with gr.Row():
|
| 87 |
with gr.Column():
|
| 88 |
image_input = gr.Image(type="pil")
|