sitammeur committed on
Commit
9d07fd4
·
verified ·
1 Parent(s): e2a3844

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +48 -48
app.py CHANGED
@@ -1,48 +1,48 @@
1
# Imports
import gradio as gr
from src.siglip.classifier import ZeroShotImageClassification


# Sample (image, candidate-labels) pairs shown beneath the interface
examples = [
    ["images/baklava.png", "dessert on a plate, a serving of baklava, a plate and spoon"],
    ["images/beignets.png", "a dog, a cat, a donut, a beignet"],
    ["images/cat.png", "two sleeping cats, two cats playing, three cats laying down"],
]

# Interface copy: heading, summary text, and footer links
title = "Zero Shot Image Classification"
description = "Classify image using zero-shot classification with SigLIP 2 zeroshot model! Provide an image input and a list of candidate labels separated by commas. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2502.14786' target='_blank'>SigLIP 2: Multilingual Vision-Language Encoders with Improved Semantic Understanding, Localization, and Dense Features</a> | <a href='https://huggingface.co/google/siglip2-so400m-patch16-naflex' target='_blank'>Model Page</a></p>"

# Input widgets: one image plus a comma-separated list of candidate labels
image_input = gr.Image(type="pil", label="Input", placeholder="Enter image to classify")
labels_input = gr.Textbox(
    label="Candidate Labels",
    placeholder="Enter candidate labels separated by commas",
)

# Assemble the app and start serving it
demo = gr.Interface(
    fn=ZeroShotImageClassification,
    inputs=[image_input, labels_input],
    outputs=gr.Label(label="Classification"),
    title=title,
    description=description,
    article=article,
    examples=examples,
    cache_examples=True,
    cache_mode="lazy",
    # NOTE(review): built-in Gradio theme names are lowercase ("soft");
    # confirm the capitalized "Soft" resolves to the intended theme.
    theme="Soft",
    flagging_mode="never",
)
demo.launch(debug=False)
 
1
# Imports
import gradio as gr
from src.siglip.classifier import ZeroShotImageClassification


# Sample (image, candidate-labels) pairs shown beneath the interface
examples = [
    ["images/baklava.png", "dessert on a plate, a serving of baklava, a plate and spoon"],
    ["images/beignets.png", "a dog, a cat, a donut, a beignet"],
    ["images/cat.png", "two sleeping cats, two cats playing, three cats laying down"],
]

# Interface copy: heading, summary text, and footer links
title = "Zero Shot Image Classification"
description = "Classify image using zero-shot classification with SigLIP 2 model! Provide an image input and a list of candidate labels separated by commas. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2502.14786' target='_blank'>SigLIP 2: Multilingual Vision-Language Encoders with Improved Semantic Understanding, Localization, and Dense Features</a> | <a href='https://huggingface.co/google/siglip2-so400m-patch16-naflex' target='_blank'>Model Page</a></p>"

# Input widgets: one image plus a comma-separated list of candidate labels
image_input = gr.Image(type="pil", label="Input", placeholder="Enter image to classify")
labels_input = gr.Textbox(
    label="Candidate Labels",
    placeholder="Enter candidate labels separated by commas",
)

# Assemble the app and start serving it
demo = gr.Interface(
    fn=ZeroShotImageClassification,
    inputs=[image_input, labels_input],
    outputs=gr.Label(label="Classification"),
    title=title,
    description=description,
    article=article,
    examples=examples,
    cache_examples=True,
    cache_mode="lazy",
    # NOTE(review): built-in Gradio theme names are lowercase ("soft");
    # confirm the capitalized "Soft" resolves to the intended theme.
    theme="Soft",
    flagging_mode="never",
)
demo.launch(debug=False)