Commit efae727
Parent(s): 355f654

hide flagging option when displaying examples

Files changed:
- app.py (+21 -9)
- flagging.py (+7 -4)
- model_yolov5.py (+22 -0)
- utils.py (+1 -22)
app.py CHANGED

@@ -1,14 +1,20 @@
+"""
+Main application for RGB detection demo.
+Any new model should implement the following functions:
+- load_model(model_path, img_size=640)
+- inference(model, image)
+"""
+import os
 import glob
 import gradio as gr
 from huggingface_hub import get_token
 from utils import (
-    load_model,
     load_image_from_url,
-    inference,
     load_badges,
     FlaggedCounter,
 )
 from flagging import myHuggingFaceDatasetSaver
+from model_yolov5 import load_model, inference
 
 
 TITLE = """
@@ -69,7 +75,7 @@ with gr.Blocks(theme=theme, css=css) as demo:
     flag = gr.Button("Flag", visible=False)
     notice = gr.Markdown(value=NOTICE, visible=False)
 
-    gr.Examples(
+    examples = gr.Examples(
         examples=glob.glob("examples/*.jpg"),
         inputs=img_input,
         outputs=img_output,
@@ -90,11 +96,17 @@ with gr.Blocks(theme=theme, css=css) as demo:
     )
 
     # event listeners with decorators
-    @img_output.change(
+    @img_output.change(
+        inputs=[img_input, img_output],
+        outputs=[flag, notice],
+        show_api=False,
+        preprocess=False,
+        show_progress="hidden",
+    )
+    def _show_hide_flagging(_img_input, _img_output):
+        visible = _img_output and _img_input["orig_name"] not in os.listdir("examples")
         return {
-            flag: gr.Button("Flag",
+            flag: gr.Button("Flag", interactive=True, visible=visible),
             notice: gr.Markdown(value=NOTICE, visible=visible),
         }
 
@@ -102,7 +114,7 @@ with gr.Blocks(theme=theme, css=css) as demo:
     hf_writer.setup([img_input], "flagged")
 
     # Sequential logic when flag button is clicked
-    flag.click(lambda: gr.Info("Thank you for contributing!")
+    flag.click(lambda: gr.Info("Thank you for contributing!")).then(
         lambda: {flag: gr.Button("Flag", interactive=False)}, [], [flag], show_api=False
     ).then(
         lambda *args: hf_writer.flag(args),
@@ -118,4 +130,4 @@ with gr.Blocks(theme=theme, css=css) as demo:
 demo.load(lambda: load_badges(flagged_counter.count()), [], badges, show_api=False)
 
 if __name__ == "__main__":
-    demo.queue().launch(
+    demo.queue().launch()
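The module docstring added at the top of app.py spells out a small plug-in contract: any detection backend only has to expose load_model(model_path, img_size=640) and inference(model, image), and app.py simply swaps the import. As a rough illustration of that contract (not part of this commit; the file name and drawing logic below are invented), a trivial backend could look like this:

```python
# model_dummy.py -- hypothetical backend illustrating the contract from the app.py docstring
from PIL import Image, ImageDraw


def load_model(model_path, img_size=640):
    """Pretend to load a model; just remember the settings."""
    return {"path": model_path, "img_size": img_size}


def inference(model, image):
    """Return a copy of the input image with a placeholder box drawn on it."""
    annotated = image.copy().convert("RGB")
    draw = ImageDraw.Draw(annotated)
    w, h = annotated.size
    draw.rectangle([w // 4, h // 4, 3 * w // 4, 3 * h // 4], outline="red", width=3)
    return annotated
```

app.py would then import `from model_dummy import load_model, inference`; a gr.Image output accepts either a PIL image (as here) or a numpy array (as the real YOLOv5 backend returns).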
flagging.py CHANGED

@@ -1,3 +1,4 @@
+import os
 import json
 from collections import OrderedDict
 from pathlib import Path
@@ -6,6 +7,7 @@ import gradio as gr
 from gradio.flagging import HuggingFaceDatasetSaver, client_utils
 import huggingface_hub
 
+
 class myHuggingFaceDatasetSaver(HuggingFaceDatasetSaver):
     """
     Custom HuggingFaceDatasetSaver to save images/audio to disk.
@@ -14,7 +16,7 @@ class myHuggingFaceDatasetSaver(HuggingFaceDatasetSaver):
 
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-
+
     def _deserialize_components(
         self,
         data_dir: Path,
@@ -39,7 +41,7 @@ class myHuggingFaceDatasetSaver(HuggingFaceDatasetSaver):
             save_dir.mkdir(exist_ok=True, parents=True)
             deserialized = component.flag(sample, save_dir)
             if isinstance(component, gr.Image) and isinstance(sample, dict):
-                deserialized = json.loads(deserialized)[
+                deserialized = json.loads(deserialized)["path"]  # dirty hack
 
             # Add deserialized object to row
             features[label] = {"dtype": "string", "_type": "Value"}
@@ -58,7 +60,8 @@ class myHuggingFaceDatasetSaver(HuggingFaceDatasetSaver):
                         features[label + " file"] = {"_type": _type}
                        break
            if deserialized:
-                path_in_repo = str(
+                path_in_repo = str(
+                    # returned filepath is absolute, we want it relative to compute URL
                     Path(deserialized).relative_to(self.dataset_dir)
                 ).replace("\\", "/")
                 row.append(
@@ -74,4 +77,4 @@ class myHuggingFaceDatasetSaver(HuggingFaceDatasetSaver):
         features["username"] = {"dtype": "string", "_type": "Value"}
         row.append(flag_option)
         row.append(username)
-        return features, row
+        return features, row
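The path handling added in _deserialize_components is easier to see in isolation. A minimal sketch with made-up paths (the directory and file names below are placeholders, not from the repo):

```python
from pathlib import Path

dataset_dir = Path("flagged").resolve()
# component.flag() returns an absolute path to the file saved on disk
deserialized = str(dataset_dir / "images" / "0001.jpg")

# make it relative to the dataset dir and normalise backslashes so it can be used in a Hub URL
path_in_repo = str(Path(deserialized).relative_to(dataset_dir)).replace("\\", "/")
print(path_in_repo)  # images/0001.jpg
```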
model_yolov5.py ADDED

@@ -0,0 +1,22 @@
+import numpy as np
+import yolov5
+from yolov5.utils.plots import Annotator, colors
+from huggingface_hub import get_token
+
+
+def load_model(model_path, img_size=640):
+    """Load model from HuggingFace Hub."""
+    model = yolov5.load(model_path, hf_token=get_token())
+    model.img_size = img_size  # add img_size attribute
+    return model
+
+
+def inference(model, image):
+    """Run inference on image and return annotated image."""
+    results = model(image, size=model.img_size)
+    annotator = Annotator(np.asarray(image))
+    for *box, _, cls in reversed(results.pred[0]):
+        # label = f'{model.names[int(cls)]} {conf:.2f}'
+        # print(f'{cls} {conf:.2f} {box}')
+        annotator.box_label(box, "", color=colors(cls, True))
+    return annotator.im
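For context, this is roughly how the new module is meant to be exercised end to end. The model id and image path below are placeholders, and yolov5 refers to the pip-installable yolov5 package already used by the repo:

```python
from PIL import Image
from model_yolov5 import load_model, inference

# placeholders -- substitute a real Hub model id and one of the bundled example images
model = load_model("your-org/your-yolov5-model", img_size=640)
image = Image.open("examples/some_image.jpg")

annotated = inference(model, image)  # numpy array with boxes drawn on it
Image.fromarray(annotated).save("annotated.jpg")
```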
utils.py CHANGED

@@ -2,22 +2,12 @@ import time
 import requests
 from io import BytesIO
 from dataclasses import dataclass
-import numpy as np
 import pandas as pd
 from PIL import Image
-import yolov5
-from yolov5.utils.plots import Annotator, colors
 import gradio as gr
 from huggingface_hub import get_token
 
 
-def load_model(model_path, img_size=640):
-    """Load model from HuggingFace Hub."""
-    model = yolov5.load(model_path, hf_token=get_token())
-    model.img_size = img_size  # add img_size attribute
-    return model
-
-
 def load_image_from_url(url):
     """Load image from URL."""
     if not url:  # empty or None
@@ -30,17 +20,6 @@ def load_image_from_url(url):
     return image.convert("RGB")
 
 
-def inference(model, image):
-    """Run inference on image and return annotated image."""
-    results = model(image, size=model.img_size)
-    annotator = Annotator(np.asarray(image))
-    for *box, _, cls in reversed(results.pred[0]):
-        # label = f'{model.names[int(cls)]} {conf:.2f}'
-        # print(f'{cls} {conf:.2f} {box}')
-        annotator.box_label(box, "", color=colors(cls, True))
-    return annotator.im
-
-
 def load_badges(n):
     """Load badges."""
     return f"""
@@ -80,7 +59,7 @@ class FlaggedCounter:
                 if "error" not in data and data["size"]["dataset"]["num_rows"] > 0:
                     print(f"[{i+1}/{self.trials}] {data}")
                     return data["size"]["dataset"]["num_rows"]
-            except
+            except requests.exceptions.RequestException:
                 pass
             print(f"[{i+1}/{self.trials}] {data}")
             time.sleep(5)
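The only functional change in utils.py beyond the code moves is completing the broken except clause as except requests.exceptions.RequestException. The retry loop it sits in follows a pattern like the sketch below (a standalone approximation; the endpoint URL, field names and trial count are assumptions inferred from the surrounding code, not taken verbatim from the repo):

```python
import time
import requests


def count_rows(dataset_id, trials=5):
    """Poll the datasets-server size endpoint until it reports flagged rows."""
    url = f"https://datasets-server.huggingface.co/size?dataset={dataset_id}"
    for i in range(trials):
        data = {}
        try:
            data = requests.get(url, timeout=10).json()
            if "error" not in data and data["size"]["dataset"]["num_rows"] > 0:
                return data["size"]["dataset"]["num_rows"]
        except requests.exceptions.RequestException:
            # transient network errors are retried instead of crashing the Space
            pass
        print(f"[{i+1}/{trials}] {data}")
        time.sleep(5)
    return 0
```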