altndrr committed
Commit 04dd7ac · Parent: 480bc12

Update interaction code

Files changed (1): app.py (+10 -14)
app.py CHANGED

@@ -43,7 +43,7 @@ MODEL = AutoModel.from_pretrained("altndrr/cased", trust_remote_code=True).to(DE
 PROCESSOR = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
 
 
-def save_original_image(image: gr.Image):
+def image_preprocess(image: gr.Image):
     if image is None:
         return None, None
 
@@ -51,13 +51,6 @@ def save_original_image(image: gr.Image):
     size = min(size) if isinstance(size, tuple) else size
     image = resize(image, size)
 
-    return image, image.copy()
-
-
-def prepare_image(image: gr.Image):
-    if image is None:
-        return None, None
-
     PROCESSOR.image_processor.do_normalize = False
     image_tensor = PROCESSOR(images=[image], return_tensors="pt", padding=True)
     PROCESSOR.image_processor.do_normalize = True
@@ -87,9 +80,8 @@ with gr.Blocks(analytics_enabled=True, title=PAPER_TITLE, theme="soft") as demo:
     gr.Markdown(MARKDOWN_DESCRIPTION)
     with gr.Row():
         with gr.Column():
-            curr_image = gr.Image(label="input", type="pil")
-            _orig_image = gr.Image(
-                label="orig. image", type="pil", visible=False, interactive=False
+            curr_image = gr.Image(
+                label="input", type="pil", sources=["upload", "webcam", "clipboard"]
             )
             alpha_slider = gr.Slider(0.0, 1.0, value=0.7, step=0.1, label="alpha")
         with gr.Row():
@@ -97,9 +89,13 @@ with gr.Blocks(analytics_enabled=True, title=PAPER_TITLE, theme="soft") as demo:
             run_button = gr.Button(value="Submit", variant="primary")
         with gr.Column():
             output_label = gr.Label(label="output", num_top_classes=5)
+
+    _orig_image = gr.Image(label="original image", type="pil", visible=False, interactive=False)
+    _example_image = gr.Image(label="example image", type="pil", visible=False, interactive=False)
+
     examples = gr.Examples(
         examples=glob(os.path.join(os.path.dirname(__file__), "examples", "*.jpg")),
-        inputs=[_orig_image],
+        inputs=[_example_image],
         outputs=[output_label],
         fn=image_inference,
         cache_examples=True,
@@ -108,10 +104,10 @@ with gr.Blocks(analytics_enabled=True, title=PAPER_TITLE, theme="soft") as demo:
 
     # INTERACTIONS
     # - change
-    _orig_image.change(prepare_image, [_orig_image], [curr_image, _orig_image])
+    _example_image.change(image_preprocess, [_example_image], [curr_image, _orig_image])
 
     # - upload
-    curr_image.upload(save_original_image, [curr_image], [curr_image, _orig_image])
+    curr_image.upload(image_preprocess, [curr_image], [curr_image, _orig_image])
     curr_image.upload(lambda: None, [], [output_label])
 
     # - clear
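Taken together, the first two hunks collapse the old `save_original_image` / `prepare_image` pair into a single `image_preprocess` helper. Stitched from the fragments visible above, the merged function plausibly reads as below; the line that defines `size` (new line 50), the downstream use of `image_tensor`, and the final `return` are not shown in the diff, so those parts are assumptions, as is the torchvision origin of `resize`:

```python
import gradio as gr
from torchvision.transforms.functional import resize  # assumption: source of `resize`
from transformers import CLIPProcessor

PROCESSOR = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")


def image_preprocess(image: gr.Image):
    if image is None:
        return None, None

    # Assumption: the diff hides how `size` is defined; the tuple check below suggests
    # it is read from the processor config, which may be an int or a (h, w) tuple.
    size = PROCESSOR.image_processor.crop_size["height"]
    size = min(size) if isinstance(size, tuple) else size
    image = resize(image, size)

    # Toggle normalization off so the processed pixels stay displayable, then restore
    # the flag for later model calls. `image_tensor` is built here in the real file;
    # its downstream use falls outside the visible hunks, so this sketch discards it.
    PROCESSOR.image_processor.do_normalize = False
    image_tensor = PROCESSOR(images=[image], return_tensors="pt", padding=True)
    PROCESSOR.image_processor.do_normalize = True

    # Assumption: the return is not visible in the diff; the (curr_image, _orig_image)
    # outputs this handler feeds suggest an (image, copy) pair, like the old helper.
    return image, image.copy()
```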
 
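The interaction hunks are the substance of the commit: `gr.Examples` no longer writes into `_orig_image` directly, but into a hidden `_example_image` whose `change` event routes the file through `image_preprocess`, the same handler a manual upload triggers. A minimal, self-contained sketch of this hidden-staging pattern (the `preprocess` stand-in and the example path are illustrative, not taken from app.py):

```python
import gradio as gr


def preprocess(image):
    # Stand-in for app.py's image_preprocess: one return value per wired output,
    # here the visible preview and a hidden "original" copy.
    if image is None:
        return None, None
    return image, image.copy()


with gr.Blocks() as demo:
    curr_image = gr.Image(label="input", type="pil")
    _orig_image = gr.Image(type="pil", visible=False, interactive=False)
    _example_image = gr.Image(type="pil", visible=False, interactive=False)

    # Examples populate the hidden staging component, not the visible input...
    gr.Examples(examples=["examples/cat.jpg"], inputs=[_example_image])

    # ...so that clicking an example and uploading a file share one preprocessing path.
    _example_image.change(preprocess, [_example_image], [curr_image, _orig_image])
    curr_image.upload(preprocess, [curr_image], [curr_image, _orig_image])

demo.launch()
```

Funneling both entry points through one handler is what lets the commit delete the duplicated helper, hence the net -4 lines (+10 -14) in app.py.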