ginipick committed on
Commit 7776a83 · verified · 1 Parent(s): 76fcb7a

Update app.py

Files changed (1)
app.py +173 -113
app.py CHANGED
@@ -6,7 +6,7 @@ import gradio as gr
 import spaces
 
 from glob import glob
-from typing import Tuple
+from typing import Tuple, Optional
 
 from PIL import Image
 from gradio_imageslider import ImageSlider
@@ -16,48 +16,40 @@ from torchvision import transforms
 import requests
 from io import BytesIO
 import zipfile
-
+import random
 
 torch.set_float32_matmul_precision('high')
 torch.jit.script = lambda f: f
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-### image_proc.py
+### Image post-processing helpers ###
 def refine_foreground(image, mask, r=90):
     if mask.size != image.size:
         mask = mask.resize(image.size)
-    image = np.array(image) / 255.0
-    mask = np.array(mask) / 255.0
-    estimated_foreground = FB_blur_fusion_foreground_estimator_2(image, mask, r=r)
+    image_np = np.array(image) / 255.0
+    mask_np = np.array(mask) / 255.0
+    estimated_foreground = FB_blur_fusion_foreground_estimator_2(image_np, mask_np, r=r)
     image_masked = Image.fromarray((estimated_foreground * 255.0).astype(np.uint8))
     return image_masked
 
-
 def FB_blur_fusion_foreground_estimator_2(image, alpha, r=90):
-    # Thanks to the source: https://github.com/Photoroom/fast-foreground-estimation
     alpha = alpha[:, :, None]
-    F, blur_B = FB_blur_fusion_foreground_estimator(
-        image, image, image, alpha, r)
+    F, blur_B = FB_blur_fusion_foreground_estimator(image, image, image, alpha, r)
     return FB_blur_fusion_foreground_estimator(image, F, blur_B, alpha, r=6)[0]
 
-
 def FB_blur_fusion_foreground_estimator(image, F, B, alpha, r=90):
     if isinstance(image, Image.Image):
         image = np.array(image) / 255.0
     blurred_alpha = cv2.blur(alpha, (r, r))[:, :, None]
-
     blurred_FA = cv2.blur(F * alpha, (r, r))
     blurred_F = blurred_FA / (blurred_alpha + 1e-5)
-
     blurred_B1A = cv2.blur(B * (1 - alpha), (r, r))
     blurred_B = blurred_B1A / ((1 - blurred_alpha) + 1e-5)
-    F = blurred_F + alpha * \
-        (image - alpha * blurred_F - (1 - alpha) * blurred_B)
+    F = blurred_F + alpha * (image - alpha * blurred_F - (1 - alpha) * blurred_B)
     F = np.clip(F, 0, 1)
     return F, blurred_B
 
-
 class ImagePreprocessor():
     def __init__(self, resolution: Tuple[int, int] = (1024, 1024)) -> None:
         self.transform_image = transforms.Compose([
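The FB_blur_fusion_foreground_estimator helpers above implement blur-fusion foreground estimation: the matte is blurred once at a coarse radius and once at a fine one, and the foreground color is re-estimated so that pixels near the matte boundary are de-contaminated from the background before the alpha channel is applied. A minimal usage sketch of refine_foreground, assuming the module context above and two hypothetical files:

    # Usage sketch (hypothetical paths); refine_foreground is defined above.
    from PIL import Image

    image = Image.open('examples/cat.jpg').convert('RGB')    # hypothetical input
    mask = Image.open('examples/cat_mask.png').convert('L')  # grayscale matte
    cutout = refine_foreground(image, mask, r=90)            # RGB, edge colors cleaned
    cutout.putalpha(mask.resize(image.size))                 # attach the matte as alpha
    cutout.save('cutout.png')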
@@ -65,12 +57,10 @@ class ImagePreprocessor():
         transforms.ToTensor(),
         transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
     ])
-
     def proc(self, image: Image.Image) -> torch.Tensor:
         image = self.transform_image(image)
         return image
 
-
 usage_to_weights_file = {
     'General': 'BiRefNet',
     'General-HR': 'BiRefNet_HR',
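ImagePreprocessor applies the torchvision pipeline shown above and proc returns an ImageNet-normalized CHW tensor (the first entries of the Compose sit outside this hunk, so whether a Resize consumes the resolution argument is not visible here). A small sketch, assuming the class as defined above and a hypothetical example file:

    # Sketch: PIL image -> normalized tensor, batched the way predict() does it.
    from PIL import Image

    pre = ImagePreprocessor(resolution=(1024, 1024))
    img = Image.open('examples/cat.jpg').convert('RGB')  # hypothetical input
    x = pre.proc(img).unsqueeze(0)   # shape (1, 3, H, W), float32, ImageNet-normalized
    print(x.shape, x.dtype)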
@@ -85,17 +75,19 @@ usage_to_weights_file = {
     'General-legacy': 'BiRefNet-legacy'
 }
 
-birefnet = AutoModelForImageSegmentation.from_pretrained('/'.join(('zhengpeng7', usage_to_weights_file['General'])), trust_remote_code=True)
+# Initial model loading (default: General)
+birefnet = AutoModelForImageSegmentation.from_pretrained(
+    '/'.join(('zhengpeng7', usage_to_weights_file['General'])),
+    trust_remote_code=True
+)
 birefnet.to(device)
 birefnet.eval(); birefnet.half()
 
-
 @spaces.GPU
 def predict(images, resolution, weights_file):
-    assert (images is not None), 'AssertionError: images cannot be None.'
-
+    assert images is not None, 'Images cannot be None.'
     global birefnet
-    # Load BiRefNet with chosen weights
+    # Reload the model with the selected weights
     _weights_file = '/'.join(('zhengpeng7', usage_to_weights_file[weights_file] if weights_file is not None else usage_to_weights_file['General']))
     print('Using weights: {}.'.format(_weights_file))
     birefnet = AutoModelForImageSegmentation.from_pretrained(_weights_file, trust_remote_code=True)
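Note that predict above re-runs from_pretrained on every call, even when the requested weights match the checkpoint already loaded. A hedged alternative (a sketch only, not what this commit does) would be a hypothetical cache keyed by repo id:

    # Hypothetical memoized loader; not part of this commit.
    _model_cache = {}

    def get_birefnet(repo_id: str):
        # Reuse an already-initialized model when the same weights are requested again.
        if repo_id not in _model_cache:
            model = AutoModelForImageSegmentation.from_pretrained(repo_id, trust_remote_code=True)
            model.to(device)
            model.eval(); model.half()
            _model_cache[repo_id] = model
        return _model_cache[repo_id]

    # e.g.: birefnet = get_birefnet('zhengpeng7/BiRefNet')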
@@ -103,28 +95,30 @@ def predict(images, resolution, weights_file):
     birefnet.eval(); birefnet.half()
 
     try:
-        resolution = [int(int(reso)//32*32) for reso in resolution.strip().split('x')]
+        resolution_list = [int(int(reso)//32*32) for reso in resolution.strip().split('x')]
     except:
         if weights_file == 'General-HR':
-            resolution = (2048, 2048)
+            resolution_list = [2048, 2048]
         elif weights_file == 'General-Lite-2K':
-            resolution = (2560, 1440)
+            resolution_list = [2560, 1440]
         else:
-            resolution = (1024, 1024)
-        print('Invalid resolution input. Automatically changed to 1024x1024 / 2048x2048 / 2560x1440.')
+            resolution_list = [1024, 1024]
+        print('Invalid resolution input. Automatically changed to default.')
 
+    # Check whether images is a single item or a list (batch)
     if isinstance(images, list):
-        # For tab_batch
-        save_paths = []
-        save_dir = 'preds-BiRefNet'
-        if not os.path.exists(save_dir):
-            os.makedirs(save_dir)
         tab_is_batch = True
     else:
         images = [images]
         tab_is_batch = False
 
-    for idx_image, image_src in enumerate(images):
+    save_paths = []
+    save_dir = 'preds-BiRefNet'
+    if tab_is_batch and not os.path.exists(save_dir):
+        os.makedirs(save_dir)
+
+    outputs = []
+    for idx, image_src in enumerate(images):
         if isinstance(image_src, str):
             if os.path.isfile(image_src):
                 image_ori = Image.open(image_src)
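The try branch above snaps each requested dimension down to a multiple of 32 (the model's stride); any parse failure falls back to a per-weights default. A worked example of the rule:

    # Worked example of the resolution parsing above.
    resolution = '1000x600'
    resolution_list = [int(int(reso) // 32 * 32) for reso in resolution.strip().split('x')]
    print(resolution_list)  # [992, 576] -- each side rounded down to a multiple of 32

    # 'foo' or '' raise inside the try and trigger the per-weights default; note that
    # a bare '1024' (no 'x') does not raise -- it yields the one-element list [1024].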
@@ -133,31 +127,30 @@ def predict(images, resolution, weights_file):
             image_data = BytesIO(response.content)
             image_ori = Image.open(image_data)
         else:
-            image_ori = Image.fromarray(image_src)
-
+            if isinstance(image_src, np.ndarray):
+                image_ori = Image.fromarray(image_src)
+            else:
+                image_ori = image_src.convert('RGB')
         image = image_ori.convert('RGB')
-        # Preprocess the image
-        image_preprocessor = ImagePreprocessor(resolution=tuple(resolution))
-        image_proc = image_preprocessor.proc(image)
-        image_proc = image_proc.unsqueeze(0)
-
-        # Prediction
+        preprocessor = ImagePreprocessor(resolution=tuple(resolution_list))
+        image_proc = preprocessor.proc(image).unsqueeze(0)
         with torch.no_grad():
             preds = birefnet(image_proc.to(device).half())[-1].sigmoid().cpu()
         pred = preds[0].squeeze()
-
-        # Show Results
         pred_pil = transforms.ToPILImage()(pred)
         image_masked = refine_foreground(image, pred_pil)
         image_masked.putalpha(pred_pil.resize(image.size))
-
         torch.cuda.empty_cache()
-
         if tab_is_batch:
-            save_file_path = os.path.join(save_dir, "{}.png".format(os.path.splitext(os.path.basename(image_src))[0]))
-            image_masked.save(save_file_path)
-            save_paths.append(save_file_path)
-
+            file_path = os.path.join(save_dir, "{}.png".format(
+                os.path.splitext(os.path.basename(image_src))[0] if isinstance(image_src, str) else f"img_{idx}"
+            ))
+            image_masked.save(file_path)
+            save_paths.append(file_path)
+            outputs.append(image_masked)
+        else:
+            outputs = [image_masked, image_ori]
+
     if tab_is_batch:
         zip_file_path = os.path.join(save_dir, "{}.zip".format(save_dir))
         with zipfile.ZipFile(zip_file_path, 'w') as zipf:
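Within the loop above, the sigmoid map is turned back into a PIL 'L' image and used twice: once to de-contaminate foreground colors (refine_foreground) and once, resized to the original size, as the alpha channel. The compositing step in isolation, as a sketch with stand-in data:

    # Sketch of the mask-to-RGBA step; `pred` stands in for the model's sigmoid output.
    import torch
    from PIL import Image
    from torchvision import transforms

    pred = torch.rand(256, 256)                    # (H, W) float in [0, 1]
    image = Image.new('RGB', (512, 512), 'white')  # stand-in for the original image

    pred_pil = transforms.ToPILImage()(pred)       # mode 'L'
    rgba = image.copy()
    rgba.putalpha(pred_pil.resize(image.size))     # alpha resized back to the input size
    assert rgba.mode == 'RGBA'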
@@ -165,70 +158,137 @@ def predict(images, resolution, weights_file):
             zipf.write(file, os.path.basename(file))
         return save_paths, zip_file_path
     else:
-        return (image_masked, image_ori)
-
-
-examples = [[_] for _ in glob('examples/*')][:]
-# Add the option of resolution in a text box.
-for idx_example, example in enumerate(examples):
-    examples[idx_example].append('1024x1024')
-examples.append(examples[-1].copy())
-examples[-1][1] = '512x512'
-
-examples_url = [
-    ['https://hips.hearstapps.com/hmg-prod/images/gettyimages-1229892983-square.jpg'],
-]
-for idx_example_url, example_url in enumerate(examples_url):
-    examples_url[idx_example_url].append('1024x1024')
-
-descriptions = ('Upload a picture, our model will extract a highly accurate segmentation of the subject in it.\n)'
-    ' The resolution used in our training was `1024x1024`, which is the suggested resolution to obtain good results! `2048x2048` is suggested for BiRefNet_HR.\n'
-    ' Our codes can be found at https://github.com/ZhengPeng7/BiRefNet.\n'
-    ' We also maintain the HF model of BiRefNet at https://huggingface.co/ZhengPeng7/BiRefNet for easier access.')
-
-tab_image = gr.Interface(
-    fn=predict,
-    inputs=[
-        gr.Image(label='Upload an image'),
-        gr.Textbox(lines=1, placeholder="Type the resolution (`WxH`) you want, e.g., `1024x1024`.", label="Resolution"),
-        gr.Radio(list(usage_to_weights_file.keys()), value='General', label="Weights", info="Choose the weights you want.")
-    ],
-    outputs=ImageSlider(label="BiRefNet's prediction", type="pil"),
-    examples=examples,
-    api_name="image",
-    description=descriptions,
-)
-
-tab_text = gr.Interface(
-    fn=predict,
-    inputs=[
-        gr.Textbox(label="Paste an image URL"),
-        gr.Textbox(lines=1, placeholder="Type the resolution (`WxH`) you want, e.g., `1024x1024`.", label="Resolution"),
-        gr.Radio(list(usage_to_weights_file.keys()), value='General', label="Weights", info="Choose the weights you want.")
-    ],
-    outputs=ImageSlider(label="BiRefNet's prediction", type="pil"),
-    examples=examples_url,
-    api_name="text",
-    description=descriptions+'\nTab-URL is partially modified from https://huggingface.co/spaces/not-lain/background-removal, thanks to this great work!',
+        # Return a list so the result displays in ImageSlider.
+        return outputs
+
+# Example data (image, URL, batch)
+examples_image = [[path, "1024x1024", "General"] for path in glob('examples/*')]
+examples_text = [[url, "1024x1024", "General"] for url in ["https://hips.hearstapps.com/hmg-prod/images/gettyimages-1229892983-square.jpg"]]
+examples_batch = [[file, "1024x1024", "General"] for file in glob('examples/*')]
+
+descriptions = (
+    "Upload a picture, our model will extract a highly accurate segmentation of the subject in it.\n"
+    "The resolution used in our training was `1024x1024`, which is suggested for good results! "
+    "`2048x2048` is suggested for BiRefNet_HR.\n"
+    "Our codes can be found at https://github.com/ZhengPeng7/BiRefNet.\n"
+    "We also maintain the HF model of BiRefNet at https://huggingface.co/ZhengPeng7/BiRefNet for easier access."
 )
 
-tab_batch = gr.Interface(
-    fn=predict,
-    inputs=[
-        gr.File(label="Upload multiple images", type="filepath", file_count="multiple"),
-        gr.Textbox(lines=1, placeholder="Type the resolution (`WxH`) you want, e.g., `1024x1024`.", label="Resolution"),
-        gr.Radio(list(usage_to_weights_file.keys()), value='General', label="Weights", info="Choose the weights you want.")
-    ],
-    outputs=[gr.Gallery(label="BiRefNet's predictions"), gr.File(label="Download masked images.")],
-    api_name="batch",
-    description=descriptions+'\nTab-batch is partially modified from https://huggingface.co/spaces/NegiTurkey/Multi_Birefnetfor_Background_Removal, thanks to this great work!',
-)
-
-demo = gr.TabbedInterface(
-    [tab_image, tab_text, tab_batch],
-    ['image', 'text', 'batch'],
-    title="BiRefNet demo for subject extraction (general / matting / salient / camouflaged / portrait).",
-)
+# Improved UI styling (background, container, sidebar, button animation, etc.)
+css = """
+body {
+    background: linear-gradient(135deg, #667eea, #764ba2);
+    font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif;
+    color: #333;
+    margin: 0;
+    padding: 0;
+}
+.gradio-container {
+    background: rgba(255, 255, 255, 0.95);
+    border-radius: 15px;
+    padding: 30px 40px;
+    box-shadow: 0 8px 30px rgba(0, 0, 0, 0.3);
+    margin: 40px auto;
+    max-width: 1200px;
+}
+.gradio-container h1 {
+    color: #333;
+    text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.2);
+}
+.fillable {
+    width: 95% !important;
+    max-width: unset !important;
+}
+#examples_container {
+    margin: auto;
+    width: 90%;
+}
+#examples_row {
+    justify-content: center;
+}
+.sidebar {
+    background: rgba(255, 255, 255, 0.98);
+    border-radius: 10px;
+    padding: 20px;
+    box-shadow: 0 4px 15px rgba(0, 0, 0, 0.2);
+}
+button, .btn {
+    background: linear-gradient(90deg, #ff8a00, #e52e71);
+    border: none;
+    color: #fff;
+    padding: 12px 24px;
+    text-transform: uppercase;
+    font-weight: bold;
+    letter-spacing: 1px;
+    border-radius: 5px;
+    cursor: pointer;
+    transition: transform 0.2s ease-in-out;
+}
+button:hover, .btn:hover {
+    transform: scale(1.05);
+}
+"""
+
+title = """
+<h1 align="center" style="margin-bottom: 0.2em;">BiRefNet Demo for Subject Extraction</h1>
+<p align="center" style="font-size:1.1em; color:#555;">
+Upload an image or provide an image URL to extract the subject with high-precision segmentation.
+</p>
+"""
+
+with gr.Blocks(css=css, title="BiRefNet Demo") as demo:
+    gr.Markdown(title)
+    with gr.Tabs():
+        with gr.Tab("Image"):
+            with gr.Row():
+                with gr.Column(scale=1):
+                    image_input = gr.Image(type='pil', label='Upload an Image')
+                    resolution_input = gr.Textbox(lines=1, placeholder="e.g., 1024x1024", label="Resolution")
+                    weights_radio = gr.Radio(list(usage_to_weights_file.keys()), value="General", label="Weights")
+                    predict_btn = gr.Button("Predict")
+                with gr.Column(scale=2):
+                    output_slider = ImageSlider(label="BiRefNet's Prediction", type="pil")
+                    gr.Examples(examples=examples_image, inputs=[image_input, resolution_input, weights_radio], label="Examples")
+        with gr.Tab("Text"):
+            with gr.Row():
+                with gr.Column(scale=1):
+                    image_url = gr.Textbox(label="Paste an Image URL")
+                    resolution_input_text = gr.Textbox(lines=1, placeholder="e.g., 1024x1024", label="Resolution")
+                    weights_radio_text = gr.Radio(list(usage_to_weights_file.keys()), value="General", label="Weights")
+                    predict_btn_text = gr.Button("Predict")
+                with gr.Column(scale=2):
+                    output_slider_text = ImageSlider(label="BiRefNet's Prediction", type="pil")
+                    gr.Examples(examples=examples_text, inputs=[image_url, resolution_input_text, weights_radio_text], label="Examples")
+        with gr.Tab("Batch"):
+            with gr.Row():
+                with gr.Column(scale=1):
+                    file_input = gr.File(label="Upload Multiple Images", type="filepath", file_count="multiple")
+                    resolution_input_batch = gr.Textbox(lines=1, placeholder="e.g., 1024x1024", label="Resolution")
+                    weights_radio_batch = gr.Radio(list(usage_to_weights_file.keys()), value="General", label="Weights")
+                    predict_btn_batch = gr.Button("Predict")
+                with gr.Column(scale=2):
+                    output_gallery = gr.Gallery(label="BiRefNet's Predictions").style(grid=[3], height="auto")
+                    zip_output = gr.File(label="Download Masked Images")
+                    gr.Examples(examples=examples_batch, inputs=[file_input, resolution_input_batch, weights_radio_batch], label="Examples")
+    with gr.Row():
+        gr.Markdown("<p align='center'>Model by <a href='https://huggingface.co/ZhengPeng7/BiRefNet'>ZhengPeng7/BiRefNet</a></p>")
+
+    # Wire each tab's Predict button to the predict function
+    predict_btn.click(
+        fn=predict,
+        inputs=[image_input, resolution_input, weights_radio],
+        outputs=output_slider
+    )
+    predict_btn_text.click(
+        fn=predict,
+        inputs=[image_url, resolution_input_text, weights_radio_text],
+        outputs=output_slider_text
+    )
+    predict_btn_batch.click(
+        fn=predict,
+        inputs=[file_input, resolution_input_batch, weights_radio_batch],
+        outputs=[output_gallery, zip_output]
+    )
 
 if __name__ == "__main__":
-    demo.launch(debug=True)
+    demo.launch(share=False, debug=True)
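One compatibility caveat in the new UI code: gr.Gallery(...).style(grid=[3], height="auto") is Gradio 3.x-era API; .style() was removed in Gradio 4, where layout options moved into the component constructor. If this Space runs on Gradio 4.x, the equivalent would be roughly:

    # Gradio 4.x form of the Gallery configuration (sketch; only needed on 4.x).
    output_gallery = gr.Gallery(label="BiRefNet's Predictions", columns=3, height="auto")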
 
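Since the Blocks wiring routes every tab through the same predict function, the new app can also be smoke-tested without the UI (assuming a hypothetical local example file; the @spaces.GPU decorator should pass through outside ZeroGPU Spaces):

    # Direct call into predict(), bypassing Gradio (hypothetical path).
    from PIL import Image

    masked, original = predict(Image.open('examples/cat.jpg').convert('RGB'),
                               '1024x1024', 'General')
    masked.save('masked.png')  # RGBA cutout with the predicted alpha channel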