Update app.py
app.py CHANGED

@@ -245,7 +245,8 @@ with block:
     gr.Markdown("## Canny Edge")
     with gr.Row():
         with gr.Column():
-            input_image = gr.Image(source='upload', type="numpy")
+            # input_image = gr.Image(source='upload', type="numpy")
+            input_image = gr.Image(label="Input Image", type="numpy", height=512)
             low_threshold = gr.Slider(label="low_threshold", minimum=1, maximum=255, value=100, step=1)
             high_threshold = gr.Slider(label="high_threshold", minimum=1, maximum=255, value=200, step=1)
             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
@@ -260,7 +261,8 @@ with block:
     gr.Markdown("## HED Edge "SoftEdge"")
     with gr.Row():
         with gr.Column():
-            input_image = gr.Image(source='upload', type="numpy")
+            # input_image = gr.Image(source='upload', type="numpy")
+            input_image = gr.Image(label="Input Image", type="numpy", height=512)
             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
             run_button = gr.Button(label="Run")
         with gr.Column():
@@ -273,7 +275,8 @@ with block:
     gr.Markdown("## Pidi Edge "SoftEdge"")
     with gr.Row():
         with gr.Column():
-            input_image = gr.Image(source='upload', type="numpy")
+            # input_image = gr.Image(source='upload', type="numpy")
+            input_image = gr.Image(label="Input Image", type="numpy", height=512)
             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
             run_button = gr.Button(label="Run")
         with gr.Column():
@@ -286,7 +289,8 @@ with block:
     gr.Markdown("## MLSD Edge")
     with gr.Row():
         with gr.Column():
-            input_image = gr.Image(source='upload', type="numpy")
+            # input_image = gr.Image(source='upload', type="numpy")
+            input_image = gr.Image(label="Input Image", type="numpy", height=512)
             value_threshold = gr.Slider(label="value_threshold", minimum=0.01, maximum=2.0, value=0.1, step=0.01)
             distance_threshold = gr.Slider(label="distance_threshold", minimum=0.01, maximum=20.0, value=0.1, step=0.01)
             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=384, step=64)
@@ -301,7 +305,8 @@ with block:
     gr.Markdown("## MIDAS Depth")
     with gr.Row():
         with gr.Column():
-            input_image = gr.Image(source='upload', type="numpy")
+            # input_image = gr.Image(source='upload', type="numpy")
+            input_image = gr.Image(label="Input Image", type="numpy", height=512)
             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=384, step=64)
             run_button = gr.Button(label="Run")
         with gr.Column():
@@ -315,7 +320,8 @@ with block:
     gr.Markdown("## Zoe Depth")
     with gr.Row():
         with gr.Column():
-            input_image = gr.Image(source='upload', type="numpy")
+            # input_image = gr.Image(source='upload', type="numpy")
+            input_image = gr.Image(label="Input Image", type="numpy", height=512)
             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
             run_button = gr.Button(label="Run")
         with gr.Column():
@@ -328,7 +334,8 @@ with block:
     gr.Markdown("## Normal Bae")
     with gr.Row():
         with gr.Column():
-            input_image = gr.Image(source='upload', type="numpy")
+            # input_image = gr.Image(source='upload', type="numpy")
+            input_image = gr.Image(label="Input Image", type="numpy", height=512)
             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
             run_button = gr.Button(label="Run")
         with gr.Column():
@@ -341,7 +348,8 @@ with block:
     gr.Markdown("## DWPose")
     with gr.Row():
         with gr.Column():
-            input_image = gr.Image(source='upload', type="numpy")
+            # input_image = gr.Image(source='upload', type="numpy")
+            input_image = gr.Image(label="Input Image", type="numpy", height=512)
             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
             run_button = gr.Button(label="Run")
         with gr.Column():
@@ -354,7 +362,8 @@ with block:
     gr.Markdown("## Openpose")
     with gr.Row():
         with gr.Column():
-            input_image = gr.Image(source='upload', type="numpy")
+            # input_image = gr.Image(source='upload', type="numpy")
+            input_image = gr.Image(label="Input Image", type="numpy", height=512)
             hand_and_face = gr.Checkbox(label='Hand and Face', value=False)
             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
             run_button = gr.Button(label="Run")
@@ -368,7 +377,8 @@ with block:
     gr.Markdown("## Lineart Anime \n<p>Check Invert to use with Mochi Diffusion.")
     with gr.Row():
         with gr.Column():
-            input_image = gr.Image(source='upload', type="numpy")
+            # input_image = gr.Image(source='upload', type="numpy")
+            input_image = gr.Image(label="Input Image", type="numpy", height=512)
             invert = gr.Checkbox(label='Invert', value=True)
             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
             run_button = gr.Button(label="Run")
@@ -382,7 +392,8 @@ with block:
     gr.Markdown("## Lineart \n<p>Check Invert to use with Mochi Diffusion. Inverted image can also be created here for use with ControlNet Scribble.")
     with gr.Row():
         with gr.Column():
-            input_image = gr.Image(source='upload', type="numpy")
+            # input_image = gr.Image(source='upload', type="numpy")
+            input_image = gr.Image(label="Input Image", type="numpy", height=512)
             coarse = gr.Checkbox(label='Using coarse model', value=False)
             invert = gr.Checkbox(label='Invert', value=True)
             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
@@ -422,7 +433,8 @@ with block:
     gr.Markdown("## Oneformer COCO Segmentation")
     with gr.Row():
         with gr.Column():
-            input_image = gr.Image(source='upload', type="numpy")
+            # input_image = gr.Image(source='upload', type="numpy")
+            input_image = gr.Image(label="Input Image", type="numpy", height=512)
             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
             run_button = gr.Button(label="Run")
         with gr.Column():
@@ -435,7 +447,8 @@ with block:
     gr.Markdown("## Oneformer ADE20K Segmentation")
     with gr.Row():
         with gr.Column():
-            input_image = gr.Image(source='upload', type="numpy")
+            # input_image = gr.Image(source='upload', type="numpy")
+            input_image = gr.Image(label="Input Image", type="numpy", height=512)
             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=640, step=64)
             run_button = gr.Button(label="Run")
         with gr.Column():
@@ -448,7 +461,8 @@ with block:
     gr.Markdown("## Content Shuffle")
     with gr.Row():
         with gr.Column():
-            input_image = gr.Image(source='upload', type="numpy")
+            # input_image = gr.Image(source='upload', type="numpy")
+            input_image = gr.Image(label="Input Image", type="numpy", height=512)
             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
             run_button = gr.Button(label="Run")
         with gr.Column():
@@ -461,7 +475,8 @@ with block:
     gr.Markdown("## Color Shuffle")
     with gr.Row():
         with gr.Column():
-            input_image = gr.Image(source='upload', type="numpy")
+            # input_image = gr.Image(source='upload', type="numpy")
+            input_image = gr.Image(label="Input Image", type="numpy", height=512)
             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
             run_button = gr.Button(label="Run")
         with gr.Column():
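For context, every hunk makes the same substitution: the Gradio 3-style `source='upload'` keyword, which newer Gradio releases no longer accept on `gr.Image` (the component now takes a `sources` list instead), is commented out and replaced with `label`, `type="numpy"`, and a fixed preview `height`. Below is a minimal standalone sketch of the updated pattern, assuming a recent Gradio 4.x install; the `canny_edge` callback, the `result` output component, the `.click()` wiring, and `block.launch()` are illustrative stand-ins for the app's own annotator code and are not part of this commit.

import cv2
import gradio as gr
import numpy as np


def canny_edge(image: np.ndarray, low_threshold: int, high_threshold: int) -> np.ndarray:
    # Placeholder annotator: the real app runs the ControlNet Canny preprocessor here.
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    return cv2.Canny(gray, low_threshold, high_threshold)


with gr.Blocks() as block:
    gr.Markdown("## Canny Edge")
    with gr.Row():
        with gr.Column():
            # Gradio 4.x: the old `source='upload'` kwarg is gone; uploads remain enabled
            # by default, and `height` only controls how large the preview is rendered.
            input_image = gr.Image(label="Input Image", type="numpy", height=512)
            low_threshold = gr.Slider(label="low_threshold", minimum=1, maximum=255, value=100, step=1)
            high_threshold = gr.Slider(label="high_threshold", minimum=1, maximum=255, value=200, step=1)
            run_button = gr.Button("Run")
        with gr.Column():
            result = gr.Image(label="Result", type="numpy")
    run_button.click(canny_edge, inputs=[input_image, low_threshold, high_threshold], outputs=result)

block.launch()

Dropping the deprecated keyword rather than migrating it should preserve the original behaviour: uploading is still allowed because the newer `sources` parameter permits uploads by default, and `height=512` affects only the on-screen preview, not the numpy array passed to the callback.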