nvn04 committed
Commit 49514fe · verified · Parent: 7e2ef3d

Update app.py

Files changed (1)
app.py  +138 −215
app.py CHANGED
@@ -17,12 +17,21 @@ from model.pipeline import CatVTONPipeline, CatVTONPix2PixPipeline
 from model.flux.pipeline_flux_tryon import FluxTryOnPipeline
 from utils import init_weight_dtype, resize_and_crop, resize_and_padding
 
+
 def parse_args():
     parser = argparse.ArgumentParser(description="Simple example of a training script.")
     parser.add_argument(
         "--base_model_path",
         type=str,
-        default="runwayml/stable-diffusion-inpainting",
+        default="booksforcharlie/stable-diffusion-inpainting",
+        help=(
+            "The path to the base model to use for evaluation. This can be a local path or a model identifier from the Model Hub."
+        ),
+    )
+    parser.add_argument(
+        "--p2p_base_model_path",
+        type=str,
+        default="timbrooks/instruct-pix2pix",
         help=(
            "The path to the base model to use for evaluation. This can be a local path or a model identifier from the Model Hub."
         ),
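The hunk above swaps the default inpainting checkpoint and adds a second `--p2p_base_model_path` argument for the Pix2Pix pipeline. A minimal sketch of how these argparse defaults resolve and how a launch command would override them (only the two default values are taken from the diff; everything else is illustrative):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--base_model_path", type=str,
                    default="booksforcharlie/stable-diffusion-inpainting")
parser.add_argument("--p2p_base_model_path", type=str,
                    default="timbrooks/instruct-pix2pix")

args = parser.parse_args([])  # empty argv: both defaults apply
assert args.base_model_path == "booksforcharlie/stable-diffusion-inpainting"

# Overriding at launch would look like:
#   python app.py --base_model_path runwayml/stable-diffusion-inpainting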
@@ -105,7 +114,10 @@ def image_grid(imgs, rows, cols):
 
 
 args = parse_args()
-repo_path = snapshot_download(repo_id=args.resume_path)
+
+# Mask-based CatVTON
+catvton_repo = "zhengchong/CatVTON"
+repo_path = snapshot_download(repo_id=catvton_repo)
 # Pipeline
 pipeline = CatVTONPipeline(
     base_ckpt=args.base_model_path,
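This hunk pins the try-on weights to the zhengchong/CatVTON repo instead of reading them from `--resume_path`, so the Space no longer depends on a user-supplied value. A minimal sketch of what `snapshot_download` does here (standard `huggingface_hub` behavior; the printed path is illustrative):

from huggingface_hub import snapshot_download

# Downloads the full repo snapshot on first use, then reuses the local
# Hugging Face cache; returns the local directory the pipeline loads from.
repo_path = snapshot_download(repo_id="zhengchong/CatVTON")
print(repo_path)
# e.g. ~/.cache/huggingface/hub/models--zhengchong--CatVTON/snapshots/<revision>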
@@ -123,98 +135,6 @@ automasker = AutoMasker(
     device='cuda',
 )
 
-def submit_function(
-    person_image,
-    cloth_image,
-    cloth_type,
-    num_inference_steps,
-    guidance_scale,
-    seed,
-    show_type
-):
-    person_image, mask = person_image["background"], person_image["layers"][0]
-    #mask = Image.open(mask).convert("L")
-
-    if len(person_image["layers"]) > 0:
-        mask = Image.open(person_image["layers"][0]).convert("L")
-    else:
-        raise ValueError("No layers found in person image.")
-
-
-
-    if len(np.unique(np.array(mask))) == 1:
-        mask = None
-    else:
-        mask = np.array(mask)
-        mask[mask > 0] = 255
-        mask = Image.fromarray(mask)
-
-    tmp_folder = args.output_dir
-    date_str = datetime.now().strftime("%Y%m%d%H%M%S")
-    result_save_path = os.path.join(tmp_folder, date_str[:8], date_str[8:] + ".png")
-    if not os.path.exists(os.path.join(tmp_folder, date_str[:8])):
-        os.makedirs(os.path.join(tmp_folder, date_str[:8]))
-
-    generator = None
-    if seed != -1:
-        generator = torch.Generator(device='cuda').manual_seed(seed)
-
-    #person_image = Image.open(person_image).convert("RGB")
-    if person_image.get("background"):
-        person_image = Image.open(person_image["background"]).convert("RGB")
-    else:
-        raise ValueError("Background image not found.")
-
-
-    cloth_image = Image.open(cloth_image).convert("RGB")
-    person_image = resize_and_crop(person_image, (args.width, args.height))
-    cloth_image = resize_and_padding(cloth_image, (args.width, args.height))
-
-    # Process mask
-    if mask is not None:
-        mask = resize_and_crop(mask, (args.width, args.height))
-    else:
-        mask = automasker(
-            person_image,
-            cloth_type
-        )['mask']
-    mask = mask_processor.blur(mask, blur_factor=9)
-
-    # Inference
-    # try:
-    result_image = pipeline(
-        image=person_image,
-        condition_image=cloth_image,
-        mask=mask,
-        num_inference_steps=num_inference_steps,
-        guidance_scale=guidance_scale,
-        generator=generator
-    )[0]
-    except Exception as e:
-        raise gr.Error(
-            "An error occurred. Please try again later: {}".format(e)
-        )
-
-    # Post-process
-    masked_person = vis_mask(person_image, mask)
-    save_result_image = image_grid([person_image, masked_person, cloth_image, result_image], 1, 4)
-    save_result_image.save(result_save_path)
-    if show_type == "result only":
-        return result_image
-    else:
-        width, height = person_image.size
-        if show_type == "input & result":
-            condition_width = width // 2
-            conditions = image_grid([person_image, cloth_image], 2, 1)
-        else:
-            condition_width = width // 3
-            conditions = image_grid([person_image, masked_person, cloth_image], 3, 1)
-        conditions = conditions.resize((condition_width, height), Image.NEAREST)
-        new_result_image = Image.new("RGB", (width + condition_width + 5, height))
-        new_result_image.paste(conditions, (0, 0))
-        new_result_image.paste(result_image, (condition_width + 5, 0))
-        return new_result_image
-
 
 @spaces.GPU(duration=120)
 def submit_function(
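The deleted block was a stale, undecorated duplicate of `submit_function`; it was also syntactically broken, since its `try:` line was commented out while the `except` clause remained. The surviving `@spaces.GPU` version consumes the same `gr.ImageEditor` payload. A minimal sketch of that payload handling, assuming the editor is built with `type="filepath"` (the helper name is hypothetical):

import numpy as np
from PIL import Image

def extract_person_and_mask(editor_value):
    # gr.ImageEditor(type="filepath") returns a dict of file paths:
    # {"background": ..., "layers": [...], "composite": ...}
    person = Image.open(editor_value["background"]).convert("RGB")
    mask = Image.open(editor_value["layers"][0]).convert("L")
    arr = np.array(mask)
    if len(np.unique(arr)) == 1:
        # Single-valued mask: nothing was drawn, fall back to automasking.
        return person, None
    arr[arr > 0] = 255  # binarize the brush strokes
    return person, Image.fromarray(arr)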
@@ -419,143 +339,146 @@ def submit_function_flux(
 def person_example_fn(image_path):
     return image_path
 
+
 HEADER = ""
 
 def app_gradio():
     with gr.Blocks(title="CatVTON") as demo:
         gr.Markdown(HEADER)
-        with gr.Row():
-            with gr.Column(scale=1, min_width=350):
-                with gr.Row():
-                    image_path = gr.Image(
-                        type="filepath",
-                        interactive=True,
-                        visible=False,
-                    )
-                    person_image = gr.ImageEditor(
-                        interactive=True, label="Person Image", type="filepath"
-                    )
-
-                with gr.Row():
-                    with gr.Column(scale=1, min_width=230):
-                        cloth_image = gr.Image(
-                            interactive=True, label="Condition Image", type="filepath"
-                        )
-                    with gr.Column(scale=1, min_width=120):
-                        gr.Markdown(
-                            '<span style="color: #808080; font-size: small;">Two ways to provide Mask:<br>1. Upload the person image and use the `🖌️` above to draw the Mask (higher priority)<br>2. Select the `Try-On Cloth Type` to generate automatically </span>'
+        with gr.Tab("Mask-based & SD1.5"):
+            with gr.Row():
+                with gr.Column(scale=1, min_width=350):
+                    with gr.Row():
+                        image_path = gr.Image(
+                            type="filepath",
+                            interactive=True,
+                            visible=False,
                         )
-                        cloth_type = gr.Radio(
-                            label="Try-On Cloth Type",
-                            choices=["upper", "lower", "overall"],
-                            value="upper",
+                        person_image = gr.ImageEditor(
+                            interactive=True, label="Person Image", type="filepath"
                         )
 
-
-                submit = gr.Button("Submit")
-                gr.Markdown(
-                    '<center><span style="color: #FF0000">!!! Click only Once, Wait for Delay !!!</span></center>'
-                )
-
-                gr.Markdown(
-                    '<span style="color: #808080; font-size: small;">Advanced options can adjust details:<br>1. `Inference Step` may enhance details;<br>2. `CFG` is highly correlated with saturation;<br>3. `Random seed` may improve pseudo-shadow.</span>'
-                )
-                with gr.Accordion("Advanced Options", open=False):
-                    num_inference_steps = gr.Slider(
-                        label="Inference Step", minimum=10, maximum=100, step=5, value=50
-                    )
-                    # Guidence Scale
-                    guidance_scale = gr.Slider(
-                        label="CFG Strenth", minimum=0.0, maximum=7.5, step=0.5, value=2.5
+                    with gr.Row():
+                        with gr.Column(scale=1, min_width=230):
+                            cloth_image = gr.Image(
+                                interactive=True, label="Condition Image", type="filepath"
+                            )
+                        with gr.Column(scale=1, min_width=120):
+                            gr.Markdown(
+                                '<span style="color: #808080; font-size: small;">Two ways to provide Mask:<br>1. Upload the person image and use the `🖌️` above to draw the Mask (higher priority)<br>2. Select the `Try-On Cloth Type` to generate automatically </span>'
+                            )
+                            cloth_type = gr.Radio(
+                                label="Try-On Cloth Type",
+                                choices=["upper", "lower", "overall"],
+                                value="upper",
+                            )
+
+
+                    submit = gr.Button("Submit")
+                    gr.Markdown(
+                        '<center><span style="color: #FF0000">!!! Click only Once, Wait for Delay !!!</span></center>'
                     )
-                    # Random Seed
-                    seed = gr.Slider(
-                        label="Seed", minimum=-1, maximum=10000, step=1, value=42
+
+                    gr.Markdown(
+                        '<span style="color: #808080; font-size: small;">Advanced options can adjust details:<br>1. `Inference Step` may enhance details;<br>2. `CFG` is highly correlated with saturation;<br>3. `Random seed` may improve pseudo-shadow.</span>'
                     )
-                    show_type = gr.Radio(
-                        label="Show Type",
-                        choices=["result only", "input & result", "input & mask & result"],
-                        value="input & mask & result",
-                    )
-
-            with gr.Column(scale=2, min_width=500):
-                result_image = gr.Image(interactive=False, label="Result")
-                with gr.Row():
-                    # Photo Examples
-                    root_path = "resource/demo/example"
-                    with gr.Column():
-                        men_exm = gr.Examples(
-                            examples=[
-                                os.path.join(root_path, "person", "men", _)
-                                for _ in os.listdir(os.path.join(root_path, "person", "men"))
-                            ],
-                            examples_per_page=4,
-                            inputs=image_path,
-                            label="Person Examples ①",
-                        )
-                        women_exm = gr.Examples(
-                            examples=[
-                                os.path.join(root_path, "person", "women", _)
-                                for _ in os.listdir(os.path.join(root_path, "person", "women"))
-                            ],
-                            examples_per_page=4,
-                            inputs=image_path,
-                            label="Person Examples ②",
+                    with gr.Accordion("Advanced Options", open=False):
+                        num_inference_steps = gr.Slider(
+                            label="Inference Step", minimum=10, maximum=100, step=5, value=50
                         )
-                        gr.Markdown(
-                            '<span style="color: #808080; font-size: small;">*Person examples come from the demos of <a href="https://huggingface.co/spaces/levihsu/OOTDiffusion">OOTDiffusion</a> and <a href="https://www.outfitanyone.org">OutfitAnyone</a>. </span>'
+                        # Guidence Scale
+                        guidance_scale = gr.Slider(
+                            label="CFG Strenth", minimum=0.0, maximum=7.5, step=0.5, value=2.5
                         )
-                    with gr.Column():
-                        condition_upper_exm = gr.Examples(
-                            examples=[
-                                os.path.join(root_path, "condition", "upper", _)
-                                for _ in os.listdir(os.path.join(root_path, "condition", "upper"))
-                            ],
-                            examples_per_page=4,
-                            inputs=cloth_image,
-                            label="Condition Upper Examples",
+                        # Random Seed
+                        seed = gr.Slider(
+                            label="Seed", minimum=-1, maximum=10000, step=1, value=42
                         )
-                        condition_overall_exm = gr.Examples(
-                            examples=[
-                                os.path.join(root_path, "condition", "overall", _)
-                                for _ in os.listdir(os.path.join(root_path, "condition", "overall"))
-                            ],
-                            examples_per_page=4,
-                            inputs=cloth_image,
-                            label="Condition Overall Examples",
-                        )
-                        condition_person_exm = gr.Examples(
-                            examples=[
-                                os.path.join(root_path, "condition", "person", _)
-                                for _ in os.listdir(os.path.join(root_path, "condition", "person"))
-                            ],
-                            examples_per_page=4,
-                            inputs=cloth_image,
-                            label="Condition Reference Person Examples",
-                        )
-                        gr.Markdown(
-                            '<span style="color: #808080; font-size: small;">*Condition examples come from the Internet. </span>'
+                        show_type = gr.Radio(
+                            label="Show Type",
+                            choices=["result only", "input & result", "input & mask & result"],
+                            value="input & mask & result",
                         )
 
-        image_path.change(
-            person_example_fn, inputs=image_path, outputs=person_image
-        )
-
-        submit.click(
-            submit_function,
-            [
-                person_image,
-                cloth_image,
-                cloth_type,
-                num_inference_steps,
-                guidance_scale,
-                seed,
-                show_type,
-            ],
-            result_image,
-        )
+                with gr.Column(scale=2, min_width=500):
+                    result_image = gr.Image(interactive=False, label="Result")
+                    with gr.Row():
+                        # Photo Examples
+                        root_path = "resource/demo/example"
+                        with gr.Column():
+                            men_exm = gr.Examples(
+                                examples=[
+                                    os.path.join(root_path, "person", "men", _)
+                                    for _ in os.listdir(os.path.join(root_path, "person", "men"))
+                                ],
+                                examples_per_page=4,
+                                inputs=image_path,
+                                label="Person Examples ①",
+                            )
+                            women_exm = gr.Examples(
+                                examples=[
+                                    os.path.join(root_path, "person", "women", _)
+                                    for _ in os.listdir(os.path.join(root_path, "person", "women"))
+                                ],
+                                examples_per_page=4,
+                                inputs=image_path,
+                                label="Person Examples ②",
+                            )
+                            gr.Markdown(
+                                '<span style="color: #808080; font-size: small;">*Person examples come from the demos of <a href="https://huggingface.co/spaces/levihsu/OOTDiffusion">OOTDiffusion</a> and <a href="https://www.outfitanyone.org">OutfitAnyone</a>. </span>'
+                            )
+                        with gr.Column():
+                            condition_upper_exm = gr.Examples(
+                                examples=[
+                                    os.path.join(root_path, "condition", "upper", _)
+                                    for _ in os.listdir(os.path.join(root_path, "condition", "upper"))
+                                ],
+                                examples_per_page=4,
+                                inputs=cloth_image,
+                                label="Condition Upper Examples",
+                            )
+                            condition_overall_exm = gr.Examples(
+                                examples=[
+                                    os.path.join(root_path, "condition", "overall", _)
+                                    for _ in os.listdir(os.path.join(root_path, "condition", "overall"))
+                                ],
+                                examples_per_page=4,
+                                inputs=cloth_image,
+                                label="Condition Overall Examples",
+                            )
+                            condition_person_exm = gr.Examples(
+                                examples=[
+                                    os.path.join(root_path, "condition", "person", _)
+                                    for _ in os.listdir(os.path.join(root_path, "condition", "person"))
+                                ],
+                                examples_per_page=4,
+                                inputs=cloth_image,
+                                label="Condition Reference Person Examples",
+                            )
+                            gr.Markdown(
+                                '<span style="color: #808080; font-size: small;">*Condition examples come from the Internet. </span>'
+                            )
+
+            image_path.change(
+                person_example_fn, inputs=image_path, outputs=person_image
+            )
+
+            submit.click(
+                submit_function,
+                [
+                    person_image,
+                    cloth_image,
+                    cloth_type,
+                    num_inference_steps,
+                    guidance_scale,
+                    seed,
+                    show_type,
+                ],
+                result_image,
+            )
+
     demo.queue().launch(share=True, show_error=True)
 
 
 if __name__ == "__main__":
-    app_gradio()
+    app_gradio()
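Despite its size, the hunk above mostly re-indents the existing mask-based UI one level deeper, under `with gr.Tab("Mask-based & SD1.5"):`, so that further pipelines can sit in sibling tabs of the same `gr.Blocks`. A minimal sketch of the resulting layout pattern (the first tab title is from the diff; the second tab and both placeholders are assumed):

import gradio as gr

with gr.Blocks(title="CatVTON") as demo:
    with gr.Tab("Mask-based & SD1.5"):
        gr.Markdown("Mask-based try-on UI (the block above) goes here.")
    with gr.Tab("Mask-free & FLUX"):  # hypothetical sibling tab
        gr.Markdown("A second pipeline's UI could go here.")

demo.queue().launch(share=True, show_error=True)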
 