Stylique committed on
Commit 716768e · verified · 1 Parent(s): 90b8e48

Upload app.py

Files changed (1)
  1. app.py +362 -223
app.py CHANGED
@@ -1,223 +1,362 @@
- import numpy as np
- from PIL import Image
- from huggingface_hub import snapshot_download
- from leffa.transform import LeffaTransform
- from leffa.model import LeffaModel
- from leffa.inference import LeffaInference
- from leffa_utils.garment_agnostic_mask_predictor import AutoMasker
- from leffa_utils.densepose_predictor import DensePosePredictor
- from leffa_utils.utils import resize_and_center, list_dir, get_agnostic_mask_hd, get_agnostic_mask_dc
- from preprocess.humanparsing.run_parsing import Parsing
- from preprocess.openpose.run_openpose import OpenPose
-
- import gradio as gr
-
- # Download checkpoints once at startup
- snapshot_download(repo_id="franciszzj/Leffa", local_dir="./ckpts")
-
-
- class LeffaPredictor:
-     def __init__(self):
-         self.mask_predictor = AutoMasker(
-             densepose_path="./ckpts/densepose",
-             schp_path="./ckpts/schp",
-         )
-         self.densepose_predictor = DensePosePredictor(
-             config_path="./ckpts/densepose/densepose_rcnn_R_50_FPN_s1x.yaml",
-             weights_path="./ckpts/densepose/model_final_162be9.pkl",
-         )
-         self.parsing = Parsing(
-             atr_path="./ckpts/humanparsing/parsing_atr.onnx",
-             lip_path="./ckpts/humanparsing/parsing_lip.onnx",
-         )
-         self.openpose = OpenPose(
-             body_model_path="./ckpts/openpose/body_pose_model.pth",
-         )
-
-         # Virtual try-on HD
-         vt_hd = LeffaModel(
-             pretrained_model_name_or_path="./ckpts/stable-diffusion-inpainting",
-             pretrained_model="./ckpts/virtual_tryon.pth",
-             dtype="float16",
-         )
-         self.vt_hd_inf = LeffaInference(model=vt_hd)
-
-         # Virtual try-on DressCode
-         vt_dc = LeffaModel(
-             pretrained_model_name_or_path="./ckpts/stable-diffusion-inpainting",
-             pretrained_model="./ckpts/virtual_tryon_dc.pth",
-             dtype="float16",
-         )
-         self.vt_dc_inf = LeffaInference(model=vt_dc)
-
-         # Pose transfer
-         pt = LeffaModel(
-             pretrained_model_name_or_path="./ckpts/stable-diffusion-xl-1.0-inpainting-0.1",
-             pretrained_model="./ckpts/pose_transfer.pth",
-             dtype="float16",
-         )
-         self.pt_inf = LeffaInference(model=pt)
-
-     def _prepare(self, src_path, ref_path):
-         src = Image.open(src_path)
-         ref = Image.open(ref_path)
-         src = resize_and_center(src, 768, 1024)
-         ref = resize_and_center(ref, 768, 1024)
-         return src, ref
-
-     def predict_virtual_tryon(
-         self, src_path, ref_path,
-         accelerate_ref, steps, scale, seed,
-         model_type, garment_type, repaint
-     ):
-         src, ref = self._prepare(src_path, ref_path)
-         src_arr = np.array(src.convert("RGB"))
-
-         # 1) parsing + keypoints → agnostic mask
-         parse, _ = self.parsing(src.resize((384, 512)))
-         kpts = self.openpose(src.resize((384, 512)))
-         if model_type == "viton_hd":
-             mask = get_agnostic_mask_hd(parse, kpts, garment_type)
-         else:
-             mask = get_agnostic_mask_dc(parse, kpts, garment_type)
-         mask = mask.resize((768, 1024))
-
-         # 2) DensePose → seg or IUV
-         if model_type == "viton_hd":
-             seg = self.densepose_predictor.predict_seg(src_arr)[:, :, ::-1]
-             densepose = Image.fromarray(seg)
-             inf = self.vt_hd_inf
-         else:
-             iuv = self.densepose_predictor.predict_iuv(src_arr)
-             seg = np.concatenate([iuv[:, :, :1]] * 3, axis=-1)
-             densepose = Image.fromarray(seg)
-             inf = self.vt_dc_inf
-
-         # 3) run Leffa
-         data = {
-             "src_image": [src],
-             "ref_image": [ref],
-             "mask": [mask],
-             "densepose": [densepose],
-         }
-         data = LeffaTransform()(data)
-         out = inf(
-             data,
-             ref_acceleration=accelerate_ref,
-             num_inference_steps=int(steps),
-             guidance_scale=float(scale),
-             seed=int(seed),
-             repaint=repaint,
-         )
-         gen = out["generated_image"][0]
-         return np.array(gen), np.array(mask), np.array(densepose)
-
-     def predict_pose_transfer(
-         self, src_path, ref_path,
-         accelerate_ref, steps, scale, seed
-     ):
-         src, ref = self._prepare(src_path, ref_path)
-         src_arr = np.array(src)
-         mask = Image.fromarray(np.ones_like(src_arr) * 255)
-         iuv = self.densepose_predictor.predict_iuv(src_arr)[:, :, ::-1]
-         densepose = Image.fromarray(iuv)
-
-         data = {
-             "src_image": [src],
-             "ref_image": [ref],
-             "mask": [mask],
-             "densepose": [densepose],
-         }
-         data = LeffaTransform()(data)
-         out = self.pt_inf(
-             data,
-             ref_acceleration=accelerate_ref,
-             num_inference_steps=int(steps),
-             guidance_scale=float(scale),
-             seed=int(seed),
-         )
-         gen = out["generated_image"][0]
-         return np.array(gen), np.array(mask), np.array(densepose)
-
-
- if __name__ == "__main__":
-     lp = LeffaPredictor()
-     examples = "./ckpts/examples"
-     person1 = list_dir(f"{examples}/person1")
-     person2 = list_dir(f"{examples}/person2")
-     garments = list_dir(f"{examples}/garment")
-
-     title = "## Leffa: Controllable Person Image Generation"
-     note = "Note: Virtual Try-On uses VITON-HD/DressCode; Pose Transfer uses DeepFashion."
-
-     with gr.Blocks(theme=gr.themes.Default(
-         primary_hue=gr.themes.colors.pink,
-         secondary_hue=gr.themes.colors.red
-     )).queue() as demo:
-
-         gr.Markdown(title)
-
-         with gr.Tab("Virtual Try-On"):
-             with gr.Row():
-                 with gr.Column():
-                     vt_src = gr.Image(source="upload", type="filepath", label="Person")
-                     gr.Examples(examples=person1, inputs=vt_src)
-
-                 with gr.Column():
-                     vt_ref = gr.Image(source="upload", type="filepath", label="Garment")
-                     gr.Examples(examples=garments, inputs=vt_ref)
-
-                 with gr.Column():
-                     vt_out = gr.Image(label="Result")
-                     vt_mask = gr.Image(label="Mask")
-                     vt_dp = gr.Image(label="DensePose")
-                     vt_btn = gr.Button("Generate")
-
-             with gr.Accordion("Advanced Options", open=False):
-                 vt_model = gr.Radio(["viton_hd", "dress_code"], value="viton_hd", label="Model")
-                 vt_garment = gr.Radio(["upper_body", "lower_body", "dresses"], value="upper_body", label="Garment Type")
-                 vt_accel_ref = gr.Checkbox(label="Accelerate Reference UNet")
-                 vt_repaint = gr.Checkbox(label="Repaint Mode")
-                 vt_steps = gr.Slider(30, 100, value=30, step=1, label="Steps")
-                 vt_scale = gr.Slider(0.1, 5.0, value=2.5, step=0.1, label="Guidance Scale")
-                 vt_seed = gr.Number(value=42, label="Seed")
-
-             vt_btn.click(
-                 fn=lp.predict_virtual_tryon,
-                 inputs=[vt_src, vt_ref, vt_accel_ref, vt_steps, vt_scale, vt_seed, vt_model, vt_garment, vt_repaint],
-                 outputs=[vt_out, vt_mask, vt_dp],
-             )
-
-         with gr.Tab("Pose Transfer"):
-             with gr.Row():
-                 with gr.Column():
-                     pt_src = gr.Image(source="upload", type="filepath", label="Source Pose")
-                     gr.Examples(examples=person2, inputs=pt_src)
-
-                 with gr.Column():
-                     pt_ref = gr.Image(source="upload", type="filepath", label="Target Person")
-                     gr.Examples(examples=person1, inputs=pt_ref)
-
-                 with gr.Column():
-                     pt_out = gr.Image(label="Result")
-                     pt_mask = gr.Image(label="Mask")
-                     pt_dp = gr.Image(label="DensePose")
-                     pt_btn = gr.Button("Generate")
-
-             with gr.Accordion("Advanced Options", open=False):
-                 pt_accel_ref = gr.Checkbox(label="Accelerate Reference UNet")
-                 pt_steps = gr.Slider(30, 100, value=30, step=1, label="Steps")
-                 pt_scale = gr.Slider(0.1, 5.0, value=2.5, step=0.1, label="Guidance Scale")
-                 pt_seed = gr.Number(value=42, label="Seed")
-
-             pt_btn.click(
-                 fn=lp.predict_pose_transfer,
-                 inputs=[pt_src, pt_ref, pt_accel_ref, pt_steps, pt_scale, pt_seed],
-                 outputs=[pt_out, pt_mask, pt_dp],
-             )
-
-         gr.Markdown(note)
-
-     # expose publicly
-     demo.launch(share=True, server_port=7860,
-                 allowed_paths=["./ckpts/examples"])
+ import numpy as np
+ from PIL import Image
+ from huggingface_hub import snapshot_download
+ from leffa.transform import LeffaTransform
+ from leffa.model import LeffaModel
+ from leffa.inference import LeffaInference
+ from leffa_utils.garment_agnostic_mask_predictor import AutoMasker
+ from leffa_utils.densepose_predictor import DensePosePredictor
+ from leffa_utils.utils import resize_and_center, list_dir, get_agnostic_mask_hd, get_agnostic_mask_dc
+ from preprocess.humanparsing.run_parsing import Parsing
+ from preprocess.openpose.run_openpose import OpenPose
+
+ import gradio as gr
+
+ # Download checkpoints
+ snapshot_download(repo_id="franciszzj/Leffa", local_dir="./ckpts")
+
+
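+ # LeffaPredictor wraps the preprocessors (human parsing, OpenPose, DensePose,
+ # auto-masking) and the three Leffa diffusion pipelines behind the two
+ # predict wrappers used by the Gradio UI below.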
+ class LeffaPredictor(object):
+     def __init__(self):
+         self.mask_predictor = AutoMasker(
+             densepose_path="./ckpts/densepose",
+             schp_path="./ckpts/schp",
+         )
+
+         self.densepose_predictor = DensePosePredictor(
+             config_path="./ckpts/densepose/densepose_rcnn_R_50_FPN_s1x.yaml",
+             weights_path="./ckpts/densepose/model_final_162be9.pkl",
+         )
+
+         self.parsing = Parsing(
+             atr_path="./ckpts/humanparsing/parsing_atr.onnx",
+             lip_path="./ckpts/humanparsing/parsing_lip.onnx",
+         )
+
+         self.openpose = OpenPose(
+             body_model_path="./ckpts/openpose/body_pose_model.pth",
+         )
+
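+         # Three diffusion pipelines, all in float16: two virtual try-on
+         # models (VITON-HD and DressCode weights) on the Stable Diffusion
+         # inpainting base, and a pose-transfer model on the SDXL
+         # inpainting base.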
+         vt_model_hd = LeffaModel(
+             pretrained_model_name_or_path="./ckpts/stable-diffusion-inpainting",
+             pretrained_model="./ckpts/virtual_tryon.pth",
+             dtype="float16",
+         )
+         self.vt_inference_hd = LeffaInference(model=vt_model_hd)
+
+         vt_model_dc = LeffaModel(
+             pretrained_model_name_or_path="./ckpts/stable-diffusion-inpainting",
+             pretrained_model="./ckpts/virtual_tryon_dc.pth",
+             dtype="float16",
+         )
+         self.vt_inference_dc = LeffaInference(model=vt_model_dc)
+
+         pt_model = LeffaModel(
+             pretrained_model_name_or_path="./ckpts/stable-diffusion-xl-1.0-inpainting-0.1",
+             pretrained_model="./ckpts/pose_transfer.pth",
+             dtype="float16",
+         )
+         self.pt_inference = LeffaInference(model=pt_model)
+
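+     # Single entry point shared by both tabs; control_type selects the
+     # virtual try-on or pose-transfer pipeline.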
+     def leffa_predict(
+         self,
+         src_image_path,
+         ref_image_path,
+         control_type,
+         ref_acceleration=False,
+         step=50,
+         scale=2.5,
+         seed=42,
+         vt_model_type="viton_hd",
+         vt_garment_type="upper_body",
+         vt_repaint=False
+     ):
+         assert control_type in [
+             "virtual_tryon", "pose_transfer"], "Invalid control type: {}".format(control_type)
+         src_image = Image.open(src_image_path)
+         ref_image = Image.open(ref_image_path)
+         src_image = resize_and_center(src_image, 768, 1024)
+         ref_image = resize_and_center(ref_image, 768, 1024)
+
+         src_image_array = np.array(src_image)
+
+         # Mask
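+         # Virtual try-on: a garment-agnostic mask from human parsing plus
+         # OpenPose keypoints (computed at 384x512, upscaled to 768x1024).
+         # Pose transfer: the whole frame is repainted, so the mask is all 255.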
+         if control_type == "virtual_tryon":
+             src_image = src_image.convert("RGB")
+             model_parse, _ = self.parsing(src_image.resize((384, 512)))
+             keypoints = self.openpose(src_image.resize((384, 512)))
+             if vt_model_type == "viton_hd":
+                 mask = get_agnostic_mask_hd(
+                     model_parse, keypoints, vt_garment_type)
+             elif vt_model_type == "dress_code":
+                 mask = get_agnostic_mask_dc(
+                     model_parse, keypoints, vt_garment_type)
+             mask = mask.resize((768, 1024))
+             # garment_type_hd = "upper" if vt_garment_type in [
+             #     "upper_body", "dresses"] else "lower"
+             # mask = self.mask_predictor(src_image, garment_type_hd)["mask"]
+         elif control_type == "pose_transfer":
+             mask = Image.fromarray(np.ones_like(src_image_array) * 255)
+
+         # DensePose
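+         # VITON-HD consumes the channel-reversed segmentation map; DressCode
+         # uses the I channel of the IUV map tiled to three channels; pose
+         # transfer uses the channel-reversed IUV map directly.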
+         if control_type == "virtual_tryon":
+             if vt_model_type == "viton_hd":
+                 src_image_seg_array = self.densepose_predictor.predict_seg(
+                     src_image_array)[:, :, ::-1]
+                 src_image_seg = Image.fromarray(src_image_seg_array)
+                 densepose = src_image_seg
+             elif vt_model_type == "dress_code":
+                 src_image_iuv_array = self.densepose_predictor.predict_iuv(
+                     src_image_array)
+                 src_image_seg_array = src_image_iuv_array[:, :, 0:1]
+                 src_image_seg_array = np.concatenate(
+                     [src_image_seg_array] * 3, axis=-1)
+                 src_image_seg = Image.fromarray(src_image_seg_array)
+                 densepose = src_image_seg
+         elif control_type == "pose_transfer":
+             src_image_iuv_array = self.densepose_predictor.predict_iuv(
+                 src_image_array)[:, :, ::-1]
+             src_image_iuv = Image.fromarray(src_image_iuv_array)
+             densepose = src_image_iuv
+
+         # Leffa
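+         # Batch-of-one payload; LeffaTransform tensorizes the four inputs
+         # before the diffusion inference call.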
+         transform = LeffaTransform()
+
+         data = {
+             "src_image": [src_image],
+             "ref_image": [ref_image],
+             "mask": [mask],
+             "densepose": [densepose],
+         }
+         data = transform(data)
+         if control_type == "virtual_tryon":
+             if vt_model_type == "viton_hd":
+                 inference = self.vt_inference_hd
+             elif vt_model_type == "dress_code":
+                 inference = self.vt_inference_dc
+         elif control_type == "pose_transfer":
+             inference = self.pt_inference
+         output = inference(
+             data,
+             ref_acceleration=ref_acceleration,
+             num_inference_steps=step,
+             guidance_scale=scale,
+             seed=seed,
+             repaint=vt_repaint,
+         )
+         gen_image = output["generated_image"][0]
+         # gen_image.save("gen_image.png")
+         return np.array(gen_image), np.array(mask), np.array(densepose)
+
+     def leffa_predict_vt(self, src_image_path, ref_image_path, ref_acceleration, step, scale, seed, vt_model_type, vt_garment_type, vt_repaint):
+         return self.leffa_predict(src_image_path, ref_image_path, "virtual_tryon", ref_acceleration, step, scale, seed, vt_model_type, vt_garment_type, vt_repaint)
+
+     def leffa_predict_pt(self, src_image_path, ref_image_path, ref_acceleration, step, scale, seed):
+         return self.leffa_predict(src_image_path, ref_image_path, "pose_transfer", ref_acceleration, step, scale, seed)
+
+
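+ # Gradio UI: one tab per control type (appearance / pose), each wired to the
+ # corresponding wrapper above.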
+ if __name__ == "__main__":
+
+     leffa_predictor = LeffaPredictor()
+     example_dir = "./ckpts/examples"
+     person1_images = list_dir(f"{example_dir}/person1")
+     person2_images = list_dir(f"{example_dir}/person2")
+     garment_images = list_dir(f"{example_dir}/garment")
+
+     title = "## Leffa: Learning Flow Fields in Attention for Controllable Person Image Generation"
+     link = """[📚 Paper](https://arxiv.org/abs/2412.08486) - [🤖 Code](https://github.com/franciszzj/Leffa) - [🔥 Demo](https://huggingface.co/spaces/franciszzj/Leffa) - [🤗 Model](https://huggingface.co/franciszzj/Leffa)
+
+ Star ⭐ us if you like it!
+ """
+     news = """## News
+ - 09/Jan/2025: Inference now defaults to float16, generating an image in about 6 seconds on an A100.
+
+ More news can be found in the [GitHub repository](https://github.com/franciszzj/Leffa).
+ """
+     description = "Leffa is a unified framework for controllable person image generation that enables precise manipulation of both appearance (i.e., virtual try-on) and pose (i.e., pose transfer)."
+     note = "Note: The models used in the demo are trained solely on academic datasets. Virtual try-on uses VITON-HD/DressCode, and pose transfer uses DeepFashion."
+
+     with gr.Blocks(theme=gr.themes.Default(primary_hue=gr.themes.colors.pink, secondary_hue=gr.themes.colors.red)).queue() as demo:
+         gr.Markdown(title)
+         gr.Markdown(link)
+         gr.Markdown(news)
+         gr.Markdown(description)
+
+         with gr.Tab("Control Appearance (Virtual Try-on)"):
+             with gr.Row():
+                 with gr.Column():
+                     gr.Markdown("#### Person Image")
+                     vt_src_image = gr.Image(
+                         sources=["upload"],
+                         type="filepath",
+                         label="Person Image",
+                         width=512,
+                         height=512,
+                     )
+
+                     gr.Examples(
+                         inputs=vt_src_image,
+                         examples_per_page=10,
+                         examples=person1_images,
+                     )
+
+                 with gr.Column():
+                     gr.Markdown("#### Garment Image")
+                     vt_ref_image = gr.Image(
+                         sources=["upload"],
+                         type="filepath",
+                         label="Garment Image",
+                         width=512,
+                         height=512,
+                     )
+
+                     gr.Examples(
+                         inputs=vt_ref_image,
+                         examples_per_page=10,
+                         examples=garment_images,
+                     )
+
+                 with gr.Column():
+                     gr.Markdown("#### Generated Image")
+                     vt_gen_image = gr.Image(
+                         label="Generated Image",
+                         width=512,
+                         height=512,
+                     )
+
+                     with gr.Row():
+                         vt_gen_button = gr.Button("Generate")
+
+                     with gr.Accordion("Advanced Options", open=False):
+                         vt_model_type = gr.Radio(
+                             label="Model Type",
+                             choices=[("VITON-HD (Recommended)", "viton_hd"),
+                                      ("DressCode (Experimental)", "dress_code")],
+                             value="viton_hd",
+                         )
+
+                         vt_garment_type = gr.Radio(
+                             label="Garment Type",
+                             choices=[("Upper", "upper_body"),
+                                      ("Lower", "lower_body"),
+                                      ("Dress", "dresses")],
+                             value="upper_body",
+                         )
+
+                         vt_ref_acceleration = gr.Radio(
+                             label="Accelerate Reference UNet (may slightly reduce performance)",
+                             choices=[("True", True), ("False", False)],
+                             value=False,
+                         )
+
+                         vt_repaint = gr.Radio(
+                             label="Repaint Mode",
+                             choices=[("True", True), ("False", False)],
+                             value=False,
+                         )
+
+                         vt_step = gr.Number(
+                             label="Inference Steps", minimum=30, maximum=100, step=1, value=30)
+
+                         vt_scale = gr.Number(
+                             label="Guidance Scale", minimum=0.1, maximum=5.0, step=0.1, value=2.5)
+
+                         vt_seed = gr.Number(
+                             label="Random Seed", minimum=-1, maximum=2147483647, step=1, value=42)
+
+                     with gr.Accordion("Debug", open=False):
+                         vt_mask = gr.Image(
+                             label="Generated Mask",
+                             width=256,
+                             height=256,
+                         )
+
+                         vt_densepose = gr.Image(
+                             label="Generated DensePose",
+                             width=256,
+                             height=256,
+                         )
+
+             vt_gen_button.click(
+                 fn=leffa_predictor.leffa_predict_vt,
+                 inputs=[vt_src_image, vt_ref_image, vt_ref_acceleration, vt_step,
+                         vt_scale, vt_seed, vt_model_type, vt_garment_type, vt_repaint],
+                 outputs=[vt_gen_image, vt_mask, vt_densepose],
+             )
+
+         with gr.Tab("Control Pose (Pose Transfer)"):
+             with gr.Row():
+                 with gr.Column():
+                     gr.Markdown("#### Person Image")
+                     pt_ref_image = gr.Image(
+                         sources=["upload"],
+                         type="filepath",
+                         label="Person Image",
+                         width=512,
+                         height=512,
+                     )
+
+                     gr.Examples(
+                         inputs=pt_ref_image,
+                         examples_per_page=10,
+                         examples=person1_images,
+                     )
+
+                 with gr.Column():
+                     gr.Markdown("#### Target Pose Person Image")
+                     pt_src_image = gr.Image(
+                         sources=["upload"],
+                         type="filepath",
+                         label="Target Pose Person Image",
+                         width=512,
+                         height=512,
+                     )
+
+                     gr.Examples(
+                         inputs=pt_src_image,
+                         examples_per_page=10,
+                         examples=person2_images,
+                     )
+
+                 with gr.Column():
+                     gr.Markdown("#### Generated Image")
+                     pt_gen_image = gr.Image(
+                         label="Generated Image",
+                         width=512,
+                         height=512,
+                     )
+
+                     with gr.Row():
+                         pose_transfer_gen_button = gr.Button("Generate")
+
+                     with gr.Accordion("Advanced Options", open=False):
+                         pt_ref_acceleration = gr.Radio(
+                             label="Accelerate Reference UNet",
+                             choices=[("True", True), ("False", False)],
+                             value=False,
+                         )
+
+                         pt_step = gr.Number(
+                             label="Inference Steps", minimum=30, maximum=100, step=1, value=30)
+
+                         pt_scale = gr.Number(
+                             label="Guidance Scale", minimum=0.1, maximum=5.0, step=0.1, value=2.5)
+
+                         pt_seed = gr.Number(
+                             label="Random Seed", minimum=-1, maximum=2147483647, step=1, value=42)
+
+                     with gr.Accordion("Debug", open=False):
+                         pt_mask = gr.Image(
+                             label="Generated Mask",
+                             width=256,
+                             height=256,
+                         )
+
+                         pt_densepose = gr.Image(
+                             label="Generated DensePose",
+                             width=256,
+                             height=256,
+                         )
+
+             pose_transfer_gen_button.click(
+                 fn=leffa_predictor.leffa_predict_pt,
+                 inputs=[pt_src_image, pt_ref_image, pt_ref_acceleration,
+                         pt_step, pt_scale, pt_seed],
+                 outputs=[pt_gen_image, pt_mask, pt_densepose],
+             )
+
+         gr.Markdown(note)
+
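+     # share=True requests a temporary public share link; allowed_paths lets
+     # Gradio serve the bundled example images under ./ckpts/examples.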
+     demo.launch(share=True, server_port=7860,
+                 allowed_paths=["./ckpts/examples"])