chips committed on
Commit 93d55f4 · 1 Parent(s): f381d75

moved generation to FAL

Files changed (2)
  1. app.py +4 -1
  2. base_generator.py +48 -1
app.py CHANGED
@@ -129,8 +129,11 @@ async def run_virtual_tryon_pipeline(
     #lets use openai for this. Also, return errors where needed
     r.set(request_id, "Creating base image")
     # STEP 1: make base image
+
+    ## analyse the garment images
+    garment_description = describe_garment(front_final_image)
     try:
-        base_image = base_generator.create_image(talent_lora_url, talent_trigger_word, "", 1, num_images)
+        base_image = base_generator.create_image(talent_lora_url, talent_trigger_word, garment_description, 1, num_images)
         print(f"base_image: {base_image}")
     except Exception as e:
         r.set(request_id, "error generating base image")
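
describe_garment is not part of this commit. Going by the "#lets use openai for this" comment above it, a minimal sketch might look like the following, assuming the OpenAI Python SDK with OPENAI_API_KEY set, that front_final_image is a publicly reachable image URL, and with an illustrative model name and prompt:

    from openai import OpenAI

    client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

    def describe_garment(image_url):
        # Ask a vision-capable model for a short garment description that can
        # be interpolated into the generation prompt downstream.
        response = client.chat.completions.create(
            model="gpt-4o",  # assumption: any vision-capable model works here
            messages=[{
                "role": "user",
                "content": [
                    {"type": "text", "text": "Describe the garment in this photo in one short phrase for an image-generation prompt."},
                    {"type": "image_url", "image_url": {"url": image_url}},
                ],
            }],
        )
        return response.choices[0].message.content

Per the "return errors where needed" note, a failure inside describe_garment should probably also set an error status on request_id rather than fail silently.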
base_generator.py CHANGED
@@ -1,6 +1,7 @@
 import os
 import random
 import requests
+import fal_client

 #Todo
 # Something to select the pose from the list
@@ -30,7 +31,7 @@ def run_workflow(body):
     response = requests.post(url, headers=headers, json=body)
     return response.json()

-def create_image(character_lora, character_keyword, outfit_desc, pose_id, num_outputs):
+def comfy_create_image(character_lora, character_keyword, outfit_desc, pose_id, num_outputs):
     seed = random.randint(0, 1000000)
     print(f"seed: {seed}")
     prompt = {"56": {"_meta": {"title": "CLIP Text Encode (Prompt)"}, "inputs": {"clip": ["365", 1], "text": ""}, "class_type": "CLIPTextEncode"}, "159": {"_meta": {"title": "Load VAE"}, "inputs": {"vae_name": "flux1-ae.safetensors"}, "class_type": "VAELoader"}, "175": {"_meta": {"title": "Apply ControlNet"}, "inputs": {"vae": ["159", 0], "image": ["369", 0], "negative": ["56", 0], "positive": ["199", 0], "strength": 0.7000000000000001, "control_net": ["260", 0], "end_percent": 0.5, "start_percent": 0}, "class_type": "ControlNetApplyAdvanced"}, "199": {"_meta": {"title": "CLIP Text Encode (Prompt)"}, "inputs": {"clip": ["365", 1], "text": f"Fashion model {character_keyword} wearing {outfit_desc}. posing in front of white background"}, "class_type": "CLIPTextEncode"}, "260": {"_meta": {"title": "Load ControlNet Model"}, "inputs": {"control_net_name": "flux.1-dev-controlnet-union.safetensors"}, "class_type": "ControlNetLoader"}, "263": {"_meta": {"title": "Save Image"}, "inputs": {"images": ["311", 0], "filename_prefix": "ControlNet"}, "class_type": "SaveImage"}, "307": {"_meta": {"title": "FluxGuidance"}, "inputs": {"guidance": 3.5, "conditioning": ["175", 0]}, "class_type": "FluxGuidance"}, "308": {"_meta": {"title": "KSampler"}, "inputs": {"cfg": 1, "seed": seed, "model": ["365", 0], "steps": 20, "denoise": 1, "negative": ["335", 0], "positive": ["307", 0], "scheduler": "simple", "latent_image": ["344", 0], "sampler_name": "euler"}, "class_type": "KSampler"}, "310": {"_meta": {"title": "DualCLIPLoader"}, "inputs": {"type": "flux", "device": "default", "clip_name1": "t5xxl_fp8_e4m3fn.safetensors", "clip_name2": "clip_l.safetensors"}, "class_type": "DualCLIPLoader"}, "311": {"_meta": {"title": "VAE Decode"}, "inputs": {"vae": ["159", 0], "samples": ["308", 0]}, "class_type": "VAEDecode"}, "335": {"_meta": {"title": "FluxGuidance"}, "inputs": {"guidance": 3.5, "conditioning": ["175", 1]}, "class_type": "FluxGuidance"}, "344": {"_meta": {"title": "Empty Latent Image"}, "inputs": {"width": 544, "height": 960, "batch_size": num_outputs}, "class_type": "EmptyLatentImage"}, "363": {"_meta": {"title": "Load Diffusion Model"}, "inputs": {"unet_name": "flux1-dev-fp8-e4m3fn.safetensors", "weight_dtype": "fp8_e4m3fn"}, "class_type": "UNETLoader"}, "365": {"_meta": {"title": "Load LoRA"}, "inputs": {"clip": ["310", 0], "model": ["363", 0], "lora_name": character_lora, "strength_clip": 0.99, "strength_model": 0.84}, "class_type": "LoraLoader"}, "369": {"_meta": {"title": "Load Image"}, "inputs": {"image": "Pose_Female_Front_full_standing_02.webp_00001_.png", "upload": "image"}, "class_type": "LoadImage"}}
@@ -43,6 +44,52 @@ def create_image(character_lora, character_keyword, outfit_desc, pose_id, num_ou
     print(run)
     return(run)

+def on_queue_update(update):
+    if isinstance(update, fal_client.InProgress):
+        for log in update.logs:
+            print(log["message"])
+
+def create_image(character_lora, character_keyword, outfit_desc, pose_id, num_outputs):
+    seed = random.randint(0, 1000000)
+    print(f"seed: {seed}")
+    prompt = f"Fashion model {character_keyword} wearing {outfit_desc}. posing in front of white background"
+
+    result = fal_client.subscribe(
+        "fal-ai/flux-general",
+        arguments={
+            "seed": seed,
+            "loras": [{
+                "path": character_lora,
+                "scale": "1"
+            }],
+            "prompt": prompt,
+            "image_size": "portrait_9_16",
+            "num_images": num_outputs,
+            "controlnets": [],
+            "guidance_scale": 3.5,
+            "controlnet_unions": [{
+                "path": "InstantX/FLUX.1-dev-Controlnet-Union",
+                "variant": None,
+                "controls": [{
+                    "control_mode": "pose",
+                    "control_image_url": "blob:https://fal.ai/2fdf0ff3-73e0-445c-b521-51b3dcf7f2ff",
+                    "end_percentage": 0.8
+                }]
+            }],
+            "num_inference_steps": 28,
+            "enable_safety_checker": True,
+            "control_loras": [],
+            "use_beta_schedule": True
+        },
+        with_logs=True,
+        on_queue_update=on_queue_update,
+    )
+    print(result)
+    return result
+
+
+
+
 if __name__ == "__main__":
     character_lora = "yOA2a06KWEMR-ewq3j8io_pytorch_lora_weights.safetensors"
     character_keyword = "crxter_scandi"
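
create_image returns the raw fal response, which for the flux endpoints is (an assumption worth verifying) a dict shaped like {"images": [{"url": ...}, ...]}, so a caller might extract URLs as sketched below. Note also that the blob: control_image_url is a browser-local reference from the fal.ai playground that fal's servers cannot fetch; a server-side call would need a hosted pose image, for example one uploaded with fal_client.upload_file. The helper name here is illustrative, not part of the commit:

    import fal_client

    def get_image_urls(result):
        # Assumption: fal flux responses carry {"images": [{"url": ...}, ...]}.
        return [img["url"] for img in result.get("images", [])]

    # blob: URLs only resolve in the browser session that created them, so the
    # pose reference would first be uploaded somewhere fal can reach, e.g.:
    # pose_url = fal_client.upload_file("Pose_Female_Front_full_standing_02.webp_00001_.png")
    # ...and then passed as control_image_url in place of the blob: reference.

    # Usage sketch:
    # urls = get_image_urls(create_image(character_lora, character_keyword, "a red dress", 1, 1))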