fffiloni committed (verified)
Commit 9344e65 · Parent(s): 657e8ea

Update app.py

Files changed (1):
  1. app.py  +24 -25
app.py CHANGED
@@ -26,15 +26,22 @@ clipi_client = Client("https://fffiloni-clip-interrogator-2.hf.space/")
 import numpy as np
 import random
 import torch
-from diffusers import FluxPipeline
+from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, AutoencoderTiny, AutoencoderKL
+from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
+from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
 
-pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
-pipe.enable_model_cpu_offload()
+dtype = torch.bfloat16
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
+good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device)
+pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=dtype, vae=taef1).to(device)
+torch.cuda.empty_cache()
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 2048
 
-#pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
+pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
 
 @spaces.GPU(duration=75)
 def infer_flux(prompt, seed=42, randomize_seed=True, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
@@ -42,18 +49,18 @@ def infer_flux(prompt, seed=42, randomize_seed=True, width=1024, height=1024, gu
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator().manual_seed(seed)
 
-    image = pipe(
-        prompt,
-        height=1024,
-        width=1024,
-        guidance_scale=3.5,
-        num_inference_steps=25,
-        max_sequence_length=512,
-        generator=torch.Generator("cpu").manual_seed(0)
-    ).images[0]
-    image.save("flux-dev.png")
-    return "flux-dev.png"
-
+    for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
+        prompt=prompt,
+        guidance_scale=guidance_scale,
+        num_inference_steps=num_inference_steps,
+        width=width,
+        height=height,
+        generator=generator,
+        output_type="pil",
+        good_vae=good_vae,
+    ):
+        yield img
+
 @spaces.GPU
 def llama_gen_fragrance(scene):
 
@@ -281,14 +288,6 @@ def infer(image_input):
     image_desc = extract_field(parsed, "Image Description")
     yield result, parsed, image_desc
 
-def generate_flacon(image_desc, progress=gr.Progress(track_tqdm=True)):
-
-    print(image_desc)
-    gr.Info("Generating a nice marketing image with FLUX...")
-    gen_bottle = infer_flux(image_desc)
-
-    return gen_bottle
-
 css="""
 #col-container {max-width: 910px; margin-left: auto; margin-right: auto;}
 """
@@ -314,6 +313,6 @@ with gr.Blocks(css=css) as demo:
     bottle_res = gr.Image(label="Flacon")
 
     submit_btn.click(fn=infer, inputs=[image_in], outputs=[fragrance, json_res, flacon_desc])
-    get_flacon_btn.click(fn=generate_flacon, inputs=[flacon_desc], outputs=[bottle_res])
+    get_flacon_btn.click(fn=infer_flux, inputs=[flacon_desc], outputs=[bottle_res])
 
 demo.queue(max_size=12).launch(ssr_mode=False, mcp_server=True)
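For context, a minimal self-contained sketch of the two patterns this commit restores: binding a standalone generator function onto the pipeline instance with __get__, and registering a generator as the Gradio click handler so every yielded image updates the output while inference is still running. This is not code from the repository; FakePipe and preview_images are hypothetical stand-ins for the DiffusionPipeline and flux_pipe_call_that_returns_an_iterable_of_images.

# Minimal sketch, not the app's actual code.
import gradio as gr

class FakePipe:
    """Hypothetical stand-in for the DiffusionPipeline instance in app.py."""
    pass

def preview_images(self, prompt, num_inference_steps=4):
    # Stand-in for the live-preview helper: yields one result per denoising step.
    for step in range(num_inference_steps):
        yield f"{prompt} -- step {step + 1}/{num_inference_steps}"

pipe = FakePipe()
# __get__ turns the plain function into a method bound to pipe, so pipe is
# passed as self -- the same binding the commit re-enables for the helper.
pipe.preview_images = preview_images.__get__(pipe)

def infer(prompt):
    # Generator handler: Gradio refreshes the output on every yield, which is
    # how the app can stream intermediate FLUX previews to the bottle_res image.
    for img in pipe.preview_images(prompt):
        yield img

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    out = gr.Textbox(label="Preview")  # gr.Image in the real app
    gr.Button("Run").click(fn=infer, inputs=[prompt], outputs=[out])

# demo.launch()

In the app itself, taef1 (AutoencoderTiny) presumably provides the cheap decode for the intermediate previews, while good_vae, the full FLUX.1-dev VAE passed to the helper, handles the final, higher-quality image.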