fffiloni committed
Commit 3523de3 · verified · 1 Parent(s): fe118aa

Update app.py

Files changed (1):
  1. app.py +14 -23
app.py CHANGED
@@ -26,17 +26,10 @@ clipi_client = Client("https://fffiloni-clip-interrogator-2.hf.space/")
 import numpy as np
 import random
 import torch
-from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, AutoencoderTiny, AutoencoderKL
-from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
-from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
+from diffusers import FluxPipeline
 
-dtype = torch.bfloat16
-device = "cuda" if torch.cuda.is_available() else "cpu"
-
-taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
-good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device)
-pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=dtype, vae=taef1).to(device)
-torch.cuda.empty_cache()
+pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
+pipe.enable_model_cpu_offload()
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 2048
@@ -44,23 +37,21 @@ MAX_IMAGE_SIZE = 2048
 #pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
 
 @spaces.GPU
-def infer_flux(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
+def infer_flux(prompt, seed=42, randomize_seed=True, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
     generator = torch.Generator().manual_seed(seed)
 
-    img = pipe(
-        prompt=prompt,
-        guidance_scale=guidance_scale,
-        num_inference_steps=num_inference_steps,
-        width=width,
-        height=height,
-        generator=generator,
-        output_type="pil",
-        good_vae=good_vae,
-    ).images[0]
-
-    img.save("flux-dev.png")
+    image = pipe(
+        prompt,
+        height=1024,
+        width=1024,
+        guidance_scale=3.5,
+        num_inference_steps=50,
+        max_sequence_length=512,
+        generator=torch.Generator("cpu").manual_seed(0)
+    ).images[0]
+    image.save("flux-dev.png")
     return "flux-dev.png"
 
 @spaces.GPU
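
As committed, the new pipe() call fixes the output at 1024x1024, 50 steps, guidance 3.5, and a constant CPU seed of 0, so the seed, width, height, guidance_scale, and num_inference_steps arguments of infer_flux are no longer used. Below is a minimal sketch, not part of this commit, of how the existing signature could be forwarded to the same FluxPipeline call; it assumes the surrounding app.py context (spaces, gr, random, torch, pipe, MAX_SEED) is already defined.

# Sketch only, not the committed code: forward infer_flux's own arguments
# to the FluxPipeline call instead of hard-coded values.
@spaces.GPU
def infer_flux(prompt, seed=42, randomize_seed=True, width=1024, height=1024,
               guidance_scale=3.5, num_inference_steps=28,
               progress=gr.Progress(track_tqdm=True)):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    # Seed a CPU generator with the (possibly randomized) seed instead of a constant 0.
    generator = torch.Generator("cpu").manual_seed(seed)

    image = pipe(
        prompt,
        height=height,
        width=width,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        max_sequence_length=512,  # kept from the committed call; caps the T5 prompt length for FLUX.1-dev
        generator=generator,
    ).images[0]
    image.save("flux-dev.png")
    return "flux-dev.png"

With enable_model_cpu_offload(), diffusers moves each sub-model onto the GPU only while it is needed, which is why the explicit .to(device) and torch.cuda.empty_cache() calls could be dropped in this commit, at the cost of some speed compared with keeping the whole pipeline resident on the GPU.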