Spaces: Running on Zero

vae bf16
gaparmar committed
Commit · 2bba48b
1 Parent(s): bb4cdae
app.py
CHANGED
@@ -19,7 +19,8 @@ import argparse
 precision = get_precision()
 transformer = NunchakuFluxTransformer2dModel.from_pretrained(f"nunchaku-tech/nunchaku-flux.1-schnell/svdq-{precision}_r32-flux.1-schnell.safetensors")
 pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", transformer=transformer, torch_dtype=torch.bfloat16).to("cuda")
-pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1").to("cuda")
+# pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1").to("cuda")
+pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16).to("cuda")

 m_clip = CLIPModel.from_pretrained("multimodalart/clip-vit-base-patch32").to("cuda")
 prep_clip = CLIPProcessor.from_pretrained("multimodalart/clip-vit-base-patch32")
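For context, a hedged reading of the change: the pipeline is loaded with torch_dtype=torch.bfloat16, while AutoencoderTiny would otherwise load its weights in float32, so decoding bf16 latents with an fp32 VAE can hit a dtype mismatch. Passing torch_dtype=torch.bfloat16 when loading the taef1 VAE keeps it consistent with the rest of the pipeline, which appears to be the intent of the "vae bf16" commit. The snippet below is an illustrative sketch, not code from this commit; the transformer and CLIP setup from app.py are omitted.

# Illustrative sketch (assumption, not from this commit): load the tiny VAE in
# the same dtype as the Flux pipeline so decoding does not mix fp32 and bf16.
import torch
from diffusers import AutoencoderTiny, FluxPipeline

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
).to("cuda")

# Without torch_dtype, taef1 would load in float32 and mismatch the bf16 latents.
pipe.vae = AutoencoderTiny.from_pretrained(
    "madebyollin/taef1", torch_dtype=torch.bfloat16
).to("cuda")

print(pipe.vae.dtype)  # torch.bfloat16, matching the rest of the pipeline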