Fix inference script variable
Browse files
README.md
CHANGED
|
@@ -37,7 +37,7 @@ To use the model, just call the custom pipeline using [diffusers](https://github
|
|
| 37 |
|
| 38 |
```py
|
| 39 |
from diffusers import DiffusionPipeline
|
| 40 |
- pipeline = DiffusionPipeline.from_pretrained(
|
| 41 |
"jimmycarter/LibreFLUX",
|
| 42 |
custom_pipeline="jimmycarter/LibreFLUX",
|
| 43 |
use_safetensors=True,
|
|
@@ -46,7 +46,7 @@ pipeline = DiffusionPipeline.from_pretrained(
|
|
| 46 |
# High VRAM
|
| 47 |
prompt = "Photograph of a chalk board on which is written: 'I thought what I'd do was, I'd pretend I was one of those deaf-mutes.'"
|
| 48 |
negative_prompt = "blurry"
|
| 49 |
- images = pipeline(
|
| 50 |
prompt=prompt,
|
| 51 |
negative_prompt=negative_prompt,
|
| 52 |
)
|
|
@@ -56,6 +56,7 @@ images[0].save('chalkboard.png')
|
|
| 56 |
# ! pip install optimum-quanto
|
| 57 |
# Then
|
| 58 |
from optimum.quanto import freeze, quantize, qint8
|
|
|
|
| 59 |
quantize(
|
| 60 |
pipe.transformer,
|
| 61 |
weights=qint8,
|
|
@@ -66,7 +67,7 @@ quantize(
|
|
| 66 |
)
|
| 67 |
freeze(pipe.transformer)
|
| 68 |
pipe.enable_model_cpu_offload()
|
| 69 |
- images = pipeline(
|
| 70 |
prompt=prompt,
|
| 71 |
negative_prompt=negative_prompt,
|
| 72 |
device=None,
|
|
|
|
| 37 |
|
| 38 |
```py
|
| 39 |
from diffusers import DiffusionPipeline
|
| 40 |
+ pipe = DiffusionPipeline.from_pretrained(
|
| 41 |
"jimmycarter/LibreFLUX",
|
| 42 |
custom_pipeline="jimmycarter/LibreFLUX",
|
| 43 |
use_safetensors=True,
|
|
|
|
| 46 |
# High VRAM
|
| 47 |
prompt = "Photograph of a chalk board on which is written: 'I thought what I'd do was, I'd pretend I was one of those deaf-mutes.'"
|
| 48 |
negative_prompt = "blurry"
|
| 49 |
+ images = pipe(
|
| 50 |
prompt=prompt,
|
| 51 |
negative_prompt=negative_prompt,
|
| 52 |
)
|
|
|
|
| 56 |
# ! pip install optimum-quanto
|
| 57 |
# Then
|
| 58 |
from optimum.quanto import freeze, quantize, qint8
|
| 59 |
+ # quantize and freeze will take a short amount of time, so be patient.
|
| 60 |
quantize(
|
| 61 |
pipe.transformer,
|
| 62 |
weights=qint8,
|
|
|
|
| 67 |
)
|
| 68 |
freeze(pipe.transformer)
|
| 69 |
pipe.enable_model_cpu_offload()
|
| 70 |
+ images = pipe(
|
| 71 |
prompt=prompt,
|
| 72 |
negative_prompt=negative_prompt,
|
| 73 |
device=None,
|