AlekseyCalvin committed on
Commit
11e846d
·
verified ·
1 Parent(s): a293504

Update pipeline.py

Browse files
Files changed (1) hide show
  1. pipeline.py +0 -61
pipeline.py CHANGED
@@ -68,67 +68,6 @@ def prepare_timesteps(
68
 
69
  # FLUX pipeline function
70
  class FluxWithCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixin):
71
- def __init__(
72
- self,
73
- scheduler: FlowMatchEulerDiscreteScheduler,
74
- vae: AutoencoderKL,
75
- text_encoder: CLIPTextModel,
76
- tokenizer: CLIPTokenizer,
77
- text_encoder_2: T5EncoderModel,
78
- tokenizer_2: T5TokenizerFast,
79
- transformer: FluxTransformer2DModel,
80
- ):
81
- super().__init__()
82
-
83
- self.register_modules(
84
- vae=vae,
85
- text_encoder=text_encoder,
86
- text_encoder_2=text_encoder_2,
87
- tokenizer=tokenizer,
88
- tokenizer_2=tokenizer_2,
89
- transformer=transformer,
90
- scheduler=scheduler,
91
- )
92
- self.vae_scale_factor = (
93
- 2 ** (len(self.vae.config.block_out_channels)) if hasattr(self, "vae") and self.vae is not None else 16
94
- )
95
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
96
- self.tokenizer_max_length = (
97
- self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77
98
- )
99
- self.default_sample_size = 64
100
- r"""
101
- The Flux pipeline for text-to-image generation.
102
-
103
- Reference: https://blackforestlabs.ai/announcing-black-forest-labs/
104
-
105
- Args:
106
- transformer ([`FluxTransformer2DModel`]):
107
- Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.
108
- scheduler ([`FlowMatchEulerDiscreteScheduler`]):
109
- A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
110
- vae ([`AutoencoderKL`]):
111
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
112
- text_encoder ([`CLIPTextModel`]):
113
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
114
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
115
- text_encoder_2 ([`T5EncoderModel`]):
116
- [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically
117
- the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant.
118
- tokenizer (`CLIPTokenizer`):
119
- Tokenizer of class
120
- [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer).
121
- tokenizer_2 (`T5TokenizerFast`):
122
- Second Tokenizer of class
123
- [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast).
124
- """
125
-
126
- model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae"
127
- _optional_components = []
128
- _callback_tensor_inputs = ["latents", "prompt_embeds"] model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae"
129
- _optional_components = []
130
- _callback_tensor_inputs = ["latents", "prompt_embeds"]
131
-
132
  def __init__(
133
  self,
134
  scheduler: FlowMatchEulerDiscreteScheduler,
 
68
 
69
  # FLUX pipeline function
70
  class FluxWithCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixin):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
  def __init__(
72
  self,
73
  scheduler: FlowMatchEulerDiscreteScheduler,