AlekseyCalvin committed · verified
Commit fc923ab · 1 Parent(s): 58dbf90

Update pipeline.py

Files changed (1): pipeline.py (+1 / -27)
pipeline.py CHANGED
@@ -592,6 +592,7 @@ class FluxWithCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFile
             negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
             callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
             max_sequence_length=max_sequence_length,
+            lora_scale=lora_scale
         )
 
         self._guidance_scale = guidance_scale
@@ -615,33 +616,6 @@ class FluxWithCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFile
             self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None
         )
 
-        # 3. Encode prompt
-        lora_scale = (
-            self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None
-        )
-        (
-            prompt_embeds,
-            negative_prompt_embeds,
-            pooled_prompt_embeds,
-            negative_pooled_prompt_embeds,
-            text_ids,
-        ) = self.encode_prompt(
-            prompt=prompt,
-            prompt_2=prompt_2,
-            negative_prompt=negative_prompt,
-            negative_prompt_2=negative_prompt_2,
-            do_classifier_free_guidance=self.do_classifier_free_guidance,
-            prompt_embeds=prompt_embeds,
-            negative_prompt_embeds=negative_prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
-            device=device,
-            clip_skip=self.clip_skip,
-            num_images_per_prompt=num_images_per_prompt,
-            max_sequence_length=max_sequence_length,
-            lora_scale=lora_scale,
-        )
-
         if self.do_classifier_free_guidance:
             prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
             pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0)
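
Net effect of the commit: the duplicated "# 3. Encode prompt" block is deleted, and lora_scale is instead forwarded to the one remaining encode_prompt call (new line 595). Below is a minimal usage sketch of how that scale would be supplied from the caller's side. It is an assumption-laden illustration, not part of this repo: the base checkpoint, LoRA path, and custom_pipeline reference are placeholders, loading via DiffusionPipeline.from_pretrained(custom_pipeline=...) is assumed to apply to this pipeline.py, and the output handling is assumed to match the standard Flux pipelines.

import torch
from diffusers import DiffusionPipeline

# Load the custom FluxWithCFGPipeline defined in pipeline.py.
# Checkpoint and custom_pipeline values are placeholders (assumptions).
pipe = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell",
    custom_pipeline="AlekseyCalvin/<this_repo>",
    torch_dtype=torch.bfloat16,
).to("cuda")

# Placeholder LoRA; FluxLoraLoaderMixin provides load_lora_weights().
pipe.load_lora_weights("path/to/lora.safetensors")

# "scale" in joint_attention_kwargs is what the pipeline reads as lora_scale
# (see `self.joint_attention_kwargs.get("scale", None)` in the hunk above);
# after this commit it is also passed through to encode_prompt.
result = pipe(
    prompt="a portrait photo of an astronaut",
    negative_prompt="blurry, low quality",
    guidance_scale=3.5,
    joint_attention_kwargs={"scale": 0.8},
)
result.images[0].save("out.png")  # assumes a Flux-style .images output

With the duplicate block gone, the scale is read once and applied to the same encode_prompt call whose embeddings feed the classifier-free-guidance concatenation shown at the end of the hunk.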