Update app.py
app.py
CHANGED
```diff
@@ -89,11 +89,13 @@ def generate(segment, image, prompt, size, guidance_scale, num_inference_steps,
     # config=offload_config,
     #)
     pipe.text_encoder.to("cuda")
+    pipe.text_encoder_2.to("cuda")
     with torch.no_grad():
         prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_attention_mask, pooled_prompt_embeds, negative_pooled_prompt_embeds = pipe.encode_prompt(
             prompt=prompt, do_classifier_free_guidance=True, negative_prompt=negative_prompt, device=device
         )
     pipe.text_encoder.to("cpu")
+    pipe.text_encoder_2.to("cpu")
     transformer_dtype = pipe.transformer.dtype
     prompt_embeds = prompt_embeds.to(transformer_dtype)
     prompt_attention_mask = prompt_attention_mask.to(transformer_dtype)
```
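The change itself is a manual offload pattern: both text encoders stay on the CPU and are moved to the GPU only for the duration of `encode_prompt`, then moved back so the transformer keeps the VRAM during denoising, with the resulting embeddings cast to the transformer's dtype. Below is a minimal sketch of the same pattern as a reusable helper, assuming a two-text-encoder Diffusers-style pipeline like the one in this Space; the `encode_prompt_offloaded` name, the `try`/`finally`, and the `torch.cuda.empty_cache()` call are additions for illustration, not code from `app.py`:

```python
import torch

def encode_prompt_offloaded(pipe, prompt, negative_prompt, device="cuda"):
    """Move both text encoders to the GPU only for prompt encoding.

    Hypothetical helper mirroring the pattern in this commit; `pipe` is
    assumed to expose `text_encoder`, `text_encoder_2`, and the same
    `encode_prompt` call used in app.py.
    """
    pipe.text_encoder.to(device)
    pipe.text_encoder_2.to(device)
    try:
        with torch.no_grad():
            # Same call and return values as in the diff above.
            outputs = pipe.encode_prompt(
                prompt=prompt,
                do_classifier_free_guidance=True,
                negative_prompt=negative_prompt,
                device=device,
            )
    finally:
        # Offload even if encoding fails, so both encoders are not left
        # occupying GPU memory alongside the transformer.
        pipe.text_encoder.to("cpu")
        pipe.text_encoder_2.to("cpu")
        torch.cuda.empty_cache()
    return outputs
```

After encoding, the embeddings are cast to `pipe.transformer.dtype` exactly as in the diff, so the denoising transformer receives inputs in its own precision.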