Spaces: Running on Zero

Commit 6db905d · 1 Parent(s): 92de8aa

Update app.py

app.py CHANGED
@@ -3,6 +3,7 @@ import torch
 from diffusers import StableDiffusionXLPipeline, AutoencoderKL
 from huggingface_hub import hf_hub_download
 from share_btn import community_icon_html, loading_icon_html, share_js
+from cog_sdxl_dataset_and_utils import TokenEmbeddingsHandler
 import lora
 from time import sleep
 import copy
@@ -19,6 +20,8 @@ with open("sdxl_loras.json", "r") as file:
             "trigger_word": item["trigger_word"],
             "weights": item["weights"],
             "is_compatible": item["is_compatible"],
+            "is_pivotal": item.get("is_pivotal", False),
+            "text_embedding_weights": item.get("text_embedding_weights", None)
         }
         for item in data
     ]
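The two new keys are optional, so existing entries in sdxl_loras.json keep working: item.get() falls back to False and None when a LoRA was not pivotal-tuned. A minimal sketch of how the loader treats both kinds of entry; the repo names, field values, and file names below are made up for illustration, not taken from the real sdxl_loras.json:

import json

# Hypothetical entries: a classic LoRA and a pivotal-tuned LoRA that ships
# its textual-inversion embeddings in a separate file.
data = json.loads("""
[
  {
    "repo": "some-user/classic-sdxl-lora",
    "trigger_word": "in the style of XYZ",
    "weights": "lora.safetensors",
    "is_compatible": true
  },
  {
    "repo": "some-user/pivotal-sdxl-lora",
    "trigger_word": "a photo of TOK",
    "weights": "lora.safetensors",
    "is_compatible": false,
    "is_pivotal": true,
    "text_embedding_weights": "embeddings.pti"
  }
]
""")

sdxl_loras = [
    {
        "trigger_word": item["trigger_word"],
        "weights": item["weights"],
        "is_compatible": item["is_compatible"],
        # .get() keeps older entries valid: missing keys default to False / None.
        "is_pivotal": item.get("is_pivotal", False),
        "text_embedding_weights": item.get("text_embedding_weights", None),
    }
    for item in data
]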
@@ -72,9 +75,10 @@ def update_selection(selected_state: gr.SelectData):
 image = pipe(prompt, num_inference_steps=30, guidance_scale=7.5, cross_attention_kwargs={{"scale": lora_scale}}).images[0]
 image.save("image.png")
 '''
-
+    elif not is_pivotal:
         use_with_diffusers += "This LoRA is not compatible with diffusers natively yet. But you can still use it on diffusers with `bmaltais/kohya_ss` LoRA class, check out this [Google Colab](https://colab.research.google.com/drive/14aEJsKdEQ9_kyfsiV6JDok799kxPul0j )"
-
+    else:
+        use_with_diffusers += f"This LoRA is not compatible with diffusers natively yet. But you can still use it on diffusers with sdxl-cog `TokenEmbeddingsHandler` class, check out the [model repo](https://huggingface.co/{lora_repo})"
     use_with_uis = f'''
 ## Use it with Comfy UI, Invoke AI, SD.Next, AUTO1111:
 
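This hunk only changes the instruction text shown under each LoRA. For reference, the diffusers-native flow that text walks through for compatible LoRAs looks roughly like the sketch below; the generation call and cross_attention_kwargs scaling mirror the snippet in the f-string, while the repo ID, weight file name, and prompt are placeholders:

import torch
from diffusers import StableDiffusionXLPipeline

# Placeholder identifiers; the Space substitutes these per selected LoRA.
lora_repo = "some-user/classic-sdxl-lora"
weight_name = "lora.safetensors"

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# Diffusers-native LoRA loading, as in the is_compatible branch.
pipe.load_lora_weights(lora_repo, weight_name=weight_name)

lora_scale = 0.9
image = pipe(
    "a photo of TOK",  # include the LoRA's trigger word in the prompt
    num_inference_steps=30,
    guidance_scale=7.5,
    cross_attention_kwargs={"scale": lora_scale},
).images[0]
image.save("image.png")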
@@ -143,13 +147,26 @@ def run_lora(prompt, negative, lora_scale, selected_state):
     else:
         pipe.unload_lora_weights()
     is_compatible = sdxl_loras[selected_state.index]["is_compatible"]
-
     if is_compatible:
         pipe.load_lora_weights(full_path_lora)
         cross_attention_kwargs = {"scale": lora_scale}
     else:
-
-
+        is_pivotal = sdxl_loras[selected_state.index]["is_pivotal"]
+        if(is_pivotal):
+
+            pipe.load_lora_weights(full_path_lora)
+            cross_attention_kwargs = {"scale": lora_scale}
+
+            #Add the textual inversion embeddings from pivotal tuning models
+            text_embedding_name = sdxl_loras[selected_state.index]["text_embedding_weights"]
+            text_encoders = [pipe.text_encoder, pipe.text_encoder_2]
+            tokenizers = [pipe.tokenizer, pipe.tokenizer_2]
+            embedding_path = hf_hub_download(repo_id=repo_name, filename=text_embedding_name, repo_type="model")
+            embhandler = TokenEmbeddingsHandler(text_encoders, tokenizers)
+            embhandler.load_embeddings(embedding_path)
+        else:
+            merge_incompatible_lora(full_path_lora, lora_scale)
+            last_merged = True
 
     image = pipe(
         prompt=prompt,
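This new run_lora branch is the heart of the commit: a pivotal-tuned LoRA ships regular LoRA weights plus learned token embeddings, and both SDXL text encoders need those embeddings for the trigger tokens to resolve. A condensed sketch of the same flow as a standalone helper; the repo and file names are placeholders, and TokenEmbeddingsHandler is assumed to be the vendored cog-sdxl utility imported at the top of app.py:

from huggingface_hub import hf_hub_download
from cog_sdxl_dataset_and_utils import TokenEmbeddingsHandler  # vendored cog-sdxl helper, assumed available

def load_pivotal_lora(pipe, repo_name, lora_filename, embedding_filename, lora_scale=0.9):
    """Load LoRA weights plus the pivotal-tuning textual-inversion embeddings."""
    # Regular diffusers LoRA loading for the low-rank weight deltas.
    lora_path = hf_hub_download(repo_id=repo_name, filename=lora_filename, repo_type="model")
    pipe.load_lora_weights(lora_path)

    # Pivotal tuning also learns new token embeddings; inject them into both
    # SDXL text encoders via their tokenizers.
    text_encoders = [pipe.text_encoder, pipe.text_encoder_2]
    tokenizers = [pipe.tokenizer, pipe.tokenizer_2]
    embedding_path = hf_hub_download(repo_id=repo_name, filename=embedding_filename, repo_type="model")
    embhandler = TokenEmbeddingsHandler(text_encoders, tokenizers)
    embhandler.load_embeddings(embedding_path)

    # Same scale dict the Space passes as cross_attention_kwargs at generation time.
    return {"scale": lora_scale}

# Usage (placeholder names):
# cross_attention_kwargs = load_pivotal_lora(
#     pipe, "some-user/pivotal-sdxl-lora", "lora.safetensors", "embeddings.pti")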