Commit 8d7ec14
Parent(s): 452bc23

Commit message: auto

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- .eslintignore +5 -0
- .eslintrc.js +96 -0
- .git-blame-ignore-revs +2 -0
- .pylintrc +3 -0
- CITATION.cff +7 -0
- CODEOWNERS +1 -0
- MyRP.ipynb +160 -0
- __pycache__/launch.cpython-310.pyc +0 -0
- __pycache__/webui.cpython-310.pyc +0 -0
- apatch.sh +58 -0
- commit.bat +30 -0
- config.json +326 -0
- configs/alt-diffusion-inference.yaml +72 -0
- configs/alt-diffusion-m18-inference.yaml +73 -0
- configs/instruct-pix2pix.yaml +98 -0
- configs/sd_xl_inpaint.yaml +98 -0
- configs/v1-inference.yaml +70 -0
- configs/v1-inpainting-inference.yaml +70 -0
- downs.sh +105 -0
- embeddings/Place Textual Inversion embeddings here.txt +0 -0
- environment-wsl2.yaml +11 -0
- extensions-builtin/LDSR/__pycache__/ldsr_model_arch.cpython-310.pyc +0 -0
- extensions-builtin/LDSR/__pycache__/preload.cpython-310.pyc +0 -0
- extensions-builtin/LDSR/__pycache__/sd_hijack_autoencoder.cpython-310.pyc +0 -0
- extensions-builtin/LDSR/__pycache__/sd_hijack_ddpm_v1.cpython-310.pyc +0 -0
- extensions-builtin/LDSR/__pycache__/vqvae_quantize.cpython-310.pyc +0 -0
- extensions-builtin/LDSR/ldsr_model_arch.py +250 -0
- extensions-builtin/LDSR/preload.py +6 -0
- extensions-builtin/LDSR/scripts/__pycache__/ldsr_model.cpython-310.pyc +0 -0
- extensions-builtin/LDSR/scripts/ldsr_model.py +70 -0
- extensions-builtin/LDSR/sd_hijack_autoencoder.py +293 -0
- extensions-builtin/LDSR/sd_hijack_ddpm_v1.py +1443 -0
- extensions-builtin/LDSR/vqvae_quantize.py +147 -0
- extensions-builtin/Lora/__pycache__/extra_networks_lora.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/lora.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/lora_patches.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/network.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/networks.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/preload.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/ui_edit_user_metadata.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/ui_extra_networks_lora.cpython-310.pyc +0 -0
- extensions-builtin/Lora/extra_networks_lora.py +67 -0
- extensions-builtin/Lora/lora.py +9 -0
- extensions-builtin/Lora/lora_logger.py +33 -0
- extensions-builtin/Lora/lora_patches.py +6 -0
- extensions-builtin/Lora/network.py +190 -0
- extensions-builtin/Lora/networks.py +219 -0
- extensions-builtin/Lora/preload.py +8 -0
- extensions-builtin/Lora/scripts/__pycache__/lora_script.cpython-310.pyc +0 -0
- extensions-builtin/Lora/scripts/lora_script.py +100 -0
.eslintignore
ADDED
@@ -0,0 +1,5 @@
+extensions
+extensions-disabled
+extensions-builtin/sd_forge_controlnet
+repositories
+venv
.eslintrc.js
ADDED
@@ -0,0 +1,96 @@
+/* global module */
+module.exports = {
+    env: {
+        browser: true,
+        es2021: true,
+    },
+    extends: "eslint:recommended",
+    parserOptions: {
+        ecmaVersion: "latest",
+    },
+    rules: {
+        "arrow-spacing": "error",
+        "block-spacing": "error",
+        "brace-style": "error",
+        "comma-dangle": ["error", "only-multiline"],
+        "comma-spacing": "error",
+        "comma-style": ["error", "last"],
+        "curly": ["error", "multi-line", "consistent"],
+        "eol-last": "error",
+        "func-call-spacing": "error",
+        "function-call-argument-newline": ["error", "consistent"],
+        "function-paren-newline": ["error", "consistent"],
+        "indent": ["error", 4],
+        "key-spacing": "error",
+        "keyword-spacing": "error",
+        "linebreak-style": ["error", "unix"],
+        "no-extra-semi": "error",
+        "no-mixed-spaces-and-tabs": "error",
+        "no-multi-spaces": "error",
+        "no-redeclare": ["error", {builtinGlobals: false}],
+        "no-trailing-spaces": "error",
+        "no-unused-vars": "off",
+        "no-whitespace-before-property": "error",
+        "object-curly-newline": ["error", {consistent: true, multiline: true}],
+        "object-curly-spacing": ["error", "never"],
+        "operator-linebreak": ["error", "after"],
+        "quote-props": ["error", "consistent-as-needed"],
+        "semi": ["error", "always"],
+        "semi-spacing": "error",
+        "semi-style": ["error", "last"],
+        "space-before-blocks": "error",
+        "space-before-function-paren": ["error", "never"],
+        "space-in-parens": ["error", "never"],
+        "space-infix-ops": "error",
+        "space-unary-ops": "error",
+        "switch-colon-spacing": "error",
+        "template-curly-spacing": ["error", "never"],
+        "unicode-bom": "error",
+    },
+    globals: {
+        //script.js
+        gradioApp: "readonly",
+        executeCallbacks: "readonly",
+        onAfterUiUpdate: "readonly",
+        onOptionsChanged: "readonly",
+        onUiLoaded: "readonly",
+        onUiUpdate: "readonly",
+        uiCurrentTab: "writable",
+        uiElementInSight: "readonly",
+        uiElementIsVisible: "readonly",
+        //ui.js
+        opts: "writable",
+        all_gallery_buttons: "readonly",
+        selected_gallery_button: "readonly",
+        selected_gallery_index: "readonly",
+        switch_to_txt2img: "readonly",
+        switch_to_img2img_tab: "readonly",
+        switch_to_img2img: "readonly",
+        switch_to_sketch: "readonly",
+        switch_to_inpaint: "readonly",
+        switch_to_inpaint_sketch: "readonly",
+        switch_to_extras: "readonly",
+        get_tab_index: "readonly",
+        create_submit_args: "readonly",
+        restart_reload: "readonly",
+        updateInput: "readonly",
+        onEdit: "readonly",
+        //extraNetworks.js
+        requestGet: "readonly",
+        popup: "readonly",
+        // from python
+        localization: "readonly",
+        // progressbar.js
+        randomId: "readonly",
+        requestProgress: "readonly",
+        // imageviewer.js
+        modalPrevImage: "readonly",
+        modalNextImage: "readonly",
+        // localStorage.js
+        localSet: "readonly",
+        localGet: "readonly",
+        localRemove: "readonly",
+        // resizeHandle.js
+        setupResizeHandle: "writable"
+    }
+};
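
Side note: the two files above use ESLint's classic configuration format (.eslintrc.js plus .eslintignore), keeping the linter out of vendored directories such as extensions and repositories. A minimal sketch of running it from the repo root, assuming ESLint is installed (npx fetches it otherwise):

    npx eslint .          # lints using .eslintrc.js, honoring .eslintignore
    npx eslint --fix .    # auto-fixes the mechanical spacing/semicolon/indent rules
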
.git-blame-ignore-revs
ADDED
@@ -0,0 +1,2 @@
+# Apply ESlint
+9c54b78d9dde5601e916f308d9a9d6953ec39430
.pylintrc
ADDED
@@ -0,0 +1,3 @@
+# See https://pylint.pycqa.org/en/latest/user_guide/messages/message_control.html
+[MESSAGES CONTROL]
+disable=C,R,W,E,I
CITATION.cff
ADDED
@@ -0,0 +1,7 @@
+cff-version: 1.2.0
+message: "If you use this software, please cite it as below."
+authors:
+  - given-names: AUTOMATIC1111
+title: "Stable Diffusion Web UI"
+date-released: 2022-08-22
+url: "https://github.com/AUTOMATIC1111/stable-diffusion-webui"
CODEOWNERS
ADDED
@@ -0,0 +1 @@
+* @lllyasviel
MyRP.ipynb
ADDED
@@ -0,0 +1,160 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "d6bdd156-e441-4f7b-bd7e-d397fc335428",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!kill -9 $(ps -ef | grep 'relauncher.py' | grep -v 'grep' | awk '{print $2}')\n",
+    "!kill -9 $(ps -ef | grep 'webui' | grep -v 'grep' | awk '{print $2}')\n",
+    "!kill -9 $(ps -ef | grep 'launch.py' | grep -v 'grep' | awk '{print $2}')\n",
+    "!kill -9 $(ps -ef | grep 'run.sh' | grep -v 'grep' | awk '{print $2}')\n",
+    "!kill -9 $(ps -ef | grep 'run.py' | grep -v 'grep' | awk '{print $2}')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f34d6131-8170-4173-ac5c-f835c9fa6ea5",
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "%cd /workspace\n",
+    "!sh /workspace/run.sh\n",
+    "#final_args = f\"python /workspace/SD/launch.py --port 7860 --api --theme dark --allow-code --enable-insecure-extension-access --listen --share --cuda-malloc --cuda-stream --pin-shared-memory\"\n",
+    "#!{final_args}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1778d96c-dca4-4d2a-bbe6-3ee6b29fc504",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a6dc2425-7df8-4d99-80b3-312bac52019f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!git -C /workspace/SD/models fetch --all\n",
+    "!git -C /workspace/SD/models reset --hard origin/main\n",
+    "!git -C /workspace/SD/models pull\n",
+    "!git -C /workspace/SD/models repack -a -d --depth=250 --window=250\n",
+    "!rsync -r \"/workspace/SD/models/embeddings/\" \"/workspace/SD/embeddings/\"\n",
+    "!rm -r \"/workspace/SD/models/.git/lfs\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6d5e74dc-c597-4182-927b-fb506987d7ca",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!sh /workspace/out.sh 0"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "abb78189-95bc-4d04-b897-b75ffa5ec539",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "if os.path.exists(\"/workspace/SD/output\"):\n",
+    "    !runpodctl send /workspace/SD/output\n",
+    "else:\n",
+    "    if os.path.exists(\"/workspace/output\"):\n",
+    "        !runpodctl send /workspace/output"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9ebbbb83-c8ef-47d2-b038-92145196385e",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "645dbd2b-0f6f-4791-8b30-435649d27ed2",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch\n",
+    "import torchvision\n",
+    "print(torch.__version__)\n",
+    "print(torchvision.__version__)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "87d3aadd-7a32-4132-a1ec-0d53b75518be",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c7f291ff-c792-454e-9a20-211759f9e208",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "94503b7f-e460-4eda-8679-058497ae771a",
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "checkpointDownloadIndex = 4\n",
+    "\n",
+    "%cd /workspace\n",
+    "!git clone https://github.com/zuv0/s\n",
+    "!sh ./s/rp.sh f\"{checkpointDownloadIndex} 0 0 0\""
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.12"
+  },
+  "vscode": {
+   "interpreter": {
+    "hash": "c36c8ff63afb68809d72fa6323bde02fb9b90fe01b492d36c13befd021790766"
+   }
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
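
One caveat about the final code cell: IPython expands {checkpointDownloadIndex} inside a ! command, but the surrounding f"..." is not evaluated as a Python f-string there, so the shell receives the single word f"4 0 0 0" rather than four separate arguments. rp.sh is not part of this diff, so its expected argument format is an assumption, but if plain positional arguments were intended the cell would read:

    !sh ./s/rp.sh {checkpointDownloadIndex} 0 0 0
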
__pycache__/launch.cpython-310.pyc
ADDED
Binary file (1.15 kB).
__pycache__/webui.cpython-310.pyc
ADDED
Binary file (4.32 kB).
apatch.sh
ADDED
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+if [ ! -d "/workspace/firstContainer/SD" ]; then
+    exit 101
+fi
+
+apt-get install zip -y
+pip install pcloud
+pip uninstall -y setuptools
+pip install setuptools==69.5.1
+
+cd /workspace/firstContainer
+
+rm -rf out.sh
+rm -rf pOut.py
+rm -rf myRun.ipynb
+curl -L -O https://huggingface.co/zuv0/SDXLF17/resolve/main/out_rp.sh
+\mv out_rp.sh out.sh
+curl -L -O https://huggingface.co/zuv0/SDXLF17/resolve/main/myRun.ipynb
+
+cd SD
+git gc
+
+if [ ! -d "models" ]; then
+    git clone https://huggingface.co/zuv0/modelsXL models
+    cd models
+    \mv -f /workspace/firstContainer/SD/repositories/BLIP/_git /workspace/firstContainer/SD/repositories/BLIP/.git
+    \mv -f /workspace/firstContainer/SD/repositories/generative-models/_git /workspace/firstContainer/SD/repositories/generative-models/.git
+    \mv -f /workspace/firstContainer/SD/repositories/k-diffusion/_git /workspace/firstContainer/SD/repositories/k-diffusion/.git
+    \mv -f /workspace/firstContainer/SD/repositories/stable-diffusion-stability-ai/_git /workspace/firstContainer/SD/repositories/stable-diffusion-stability-ai/.git
+    \mv -f /workspace/firstContainer/SD/repositories/stable-diffusion-webui-assets/_git /workspace/firstContainer/SD/repositories/stable-diffusion-webui-assets/.git
+    cd /workspace/*/SD/extensions-builtin/forge_legacy_preprocessors/annotator/oneformer/oneformer/data
+    curl -L -O https://github.com/Mikubill/sd-webui-controlnet/blob/main/annotator/oneformer/oneformer/data/bpe_simple_vocab_16e6.txt.gz
+else
+    cd models
+fi
+
+cd Lora
+rm -rf RC*
+rm -rf RA*
+
+if [ -d "XLRN" ]; then
+    cd XLRN
+    rm -rf *
+fi
+
+cd /workspace/firstContainer
+
+sh ./SD/downs.sh $1 0
+sh forcePull.sh
+sh gc.sh
+
+pip cache purge
+sudo apt clean
+
+rm -rf apatch.sh
+
+exit 0
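
A caveat on the curl line inside the models branch above: a github.com/.../blob/... URL returns the HTML viewer page, not the file itself, so the saved bpe_simple_vocab_16e6.txt.gz would actually be an HTML document. A sketch of the equivalent raw-content download for the same path:

    curl -L -O https://raw.githubusercontent.com/Mikubill/sd-webui-controlnet/main/annotator/oneformer/oneformer/data/bpe_simple_vocab_16e6.txt.gz
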
commit.bat
ADDED
@@ -0,0 +1,30 @@
+@echo off
+
+git config user.name "anonymous"
+git config user.email [email protected]
+
+set /P comment=Comment?
+
+if not "%comment%" == "" goto :next
+set comment=auto
+:next
+
+git reset
+
+::git add *.* --force
+::git add *
+git add --all
+
+echo ----- git add done ------
+
+git status
+
+echo ready. press any key to commit START.
+::timeout /t 1
+pause
+
+git commit -m "%comment%"
+
+git push -u origin main
+
+pause
config.json
ADDED
@@ -0,0 +1,326 @@
+{
+    "ldsr_steps": 100,
+    "ldsr_cached": false,
+    "SCUNET_tile": 256,
+    "SCUNET_tile_overlap": 8,
+    "SWIN_tile": 192,
+    "SWIN_tile_overlap": 8,
+    "SWIN_torch_compile": false,
+    "control_net_detectedmap_dir": "detected_maps",
+    "control_net_models_path": "",
+    "control_net_modules_path": "",
+    "control_net_unit_count": 3,
+    "control_net_model_cache_size": 5,
+    "control_net_no_detectmap": false,
+    "control_net_detectmap_autosaving": false,
+    "control_net_allow_script_control": false,
+    "control_net_sync_field_args": true,
+    "controlnet_show_batch_images_in_ui": false,
+    "controlnet_increment_seed_during_batch": false,
+    "controlnet_disable_openpose_edit": false,
+    "controlnet_disable_photopea_edit": false,
+    "controlnet_photopea_warning": true,
+    "controlnet_input_thumbnail": true,
+    "sd_checkpoint_hash": "821aa5537f8ddafdbf963827551865c31c5bbfab1abe7925cb5f006c8f71e485",
+    "sd_model_checkpoint": "autismmixSDXL_autismmixPony.safetensors [821aa5537f]",
+    "use_downcasted_alpha_bar": true,
+    "CLIP_stop_at_last_layers": 2,
+    "k_sched_type": "Automatic",
+    "sd_vae": "sdxlVAE_sdxlVAE.safetensors",
+    "uni_pc_order": 3,
+    "outdir_samples": "",
+    "outdir_txt2img_samples": "output/txt2img-images",
+    "outdir_img2img_samples": "output/img2img-images",
+    "outdir_extras_samples": "output/extras-images",
+    "outdir_grids": "",
+    "outdir_txt2img_grids": "output/txt2img-grids",
+    "outdir_img2img_grids": "output/img2img-grids",
+    "outdir_save": "log/images",
+    "outdir_init_images": "output/init-images",
+    "samples_save": true,
+    "samples_format": "webp",
+    "samples_filename_pattern": "[seed]-",
+    "save_images_add_number": true,
+    "save_images_replace_action": "Replace",
+    "grid_save": false,
+    "grid_format": "png",
+    "grid_extended_filename": true,
+    "grid_only_if_multiple": true,
+    "grid_prevent_empty_spots": false,
+    "grid_zip_filename_pattern": "",
+    "n_rows": -1,
+    "font": "",
+    "grid_text_active_color": "#000000",
+    "grid_text_inactive_color": "#999999",
+    "grid_background_color": "#ffffff",
+    "save_images_before_face_restoration": false,
+    "save_images_before_highres_fix": false,
+    "save_images_before_color_correction": false,
+    "save_mask": false,
+    "save_mask_composite": false,
+    "jpeg_quality": 80,
+    "webp_lossless": false,
+    "export_for_4chan": true,
+    "img_downscale_threshold": 4.0,
+    "target_side_length": 4000.0,
+    "img_max_size_mp": 200.0,
+    "use_original_name_batch": true,
+    "use_upscaler_name_as_suffix": false,
+    "save_selected_only": true,
+    "save_init_img": false,
+    "temp_dir": "",
+    "clean_temp_dir_at_start": false,
+    "save_incomplete_images": false,
+    "notification_audio": true,
+    "notification_volume": 100,
+    "save_to_dirs": false,
+    "grid_save_to_dirs": false,
+    "use_save_to_dirs_for_ui": false,
+    "directories_filename_pattern": "[date]",
+    "directories_max_prompt_words": 8,
+    "auto_backcompat": true,
+    "use_old_emphasis_implementation": false,
+    "use_old_karras_scheduler_sigmas": false,
+    "no_dpmpp_sde_batch_determinism": false,
+    "use_old_hires_fix_width_height": false,
+    "dont_fix_second_order_samplers_schedule": false,
+    "hires_fix_use_firstpass_conds": false,
+    "use_old_scheduling": false,
+    "lora_functional": false,
+    "extra_networks_show_hidden_directories": true,
+    "extra_networks_dir_button_function": false,
+    "extra_networks_hidden_models": "When searched",
+    "extra_networks_default_multiplier": 1,
+    "extra_networks_card_width": 0.0,
+    "extra_networks_card_height": 0.0,
+    "extra_networks_card_text_scale": 1,
+    "extra_networks_card_show_desc": true,
+    "extra_networks_card_description_is_html": false,
+    "extra_networks_card_order_field": "Path",
+    "extra_networks_card_order": "Ascending",
+    "extra_networks_tree_view_default_enabled": false,
+    "extra_networks_add_text_separator": " ",
+    "ui_extra_networks_tab_reorder": "",
+    "textual_inversion_print_at_load": false,
+    "textual_inversion_add_hashes_to_infotext": true,
+    "sd_hypernetwork": "None",
+    "sd_lora": "None",
+    "lora_preferred_name": "Alias from file",
+    "lora_add_hashes_to_infotext": true,
+    "lora_show_all": false,
+    "lora_hide_unknown_for_versions": [],
+    "lora_in_memory_limit": 0,
+    "lora_not_found_warning_console": false,
+    "lora_not_found_gradio_warning": false,
+    "cross_attention_optimization": "Automatic",
+    "s_min_uncond": 0,
+    "token_merging_ratio": 0,
+    "token_merging_ratio_img2img": 0,
+    "token_merging_ratio_hr": 0,
+    "pad_cond_uncond": false,
+    "pad_cond_uncond_v0": false,
+    "persistent_cond_cache": true,
+    "batch_cond_uncond": true,
+    "fp8_storage": "Enable for SDXL",
+    "cache_fp16_weight": false,
+    "hide_samplers": [],
+    "eta_ddim": 0,
+    "eta_ancestral": 1,
+    "ddim_discretize": "uniform",
+    "s_churn": 0,
+    "s_tmin": 0,
+    "s_tmax": 0,
+    "s_noise": 1,
+    "sigma_min": 0.0,
+    "sigma_max": 0.0,
+    "rho": 0.0,
+    "eta_noise_seed_delta": 0,
+    "always_discard_next_to_last_sigma": false,
+    "sgm_noise_multiplier": false,
+    "uni_pc_variant": "bh1",
+    "uni_pc_skip_type": "time_uniform",
+    "uni_pc_lower_order_final": true,
+    "sd_noise_schedule": "Default",
+    "sd_checkpoints_limit": 1,
+    "sd_checkpoints_keep_in_cpu": true,
+    "sd_checkpoint_cache": 0,
+    "sd_unet": "Automatic",
+    "enable_quantization": false,
+    "emphasis": "Original",
+    "enable_batch_seeds": true,
+    "comma_padding_backtrack": 20,
+    "upcast_attn": false,
+    "randn_source": "CPU",
+    "tiling": false,
+    "hires_fix_refiner_pass": "second pass",
+    "enable_prompt_comments": true,
+    "sdxl_crop_top": 0.0,
+    "sdxl_crop_left": 0.0,
+    "sdxl_refiner_low_aesthetic_score": 2.5,
+    "sdxl_refiner_high_aesthetic_score": 6.0,
+    "sd_vae_checkpoint_cache": 0,
+    "sd_vae_overrides_per_model_preferences": true,
+    "auto_vae_precision_bfloat16": false,
+    "auto_vae_precision": true,
+    "sd_vae_encode_method": "Full",
+    "sd_vae_decode_method": "Full",
+    "inpainting_mask_weight": 1,
+    "initial_noise_multiplier": 1,
+    "img2img_extra_noise": 0,
+    "img2img_color_correction": true,
+    "img2img_fix_steps": false,
+    "img2img_background_color": "#ffffff",
+    "img2img_editor_height": 720,
+    "img2img_sketch_default_brush_color": "#ffffff",
+    "img2img_inpaint_mask_brush_color": "#ffffff",
+    "img2img_inpaint_sketch_default_brush_color": "#ffffff",
+    "return_mask": false,
+    "return_mask_composite": false,
+    "img2img_batch_show_results_limit": 32,
+    "overlay_inpaint": true,
+    "return_grid": true,
+    "do_not_show_images": false,
+    "js_modal_lightbox": true,
+    "js_modal_lightbox_initially_zoomed": true,
+    "js_modal_lightbox_gamepad": false,
+    "js_modal_lightbox_gamepad_repeat": 250.0,
+    "sd_webui_modal_lightbox_icon_opacity": 1,
+    "sd_webui_modal_lightbox_toolbar_opacity": 0.9,
+    "gallery_height": "",
+    "open_dir_button_choice": "Subdirectory",
+    "enable_pnginfo": true,
+    "save_txt": false,
+    "add_model_name_to_info": true,
+    "add_model_hash_to_info": true,
+    "add_vae_name_to_info": true,
+    "add_vae_hash_to_info": true,
+    "add_user_name_to_info": false,
+    "add_version_to_infotext": true,
+    "disable_weights_auto_swap": true,
+    "infotext_skip_pasting": [],
+    "infotext_styles": "Apply if any",
+    "show_progressbar": true,
+    "live_previews_enable": true,
+    "live_previews_image_format": "png",
+    "show_progress_grid": true,
+    "show_progress_every_n_steps": 10,
+    "show_progress_type": "Approx NN",
+    "live_preview_allow_lowvram_full": false,
+    "live_preview_content": "Prompt",
+    "live_preview_refresh_period": 1000.0,
+    "live_preview_fast_interrupt": false,
+    "js_live_preview_in_modal_lightbox": false,
+    "keyedit_precision_attention": 0.1,
+    "keyedit_precision_extra": 0.05,
+    "keyedit_delimiters": ".,\\/!?%^*;:{}=`~() ",
+    "keyedit_delimiters_whitespace": [
+        "Tab",
+        "Carriage Return",
+        "Line Feed"
+    ],
+    "keyedit_move": true,
+    "disable_token_counters": false,
+    "include_styles_into_token_counters": true,
+    "extra_options_txt2img": [],
+    "extra_options_img2img": [],
+    "extra_options_cols": 1,
+    "extra_options_accordion": false,
+    "compact_prompt_box": false,
+    "samplers_in_dropdown": true,
+    "dimensions_and_batch_together": true,
+    "sd_checkpoint_dropdown_use_short": false,
+    "hires_fix_show_sampler": false,
+    "hires_fix_show_prompts": false,
+    "txt2img_settings_accordion": false,
+    "img2img_settings_accordion": false,
+    "interrupt_after_current": true,
+    "localization": "None",
+    "quicksettings_list": [
+        "sd_model_checkpoint",
+        "sd_vae",
+        "CLIP_stop_at_last_layers"
+    ],
+    "ui_tab_order": [],
+    "hidden_tabs": [],
+    "ui_reorder_list": [],
+    "gradio_theme": "Default",
+    "gradio_themes_cache": true,
+    "show_progress_in_title": true,
+    "send_seed": true,
+    "send_size": true,
+    "api_enable_requests": true,
+    "api_forbid_local_requests": true,
+    "api_useragent": "",
+    "auto_launch_browser": "Local",
+    "enable_console_prompts": false,
+    "show_warnings": false,
+    "show_gradio_deprecation_warnings": true,
+    "memmon_poll_rate": 8,
+    "samples_log_stdout": false,
+    "multiple_tqdm": true,
+    "enable_upscale_progressbar": true,
+    "print_hypernet_extra": false,
+    "list_hidden_files": true,
+    "disable_mmap_load_safetensors": false,
+    "hide_ldm_prints": true,
+    "dump_stacks_on_signal": false,
+    "face_restoration": false,
+    "face_restoration_model": "CodeFormer",
+    "code_former_weight": 0.5,
+    "face_restoration_unload": false,
+    "postprocessing_enable_in_main_ui": [],
+    "postprocessing_operation_order": [],
+    "upscaling_max_images_in_cache": 5,
+    "postprocessing_existing_caption_action": "Ignore",
+    "ESRGAN_tile": 192,
+    "ESRGAN_tile_overlap": 8,
+    "realesrgan_enabled_models": [
+        "R-ESRGAN 4x+",
+        "R-ESRGAN 4x+ Anime6B"
+    ],
+    "dat_enabled_models": [
+        "DAT x2",
+        "DAT x3",
+        "DAT x4"
+    ],
+    "DAT_tile": 192,
+    "DAT_tile_overlap": 8,
+    "unload_models_when_training": false,
+    "pin_memory": false,
+    "save_optimizer_state": false,
+    "save_training_settings_to_txt": true,
+    "dataset_filename_word_regex": "",
+    "dataset_filename_join_string": " ",
+    "training_image_repeats_per_epoch": 1,
+    "training_write_csv_every": 500.0,
+    "training_xattention_optimizations": false,
+    "training_enable_tensorboard": false,
+    "training_tensorboard_save_images": false,
+    "training_tensorboard_flush_every": 120.0,
+    "canvas_hotkey_zoom": "Alt",
+    "canvas_hotkey_adjust": "Ctrl",
+    "canvas_hotkey_shrink_brush": "Q",
+    "canvas_hotkey_grow_brush": "W",
+    "canvas_hotkey_move": "F",
+    "canvas_hotkey_fullscreen": "S",
+    "canvas_hotkey_reset": "R",
+    "canvas_hotkey_overlap": "O",
+    "canvas_show_tooltip": true,
+    "canvas_auto_expand": true,
+    "canvas_blur_prompt": false,
+    "canvas_disabled_functions": [
+        "Overlap"
+    ],
+    "interrogate_keep_models_in_memory": false,
+    "interrogate_return_ranks": false,
+    "interrogate_clip_num_beams": 1,
+    "interrogate_clip_min_length": 24,
+    "interrogate_clip_max_length": 48,
+    "interrogate_clip_dict_limit": 1500.0,
+    "interrogate_clip_skip_categories": [],
+    "interrogate_deepbooru_score_threshold": 0.5,
+    "deepbooru_sort_alpha": true,
+    "deepbooru_use_spaces": true,
+    "deepbooru_escape": true,
+    "deepbooru_filter_tags": ""
+}
configs/alt-diffusion-inference.yaml
ADDED
@@ -0,0 +1,72 @@
+model:
+  base_learning_rate: 1.0e-04
+  target: ldm.models.diffusion.ddpm.LatentDiffusion
+  params:
+    linear_start: 0.00085
+    linear_end: 0.0120
+    num_timesteps_cond: 1
+    log_every_t: 200
+    timesteps: 1000
+    first_stage_key: "jpg"
+    cond_stage_key: "txt"
+    image_size: 64
+    channels: 4
+    cond_stage_trainable: false   # Note: different from the one we trained before
+    conditioning_key: crossattn
+    monitor: val/loss_simple_ema
+    scale_factor: 0.18215
+    use_ema: False
+
+    scheduler_config: # 10000 warmup steps
+      target: ldm.lr_scheduler.LambdaLinearScheduler
+      params:
+        warm_up_steps: [ 10000 ]
+        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+        f_start: [ 1.e-6 ]
+        f_max: [ 1. ]
+        f_min: [ 1. ]
+
+    unet_config:
+      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      params:
+        image_size: 32 # unused
+        in_channels: 4
+        out_channels: 4
+        model_channels: 320
+        attention_resolutions: [ 4, 2, 1 ]
+        num_res_blocks: 2
+        channel_mult: [ 1, 2, 4, 4 ]
+        num_heads: 8
+        use_spatial_transformer: True
+        transformer_depth: 1
+        context_dim: 768
+        use_checkpoint: True
+        legacy: False
+
+    first_stage_config:
+      target: ldm.models.autoencoder.AutoencoderKL
+      params:
+        embed_dim: 4
+        monitor: val/rec_loss
+        ddconfig:
+          double_z: true
+          z_channels: 4
+          resolution: 256
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult:
+          - 1
+          - 2
+          - 4
+          - 4
+          num_res_blocks: 2
+          attn_resolutions: []
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
+
+    cond_stage_config:
+      target: modules.xlmr.BertSeriesModelWithTransformation
+      params:
+        name: "XLMR-Large"
configs/alt-diffusion-m18-inference.yaml
ADDED
@@ -0,0 +1,73 @@
+model:
+  base_learning_rate: 1.0e-04
+  target: ldm.models.diffusion.ddpm.LatentDiffusion
+  params:
+    linear_start: 0.00085
+    linear_end: 0.0120
+    num_timesteps_cond: 1
+    log_every_t: 200
+    timesteps: 1000
+    first_stage_key: "jpg"
+    cond_stage_key: "txt"
+    image_size: 64
+    channels: 4
+    cond_stage_trainable: false   # Note: different from the one we trained before
+    conditioning_key: crossattn
+    monitor: val/loss_simple_ema
+    scale_factor: 0.18215
+    use_ema: False
+
+    scheduler_config: # 10000 warmup steps
+      target: ldm.lr_scheduler.LambdaLinearScheduler
+      params:
+        warm_up_steps: [ 10000 ]
+        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+        f_start: [ 1.e-6 ]
+        f_max: [ 1. ]
+        f_min: [ 1. ]
+
+    unet_config:
+      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      params:
+        image_size: 32 # unused
+        in_channels: 4
+        out_channels: 4
+        model_channels: 320
+        attention_resolutions: [ 4, 2, 1 ]
+        num_res_blocks: 2
+        channel_mult: [ 1, 2, 4, 4 ]
+        num_head_channels: 64
+        use_spatial_transformer: True
+        use_linear_in_transformer: True
+        transformer_depth: 1
+        context_dim: 1024
+        use_checkpoint: True
+        legacy: False
+
+    first_stage_config:
+      target: ldm.models.autoencoder.AutoencoderKL
+      params:
+        embed_dim: 4
+        monitor: val/rec_loss
+        ddconfig:
+          double_z: true
+          z_channels: 4
+          resolution: 256
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult:
+          - 1
+          - 2
+          - 4
+          - 4
+          num_res_blocks: 2
+          attn_resolutions: []
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
+
+    cond_stage_config:
+      target: modules.xlmr_m18.BertSeriesModelWithTransformation
+      params:
+        name: "XLMR-Large"
configs/instruct-pix2pix.yaml
ADDED
@@ -0,0 +1,98 @@
+# File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion).
+# See more details in LICENSE.
+
+model:
+  base_learning_rate: 1.0e-04
+  target: modules.models.diffusion.ddpm_edit.LatentDiffusion
+  params:
+    linear_start: 0.00085
+    linear_end: 0.0120
+    num_timesteps_cond: 1
+    log_every_t: 200
+    timesteps: 1000
+    first_stage_key: edited
+    cond_stage_key: edit
+    # image_size: 64
+    # image_size: 32
+    image_size: 16
+    channels: 4
+    cond_stage_trainable: false   # Note: different from the one we trained before
+    conditioning_key: hybrid
+    monitor: val/loss_simple_ema
+    scale_factor: 0.18215
+    use_ema: false
+
+    scheduler_config: # 10000 warmup steps
+      target: ldm.lr_scheduler.LambdaLinearScheduler
+      params:
+        warm_up_steps: [ 0 ]
+        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+        f_start: [ 1.e-6 ]
+        f_max: [ 1. ]
+        f_min: [ 1. ]
+
+    unet_config:
+      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      params:
+        image_size: 32 # unused
+        in_channels: 8
+        out_channels: 4
+        model_channels: 320
+        attention_resolutions: [ 4, 2, 1 ]
+        num_res_blocks: 2
+        channel_mult: [ 1, 2, 4, 4 ]
+        num_heads: 8
+        use_spatial_transformer: True
+        transformer_depth: 1
+        context_dim: 768
+        use_checkpoint: True
+        legacy: False
+
+    first_stage_config:
+      target: ldm.models.autoencoder.AutoencoderKL
+      params:
+        embed_dim: 4
+        monitor: val/rec_loss
+        ddconfig:
+          double_z: true
+          z_channels: 4
+          resolution: 256
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult:
+          - 1
+          - 2
+          - 4
+          - 4
+          num_res_blocks: 2
+          attn_resolutions: []
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
+
+    cond_stage_config:
+      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+
+data:
+  target: main.DataModuleFromConfig
+  params:
+    batch_size: 128
+    num_workers: 1
+    wrap: false
+    validation:
+      target: edit_dataset.EditDataset
+      params:
+        path: data/clip-filtered-dataset
+        cache_dir: data/
+        cache_name: data_10k
+        split: val
+        min_text_sim: 0.2
+        min_image_sim: 0.75
+        min_direction_sim: 0.2
+        max_samples_per_prompt: 1
+        min_resize_res: 512
+        max_resize_res: 512
+        crop_res: 512
+        output_as_edit: False
+        real_input: True
configs/sd_xl_inpaint.yaml
ADDED
@@ -0,0 +1,98 @@
+model:
+  target: sgm.models.diffusion.DiffusionEngine
+  params:
+    scale_factor: 0.13025
+    disable_first_stage_autocast: True
+
+    denoiser_config:
+      target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
+      params:
+        num_idx: 1000
+
+        weighting_config:
+          target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting
+        scaling_config:
+          target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
+        discretization_config:
+          target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
+
+    network_config:
+      target: sgm.modules.diffusionmodules.openaimodel.UNetModel
+      params:
+        adm_in_channels: 2816
+        num_classes: sequential
+        use_checkpoint: True
+        in_channels: 9
+        out_channels: 4
+        model_channels: 320
+        attention_resolutions: [4, 2]
+        num_res_blocks: 2
+        channel_mult: [1, 2, 4]
+        num_head_channels: 64
+        use_spatial_transformer: True
+        use_linear_in_transformer: True
+        transformer_depth: [1, 2, 10]  # note: the first is unused (due to attn_res starting at 2) 32, 16, 8 --> 64, 32, 16
+        context_dim: 2048
+        spatial_transformer_attn_type: softmax-xformers
+        legacy: False
+
+    conditioner_config:
+      target: sgm.modules.GeneralConditioner
+      params:
+        emb_models:
+          # crossattn cond
+          - is_trainable: False
+            input_key: txt
+            target: sgm.modules.encoders.modules.FrozenCLIPEmbedder
+            params:
+              layer: hidden
+              layer_idx: 11
+          # crossattn and vector cond
+          - is_trainable: False
+            input_key: txt
+            target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder2
+            params:
+              arch: ViT-bigG-14
+              version: laion2b_s39b_b160k
+              freeze: True
+              layer: penultimate
+              always_return_pooled: True
+              legacy: False
+          # vector cond
+          - is_trainable: False
+            input_key: original_size_as_tuple
+            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
+            params:
+              outdim: 256  # multiplied by two
+          # vector cond
+          - is_trainable: False
+            input_key: crop_coords_top_left
+            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
+            params:
+              outdim: 256  # multiplied by two
+          # vector cond
+          - is_trainable: False
+            input_key: target_size_as_tuple
+            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
+            params:
+              outdim: 256  # multiplied by two
+
+    first_stage_config:
+      target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper
+      params:
+        embed_dim: 4
+        monitor: val/rec_loss
+        ddconfig:
+          attn_type: vanilla-xformers
+          double_z: true
+          z_channels: 4
+          resolution: 256
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult: [1, 2, 4, 4]
+          num_res_blocks: 2
+          attn_resolutions: []
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
configs/v1-inference.yaml
ADDED
@@ -0,0 +1,70 @@
+model:
+  base_learning_rate: 1.0e-04
+  target: ldm.models.diffusion.ddpm.LatentDiffusion
+  params:
+    linear_start: 0.00085
+    linear_end: 0.0120
+    num_timesteps_cond: 1
+    log_every_t: 200
+    timesteps: 1000
+    first_stage_key: "jpg"
+    cond_stage_key: "txt"
+    image_size: 64
+    channels: 4
+    cond_stage_trainable: false   # Note: different from the one we trained before
+    conditioning_key: crossattn
+    monitor: val/loss_simple_ema
+    scale_factor: 0.18215
+    use_ema: False
+
+    scheduler_config: # 10000 warmup steps
+      target: ldm.lr_scheduler.LambdaLinearScheduler
+      params:
+        warm_up_steps: [ 10000 ]
+        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+        f_start: [ 1.e-6 ]
+        f_max: [ 1. ]
+        f_min: [ 1. ]
+
+    unet_config:
+      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      params:
+        image_size: 32 # unused
+        in_channels: 4
+        out_channels: 4
+        model_channels: 320
+        attention_resolutions: [ 4, 2, 1 ]
+        num_res_blocks: 2
+        channel_mult: [ 1, 2, 4, 4 ]
+        num_heads: 8
+        use_spatial_transformer: True
+        transformer_depth: 1
+        context_dim: 768
+        use_checkpoint: True
+        legacy: False
+
+    first_stage_config:
+      target: ldm.models.autoencoder.AutoencoderKL
+      params:
+        embed_dim: 4
+        monitor: val/rec_loss
+        ddconfig:
+          double_z: true
+          z_channels: 4
+          resolution: 256
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult:
+          - 1
+          - 2
+          - 4
+          - 4
+          num_res_blocks: 2
+          attn_resolutions: []
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
+
+    cond_stage_config:
+      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
configs/v1-inpainting-inference.yaml
ADDED
@@ -0,0 +1,70 @@
+model:
+  base_learning_rate: 7.5e-05
+  target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion
+  params:
+    linear_start: 0.00085
+    linear_end: 0.0120
+    num_timesteps_cond: 1
+    log_every_t: 200
+    timesteps: 1000
+    first_stage_key: "jpg"
+    cond_stage_key: "txt"
+    image_size: 64
+    channels: 4
+    cond_stage_trainable: false   # Note: different from the one we trained before
+    conditioning_key: hybrid   # important
+    monitor: val/loss_simple_ema
+    scale_factor: 0.18215
+    finetune_keys: null
+
+    scheduler_config: # 10000 warmup steps
+      target: ldm.lr_scheduler.LambdaLinearScheduler
+      params:
+        warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch
+        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+        f_start: [ 1.e-6 ]
+        f_max: [ 1. ]
+        f_min: [ 1. ]
+
+    unet_config:
+      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      params:
+        image_size: 32 # unused
+        in_channels: 9  # 4 data + 4 downscaled image + 1 mask
+        out_channels: 4
+        model_channels: 320
+        attention_resolutions: [ 4, 2, 1 ]
+        num_res_blocks: 2
+        channel_mult: [ 1, 2, 4, 4 ]
+        num_heads: 8
+        use_spatial_transformer: True
+        transformer_depth: 1
+        context_dim: 768
+        use_checkpoint: True
+        legacy: False
+
+    first_stage_config:
+      target: ldm.models.autoencoder.AutoencoderKL
+      params:
+        embed_dim: 4
+        monitor: val/rec_loss
+        ddconfig:
+          double_z: true
+          z_channels: 4
+          resolution: 256
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult:
+          - 1
+          - 2
+          - 4
+          - 4
+          num_res_blocks: 2
+          attn_resolutions: []
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
+
+    cond_stage_config:
+      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
downs.sh
ADDED
@@ -0,0 +1,105 @@
+#!/bin/bash
+
+# 1 2 3 ..
+CHECKPOINT=$1
+
+# 1 2 3 4 5 .. OR 12 235 ..
+RC=$2
+
+GOORM="0"
+if [ -d "/workspace/firstContainer" ]; then
+    GOORM="1"
+fi
+
+RELATIVE_DIR=`dirname "$0"`
+cd $RELATIVE_DIR
+
+if [ -d "SD" ]; then
+    cd SD
+else
+    cd SD*
+fi
+
+cd models
+cd Stable-diffusion
+
+CK_LINK=""
+
+case $CHECKPOINT in
+    0)
+        echo "CHECKPOINT pass."
+        ;;
+    1)
+        CK_LINK="https://huggingface.co/Magamanny/Pony-Diffusion-V6-XL/resolve/main/ponyDiffusionV6XL_v6StartWithThisOne.safetensors"
+        ;;
+    2)
+        CK_LINK="https://huggingface.co/JosefJilek/loliDiffusion/resolve/main/loliDiffusionV1.1.0_PDXL_Lx7-CLIP_VAE_FP16.safetensors"
+        ;;
+    3)
+        CK_LINK="https://huggingface.co/zuv0/test/resolve/main/MINTSDXL_LollipopMIX_A1-fp32.safetensors"
+        ;;
+    4)
+        CK_LINK="https://huggingface.co/Bulkbogan20/autismmix/resolve/main/autismmixSDXL_autismmixPony.safetensors"
+        ;;
+    5)
+        CK_LINK="https://huggingface.co/zuv0/test/resolve/main/waiCUTE_v20.safetensors"
+        ;;
+    6)
+        CK_LINK="https://huggingface.co/GianPehn/PD_for_Anime/resolve/main/pdForAnime_v20.safetensors"
+        ;;
+    8)
+        CK_LINK="https://huggingface.co/Walkearth4/Collection/resolve/main/chenkinAnimeHotbaby_v20.safetensors"
+        ;;
+    *)
+        echo "CHECKPOINT error!"
+        ;;
+esac
+
+if [ -n "$CK_LINK" ]; then
+    CK_FILE=${CK_LINK##*/}
+    if [ -f "$CK_FILE" ]; then
+        \mv -f "$CK_FILE" "../$CK_FILE"
+        rm -rf *.safetensors
+        rm -rf *.ckpt
+        \mv -f "../$CK_FILE" "$CK_FILE"
+    else
+        rm -rf *.safetensors
+        rm -rf *.ckpt
+        if [ "$GOORM" == "1" ]; then
+            wget --limit-rate=49999k -N "$CK_LINK"
+        else
+            wget -N "$CK_LINK"
+        fi
+    fi
+fi
+
+cd ..
+cd Lora
+
+case $RC in
+    0)
+        echo "RCXL pass."
+        ;;
+    *)
+        RCLIST="1 2 3 4 5 6 7 8 9"
+
+        for var in $RCLIST
+        do
+            rm -rf RC$var
+        done
+
+        while [ $RC -gt 0 ]; do
+            digit=$((RC % 10))
+            if [ ! -d "RCXL$digit" ]; then
+                git clone https://huggingface.co/zuv0/RCXL$digit
+                cd RCXL$digit
+                git repack -a -d --depth=250 --window=250
+                cd .git
+                rm -rf lfs
+                cd ..
+                cd ..
+            fi
+            RC=$((RC / 10))
+        done
+        ;;
+esac
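
For reference, downs.sh takes a checkpoint index as $1 and a digit-packed selector as $2, where each decimal digit of $2 names one RCXL LoRA repository to clone. Hedged usage sketches, assuming the invocation path used by apatch.sh above:

    sh ./SD/downs.sh 4 0     # fetch checkpoint 4 (autismmixSDXL), skip LoRA repos
    sh ./SD/downs.sh 0 13    # keep current checkpoint; clone RCXL3 then RCXL1
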
embeddings/Place Textual Inversion embeddings here.txt
ADDED
File without changes
environment-wsl2.yaml
ADDED
@@ -0,0 +1,11 @@
+name: automatic
+channels:
+  - pytorch
+  - defaults
+dependencies:
+  - python=3.10
+  - pip=23.0
+  - cudatoolkit=11.8
+  - pytorch=2.0
+  - torchvision=0.15
+  - numpy=1.23
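
This conda environment pins Python 3.10 with PyTorch 2.0 against CUDA 11.8 for WSL2. Creating and activating it follows the standard conda workflow:

    conda env create -f environment-wsl2.yaml
    conda activate automatic
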
extensions-builtin/LDSR/__pycache__/ldsr_model_arch.cpython-310.pyc
ADDED
Binary file (6.69 kB).
extensions-builtin/LDSR/__pycache__/preload.cpython-310.pyc
ADDED
Binary file (488 Bytes).
extensions-builtin/LDSR/__pycache__/sd_hijack_autoencoder.cpython-310.pyc
ADDED
Binary file (8.92 kB).
extensions-builtin/LDSR/__pycache__/sd_hijack_ddpm_v1.cpython-310.pyc
ADDED
Binary file (42.4 kB).
extensions-builtin/LDSR/__pycache__/vqvae_quantize.cpython-310.pyc
ADDED
Binary file (3.64 kB).
extensions-builtin/LDSR/ldsr_model_arch.py
ADDED
@@ -0,0 +1,250 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import gc
import time

import numpy as np
import torch
import torchvision
from PIL import Image
from einops import rearrange, repeat
from omegaconf import OmegaConf
import safetensors.torch

from ldm.models.diffusion.ddim import DDIMSampler
from ldm.util import instantiate_from_config, ismap
from modules import shared, sd_hijack, devices

cached_ldsr_model: torch.nn.Module = None


# Create LDSR Class
class LDSR:
    def load_model_from_config(self, half_attention):
        global cached_ldsr_model

        if shared.opts.ldsr_cached and cached_ldsr_model is not None:
            print("Loading model from cache")
            model: torch.nn.Module = cached_ldsr_model
        else:
            print(f"Loading model from {self.modelPath}")
            _, extension = os.path.splitext(self.modelPath)
            if extension.lower() == ".safetensors":
                pl_sd = safetensors.torch.load_file(self.modelPath, device="cpu")
            else:
                pl_sd = torch.load(self.modelPath, map_location="cpu")
            sd = pl_sd["state_dict"] if "state_dict" in pl_sd else pl_sd
            config = OmegaConf.load(self.yamlPath)
            config.model.target = "ldm.models.diffusion.ddpm.LatentDiffusionV1"
            model: torch.nn.Module = instantiate_from_config(config.model)
            model.load_state_dict(sd, strict=False)
            model = model.to(shared.device)
            if half_attention:
                model = model.half()
            if shared.cmd_opts.opt_channelslast:
                model = model.to(memory_format=torch.channels_last)

            sd_hijack.model_hijack.hijack(model)  # apply optimization
            model.eval()

            if shared.opts.ldsr_cached:
                cached_ldsr_model = model

        return {"model": model}

    def __init__(self, model_path, yaml_path):
        self.modelPath = model_path
        self.yamlPath = yaml_path

    @staticmethod
    def run(model, selected_path, custom_steps, eta):
        example = get_cond(selected_path)

        n_runs = 1
        guider = None
        ckwargs = None
        ddim_use_x0_pred = False
        temperature = 1.
        eta = eta
        custom_shape = None

        height, width = example["image"].shape[1:3]
        split_input = height >= 128 and width >= 128

        if split_input:
            ks = 128
            stride = 64
            vqf = 4  #
            model.split_input_params = {"ks": (ks, ks), "stride": (stride, stride),
                                        "vqf": vqf,
                                        "patch_distributed_vq": True,
                                        "tie_braker": False,
                                        "clip_max_weight": 0.5,
                                        "clip_min_weight": 0.01,
                                        "clip_max_tie_weight": 0.5,
                                        "clip_min_tie_weight": 0.01}
        else:
            if hasattr(model, "split_input_params"):
                delattr(model, "split_input_params")

        x_t = None
        logs = None
        for _ in range(n_runs):
            if custom_shape is not None:
                x_t = torch.randn(1, custom_shape[1], custom_shape[2], custom_shape[3]).to(model.device)
                x_t = repeat(x_t, '1 c h w -> b c h w', b=custom_shape[0])

            logs = make_convolutional_sample(example, model,
                                             custom_steps=custom_steps,
                                             eta=eta, quantize_x0=False,
                                             custom_shape=custom_shape,
                                             temperature=temperature, noise_dropout=0.,
                                             corrector=guider, corrector_kwargs=ckwargs, x_T=x_t,
                                             ddim_use_x0_pred=ddim_use_x0_pred
                                             )
        return logs

    def super_resolution(self, image, steps=100, target_scale=2, half_attention=False):
        model = self.load_model_from_config(half_attention)

        # Run settings
        diffusion_steps = int(steps)
        eta = 1.0


        gc.collect()
        devices.torch_gc()

        im_og = image
        width_og, height_og = im_og.size
        # If we can adjust the max upscale size, then the 4 below should be our variable
        down_sample_rate = target_scale / 4
        wd = width_og * down_sample_rate
        hd = height_og * down_sample_rate
        width_downsampled_pre = int(np.ceil(wd))
        height_downsampled_pre = int(np.ceil(hd))

        if down_sample_rate != 1:
            print(
                f'Downsampling from [{width_og}, {height_og}] to [{width_downsampled_pre}, {height_downsampled_pre}]')
            im_og = im_og.resize((width_downsampled_pre, height_downsampled_pre), Image.LANCZOS)
        else:
            print(f"Down sample rate is 1 from {target_scale} / 4 (Not downsampling)")

        # pad width and height to multiples of 64, pads with the edge values of image to avoid artifacts
        pad_w, pad_h = np.max(((2, 2), np.ceil(np.array(im_og.size) / 64).astype(int)), axis=0) * 64 - im_og.size
        im_padded = Image.fromarray(np.pad(np.array(im_og), ((0, pad_h), (0, pad_w), (0, 0)), mode='edge'))

        logs = self.run(model["model"], im_padded, diffusion_steps, eta)

        sample = logs["sample"]
        sample = sample.detach().cpu()
        sample = torch.clamp(sample, -1., 1.)
        sample = (sample + 1.) / 2. * 255
        sample = sample.numpy().astype(np.uint8)
        sample = np.transpose(sample, (0, 2, 3, 1))
        a = Image.fromarray(sample[0])

        # remove padding
        a = a.crop((0, 0) + tuple(np.array(im_og.size) * 4))

        del model
        gc.collect()
        devices.torch_gc()

        return a
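
# Editor's note: a minimal usage sketch of the class above, not part of the
# original file. The checkpoint/config paths are hypothetical; in the webui they
# are resolved by UpscalerLDSR.load_model() in scripts/ldsr_model.py.
#
#     from PIL import Image
#     ldsr = LDSR("models/LDSR/model.ckpt", "models/LDSR/project.yaml")
#     result = ldsr.super_resolution(Image.open("input.png"), steps=100, target_scale=2)
#     result.save("upscaled.png")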


def get_cond(selected_path):
    example = {}
    up_f = 4
    c = selected_path.convert('RGB')
    c = torch.unsqueeze(torchvision.transforms.ToTensor()(c), 0)
    c_up = torchvision.transforms.functional.resize(c, size=[up_f * c.shape[2], up_f * c.shape[3]],
                                                    antialias=True)
    c_up = rearrange(c_up, '1 c h w -> 1 h w c')
    c = rearrange(c, '1 c h w -> 1 h w c')
    c = 2. * c - 1.

    c = c.to(shared.device)
    example["LR_image"] = c
    example["image"] = c_up

    return example
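
# Editor's note (illustrative, not part of the original file): for a 64x48 RGB
# input, get_cond() returns example["LR_image"] with shape (1, 48, 64, 3),
# rescaled to [-1, 1] and moved to shared.device, and example["image"] with
# shape (1, 192, 256, 3), the 4x-resized copy still in [0, 1] and left on CPU.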


@torch.no_grad()
def convsample_ddim(model, cond, steps, shape, eta=1.0, callback=None, normals_sequence=None,
                    mask=None, x0=None, quantize_x0=False, temperature=1., score_corrector=None,
                    corrector_kwargs=None, x_t=None
                    ):
    ddim = DDIMSampler(model)
    bs = shape[0]
    shape = shape[1:]
    print(f"Sampling with eta = {eta}; steps: {steps}")
    samples, intermediates = ddim.sample(steps, batch_size=bs, shape=shape, conditioning=cond, callback=callback,
                                         normals_sequence=normals_sequence, quantize_x0=quantize_x0, eta=eta,
                                         mask=mask, x0=x0, temperature=temperature, verbose=False,
                                         score_corrector=score_corrector,
                                         corrector_kwargs=corrector_kwargs, x_t=x_t)

    return samples, intermediates


@torch.no_grad()
def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize_x0=False, custom_shape=None, temperature=1., noise_dropout=0., corrector=None,
                              corrector_kwargs=None, x_T=None, ddim_use_x0_pred=False):
    log = {}

    z, c, x, xrec, xc = model.get_input(batch, model.first_stage_key,
                                        return_first_stage_outputs=True,
                                        force_c_encode=not (hasattr(model, 'split_input_params')
                                                            and model.cond_stage_key == 'coordinates_bbox'),
                                        return_original_cond=True)

    if custom_shape is not None:
        z = torch.randn(custom_shape)
        print(f"Generating {custom_shape[0]} samples of shape {custom_shape[1:]}")

    z0 = None

    log["input"] = x
    log["reconstruction"] = xrec

    if ismap(xc):
        log["original_conditioning"] = model.to_rgb(xc)
        if hasattr(model, 'cond_stage_key'):
            log[model.cond_stage_key] = model.to_rgb(xc)

    else:
        log["original_conditioning"] = xc if xc is not None else torch.zeros_like(x)
        if model.cond_stage_model:
            log[model.cond_stage_key] = xc if xc is not None else torch.zeros_like(x)
            if model.cond_stage_key == 'class_label':
                log[model.cond_stage_key] = xc[model.cond_stage_key]

    with model.ema_scope("Plotting"):
        t0 = time.time()

        sample, intermediates = convsample_ddim(model, c, steps=custom_steps, shape=z.shape,
                                                eta=eta,
                                                quantize_x0=quantize_x0, mask=None, x0=z0,
                                                temperature=temperature, score_corrector=corrector, corrector_kwargs=corrector_kwargs,
                                                x_t=x_T)
        t1 = time.time()

    if ddim_use_x0_pred:
        sample = intermediates['pred_x0'][-1]

    x_sample = model.decode_first_stage(sample)

    try:
        x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True)
        log["sample_noquant"] = x_sample_noquant
        log["sample_diff"] = torch.abs(x_sample_noquant - x_sample)
    except Exception:
        pass

    log["sample"] = x_sample
    log["time"] = t1 - t0

    return log
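A quick check of the pad-to-multiple-of-64 arithmetic used by super_resolution earlier in this file (the np.max against (2, 2) forces a minimum padded size of 128 per side):

    import numpy as np
    size = np.array((200, 130))   # (width, height) after downsampling
    pad_w, pad_h = np.max(((2, 2), np.ceil(size / 64).astype(int)), axis=0) * 64 - size
    # pad_w, pad_h == 56, 62 -> the padded image is 256 x 192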
extensions-builtin/LDSR/preload.py
ADDED
@@ -0,0 +1,6 @@
import os
from modules import paths


def preload(parser):
    parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(paths.models_path, 'LDSR'))
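A sketch of how such a preload hook is typically exercised; the argparse setup here is illustrative, since in the webui the parser is built by the launcher:

    import argparse
    parser = argparse.ArgumentParser()
    preload(parser)
    args = parser.parse_args(["--ldsr-models-path", "/tmp/LDSR"])
    print(args.ldsr_models_path)  # /tmp/LDSR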
extensions-builtin/LDSR/scripts/__pycache__/ldsr_model.cpython-310.pyc
ADDED
Binary file (3.29 kB)
extensions-builtin/LDSR/scripts/ldsr_model.py
ADDED
@@ -0,0 +1,70 @@
import os

from modules.modelloader import load_file_from_url
from modules.upscaler import Upscaler, UpscalerData
from modules_forge.forge_util import prepare_free_memory
from ldsr_model_arch import LDSR
from modules import shared, script_callbacks, errors
import sd_hijack_autoencoder  # noqa: F401
import sd_hijack_ddpm_v1  # noqa: F401


class UpscalerLDSR(Upscaler):
    def __init__(self, user_path):
        self.name = "LDSR"
        self.user_path = user_path
        self.model_url = "https://heibox.uni-heidelberg.de/f/578df07c8fc04ffbadf3/?dl=1"
        self.yaml_url = "https://heibox.uni-heidelberg.de/f/31a76b13ea27482981b4/?dl=1"
        super().__init__()
        scaler_data = UpscalerData("LDSR", None, self)
        self.scalers = [scaler_data]

    def load_model(self, path: str):
        # Remove incorrect project.yaml file if too big
        yaml_path = os.path.join(self.model_path, "project.yaml")
        old_model_path = os.path.join(self.model_path, "model.pth")
        new_model_path = os.path.join(self.model_path, "model.ckpt")

        local_model_paths = self.find_models(ext_filter=[".ckpt", ".safetensors"])
        local_ckpt_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("model.ckpt")]), None)
        local_safetensors_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("model.safetensors")]), None)
        local_yaml_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("project.yaml")]), None)

        if os.path.exists(yaml_path):
            statinfo = os.stat(yaml_path)
            if statinfo.st_size >= 10485760:
                print("Removing invalid LDSR YAML file.")
                os.remove(yaml_path)

        if os.path.exists(old_model_path):
            print("Renaming model from model.pth to model.ckpt")
            os.rename(old_model_path, new_model_path)

        if local_safetensors_path is not None and os.path.exists(local_safetensors_path):
            model = local_safetensors_path
        else:
            model = local_ckpt_path or load_file_from_url(self.model_url, model_dir=self.model_download_path, file_name="model.ckpt")

        yaml = local_yaml_path or load_file_from_url(self.yaml_url, model_dir=self.model_download_path, file_name="project.yaml")

        return LDSR(model, yaml)
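
    # Editor's note (not part of the original file): the 10485760-byte threshold
    # above is 10 MiB; a project.yaml that large is presumably a mis-downloaded
    # model checkpoint rather than a real config, so it is deleted and re-fetched.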

    def do_upscale(self, img, path):
        prepare_free_memory(aggressive=True)
        try:
            ldsr = self.load_model(path)
        except Exception:
            errors.report(f"Failed loading LDSR model {path}", exc_info=True)
            return img
        ddim_steps = shared.opts.ldsr_steps
        return ldsr.super_resolution(img, ddim_steps, self.scale)


def on_ui_settings():
    import gradio as gr

    shared.opts.add_option("ldsr_steps", shared.OptionInfo(100, "LDSR processing steps. Lower = faster", gr.Slider, {"minimum": 1, "maximum": 200, "step": 1}, section=('upscaling', "Upscaling")))
    shared.opts.add_option("ldsr_cached", shared.OptionInfo(False, "Cache LDSR model in memory", gr.Checkbox, {"interactive": True}, section=('upscaling', "Upscaling")))


script_callbacks.on_ui_settings(on_ui_settings)
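Putting the pieces together, the upscale path above reduces to roughly the following sketch (the input image and model directory are hypothetical, and self.scale is normally set by the upscaler framework from the UI rather than by hand):

    from PIL import Image
    upscaler = UpscalerLDSR("models/LDSR")
    upscaler.scale = 4
    result = upscaler.do_upscale(Image.open("input.png"), None)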
extensions-builtin/LDSR/sd_hijack_autoencoder.py
ADDED
@@ -0,0 +1,293 @@
# The content of this file comes from the ldm/models/autoencoder.py file of the compvis/stable-diffusion repo
# The VQModel & VQModelInterface were subsequently removed from ldm/models/autoencoder.py when we moved to the stability-ai/stablediffusion repo
# As the LDSR upscaler relies on VQModel & VQModelInterface, the hijack aims to put them back into the ldm.models.autoencoder
import numpy as np
import torch
import pytorch_lightning as pl
import torch.nn.functional as F
from contextlib import contextmanager

from torch.optim.lr_scheduler import LambdaLR

from ldm.modules.ema import LitEma
from vqvae_quantize import VectorQuantizer2 as VectorQuantizer
from ldm.modules.diffusionmodules.model import Encoder, Decoder
from ldm.util import instantiate_from_config

import ldm.models.autoencoder
from packaging import version

class VQModel(pl.LightningModule):
    def __init__(self,
                 ddconfig,
                 lossconfig,
                 n_embed,
                 embed_dim,
                 ckpt_path=None,
                 ignore_keys=None,
                 image_key="image",
                 colorize_nlabels=None,
                 monitor=None,
                 batch_resize_range=None,
                 scheduler_config=None,
                 lr_g_factor=1.0,
                 remap=None,
                 sane_index_shape=False,  # tell vector quantizer to return indices as bhw
                 use_ema=False
                 ):
        super().__init__()
        self.embed_dim = embed_dim
        self.n_embed = n_embed
        self.image_key = image_key
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        self.loss = instantiate_from_config(lossconfig)
        self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25,
                                        remap=remap,
                                        sane_index_shape=sane_index_shape)
        self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
        if colorize_nlabels is not None:
            assert type(colorize_nlabels)==int
            self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
        if monitor is not None:
            self.monitor = monitor
        self.batch_resize_range = batch_resize_range
        if self.batch_resize_range is not None:
            print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.")

        self.use_ema = use_ema
        if self.use_ema:
            self.model_ema = LitEma(self)
            print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")

        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [])
        self.scheduler_config = scheduler_config
        self.lr_g_factor = lr_g_factor

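    # Editor's note (not part of the original file): beta=0.25 above is the usual
    # VQ-VAE commitment-loss weight; the quantizer snaps each spatial code coming
    # out of quant_conv to its nearest entry in an (n_embed, embed_dim) codebook.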
    @contextmanager
    def ema_scope(self, context=None):
        if self.use_ema:
            self.model_ema.store(self.parameters())
            self.model_ema.copy_to(self)
            if context is not None:
                print(f"{context}: Switched to EMA weights")
        try:
            yield None
        finally:
            if self.use_ema:
                self.model_ema.restore(self.parameters())
                if context is not None:
                    print(f"{context}: Restored training weights")

    def init_from_ckpt(self, path, ignore_keys=None):
        sd = torch.load(path, map_location="cpu")["state_dict"]
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys or []:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        missing, unexpected = self.load_state_dict(sd, strict=False)
        print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
        if missing:
            print(f"Missing Keys: {missing}")
        if unexpected:
            print(f"Unexpected Keys: {unexpected}")

    def on_train_batch_end(self, *args, **kwargs):
        if self.use_ema:
            self.model_ema(self)

    def encode(self, x):
        h = self.encoder(x)
        h = self.quant_conv(h)
        quant, emb_loss, info = self.quantize(h)
        return quant, emb_loss, info

    def encode_to_prequant(self, x):
        h = self.encoder(x)
        h = self.quant_conv(h)
        return h

    def decode(self, quant):
        quant = self.post_quant_conv(quant)
        dec = self.decoder(quant)
        return dec

    def decode_code(self, code_b):
        quant_b = self.quantize.embed_code(code_b)
        dec = self.decode(quant_b)
        return dec

    def forward(self, input, return_pred_indices=False):
        quant, diff, (_,_,ind) = self.encode(input)
        dec = self.decode(quant)
        if return_pred_indices:
            return dec, diff, ind
        return dec, diff
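
    # Editor's note: an illustrative round trip through the model above; the
    # shapes assume a hypothetical f=4 config with embed_dim=3 and 256x256 inputs.
    #
    #     x = torch.randn(1, 3, 256, 256)
    #     dec, diff = model(x)                      # dec: (1, 3, 256, 256)
    #     quant, _, (_, _, ind) = model.encode(x)   # quant: (1, 3, 64, 64)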

    def get_input(self, batch, k):
        x = batch[k]
        if len(x.shape) == 3:
            x = x[..., None]
        x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
        if self.batch_resize_range is not None:
            lower_size = self.batch_resize_range[0]
            upper_size = self.batch_resize_range[1]
            if self.global_step <= 4:
                # do the first few batches with max size to avoid later oom
                new_resize = upper_size
            else:
                new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16))
            if new_resize != x.shape[2]:
                x = F.interpolate(x, size=new_resize, mode="bicubic")
            x = x.detach()
        return x

    def training_step(self, batch, batch_idx, optimizer_idx):
        # https://github.com/pytorch/pytorch/issues/37142
        # try not to fool the heuristics
        x = self.get_input(batch, self.image_key)
        xrec, qloss, ind = self(x, return_pred_indices=True)

        if optimizer_idx == 0:
            # autoencode
            aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
                                            last_layer=self.get_last_layer(), split="train",
                                            predicted_indices=ind)

            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return aeloss

        if optimizer_idx == 1:
            # discriminator
            discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
                                                last_layer=self.get_last_layer(), split="train")
            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return discloss

    def validation_step(self, batch, batch_idx):
        log_dict = self._validation_step(batch, batch_idx)
        with self.ema_scope():
            self._validation_step(batch, batch_idx, suffix="_ema")
        return log_dict

    def _validation_step(self, batch, batch_idx, suffix=""):
        x = self.get_input(batch, self.image_key)
        xrec, qloss, ind = self(x, return_pred_indices=True)
        aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0,
                                        self.global_step,
                                        last_layer=self.get_last_layer(),
                                        split="val"+suffix,
                                        predicted_indices=ind
                                        )

        discloss, log_dict_disc = self.loss(qloss, x, xrec, 1,
                                            self.global_step,
                                            last_layer=self.get_last_layer(),
                                            split="val"+suffix,
                                            predicted_indices=ind
                                            )
        rec_loss = log_dict_ae[f"val{suffix}/rec_loss"]
        self.log(f"val{suffix}/rec_loss", rec_loss,
                 prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
        self.log(f"val{suffix}/aeloss", aeloss,
                 prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
        if version.parse(pl.__version__) >= version.parse('1.4.0'):
            del log_dict_ae[f"val{suffix}/rec_loss"]
        self.log_dict(log_dict_ae)
        self.log_dict(log_dict_disc)
        return self.log_dict

    def configure_optimizers(self):
        lr_d = self.learning_rate
        lr_g = self.lr_g_factor*self.learning_rate
        print("lr_d", lr_d)
        print("lr_g", lr_g)
        opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
                                  list(self.decoder.parameters())+
                                  list(self.quantize.parameters())+
                                  list(self.quant_conv.parameters())+
                                  list(self.post_quant_conv.parameters()),
                                  lr=lr_g, betas=(0.5, 0.9))
        opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
                                    lr=lr_d, betas=(0.5, 0.9))

        if self.scheduler_config is not None:
            scheduler = instantiate_from_config(self.scheduler_config)

            print("Setting up LambdaLR scheduler...")
            scheduler = [
                {
                    'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule),
                    'interval': 'step',
                    'frequency': 1
                },
                {
                    'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule),
                    'interval': 'step',
                    'frequency': 1
                },
            ]
            return [opt_ae, opt_disc], scheduler
        return [opt_ae, opt_disc], []

    def get_last_layer(self):
        return self.decoder.conv_out.weight

    def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs):
        log = {}
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        if only_inputs:
            log["inputs"] = x
            return log
        xrec, _ = self(x)
        if x.shape[1] > 3:
            # colorize with random projection
            assert xrec.shape[1] > 3
            x = self.to_rgb(x)
            xrec = self.to_rgb(xrec)
        log["inputs"] = x
        log["reconstructions"] = xrec
        if plot_ema:
            with self.ema_scope():
                xrec_ema, _ = self(x)
                if x.shape[1] > 3:
                    xrec_ema = self.to_rgb(xrec_ema)
                log["reconstructions_ema"] = xrec_ema
        return log

    def to_rgb(self, x):
        assert self.image_key == "segmentation"
        if not hasattr(self, "colorize"):
            self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
        x = F.conv2d(x, weight=self.colorize)
        x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
        return x


class VQModelInterface(VQModel):
    def __init__(self, embed_dim, *args, **kwargs):
        super().__init__(*args, embed_dim=embed_dim, **kwargs)
        self.embed_dim = embed_dim

    def encode(self, x):
        h = self.encoder(x)
        h = self.quant_conv(h)
        return h

    def decode(self, h, force_not_quantize=False):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant = self.post_quant_conv(quant)
        dec = self.decoder(quant)
        return dec

ldm.models.autoencoder.VQModel = VQModel
ldm.models.autoencoder.VQModelInterface = VQModelInterface
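The effect of the two assignments above is that merely importing this module patches the ldm package, so existing code can keep resolving the old class path. A minimal sketch, assuming the extension directory is on sys.path as it is when the webui loads these scripts:

    import sd_hijack_autoencoder  # the import itself applies the patch
    import ldm.models.autoencoder
    assert ldm.models.autoencoder.VQModelInterface is sd_hijack_autoencoder.VQModelInterface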
extensions-builtin/LDSR/sd_hijack_ddpm_v1.py
ADDED
@@ -0,0 +1,1443 @@
# This script is copied from the compvis/stable-diffusion repo (aka the SD V1 repo)
# Original filename: ldm/models/diffusion/ddpm.py
# The purpose to reinstate the old DDPM logic which works with VQ, whereas the V2 one doesn't
# Some models such as LDSR require VQ to work correctly
# The classes are suffixed with "V1" and added back to the "ldm.models.diffusion.ddpm" module

import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only

from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler

import ldm.models.diffusion.ddpm

__conditioning_keys__ = {'concat': 'c_concat',
                         'crossattn': 'c_crossattn',
                         'adm': 'y'}


def disabled_train(self, mode=True):
    """Overwrite model.train with this function to make sure train/eval mode
    does not change anymore."""
    return self


def uniform_on_device(r1, r2, shape, device):
    return (r1 - r2) * torch.rand(*shape, device=device) + r2


class DDPMV1(pl.LightningModule):
    # classic DDPM with Gaussian diffusion, in image space
    def __init__(self,
                 unet_config,
                 timesteps=1000,
                 beta_schedule="linear",
                 loss_type="l2",
                 ckpt_path=None,
                 ignore_keys=None,
                 load_only_unet=False,
                 monitor="val/loss",
                 use_ema=True,
                 first_stage_key="image",
                 image_size=256,
                 channels=3,
                 log_every_t=100,
                 clip_denoised=True,
                 linear_start=1e-4,
                 linear_end=2e-2,
                 cosine_s=8e-3,
                 given_betas=None,
                 original_elbo_weight=0.,
                 v_posterior=0.,  # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
                 l_simple_weight=1.,
                 conditioning_key=None,
                 parameterization="eps",  # all assuming fixed variance schedules
                 scheduler_config=None,
                 use_positional_encodings=False,
                 learn_logvar=False,
                 logvar_init=0.,
                 ):
        super().__init__()
        assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"'
        self.parameterization = parameterization
        print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
        self.cond_stage_model = None
        self.clip_denoised = clip_denoised
        self.log_every_t = log_every_t
        self.first_stage_key = first_stage_key
        self.image_size = image_size  # try conv?
        self.channels = channels
        self.use_positional_encodings = use_positional_encodings
        self.model = DiffusionWrapperV1(unet_config, conditioning_key)
        count_params(self.model, verbose=True)
        self.use_ema = use_ema
        if self.use_ema:
            self.model_ema = LitEma(self.model)
            print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")

        self.use_scheduler = scheduler_config is not None
        if self.use_scheduler:
            self.scheduler_config = scheduler_config

        self.v_posterior = v_posterior
        self.original_elbo_weight = original_elbo_weight
        self.l_simple_weight = l_simple_weight

        if monitor is not None:
            self.monitor = monitor
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [], only_model=load_only_unet)

        self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
                               linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)

        self.loss_type = loss_type

        self.learn_logvar = learn_logvar
        self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
        if self.learn_logvar:
            self.logvar = nn.Parameter(self.logvar, requires_grad=True)


    def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
                          linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
        if exists(given_betas):
            betas = given_betas
        else:
            betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
                                       cosine_s=cosine_s)
        alphas = 1. - betas
        alphas_cumprod = np.cumprod(alphas, axis=0)
        alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])

        timesteps, = betas.shape
        self.num_timesteps = int(timesteps)
        self.linear_start = linear_start
        self.linear_end = linear_end
        assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'

        to_torch = partial(torch.tensor, dtype=torch.float32)

        self.register_buffer('betas', to_torch(betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))

        # calculations for posterior q(x_{t-1} | x_t, x_0)
        posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
                1. - alphas_cumprod) + self.v_posterior * betas
        # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
        self.register_buffer('posterior_variance', to_torch(posterior_variance))
        # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
        self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
        self.register_buffer('posterior_mean_coef1', to_torch(
            betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
        self.register_buffer('posterior_mean_coef2', to_torch(
            (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))

        if self.parameterization == "eps":
            lvlb_weights = self.betas ** 2 / (
                    2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
        elif self.parameterization == "x0":
            lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
        else:
            raise NotImplementedError("mu not supported")
        # TODO how to choose this term
        lvlb_weights[0] = lvlb_weights[1]
        self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
        assert not torch.isnan(self.lvlb_weights).all()
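
    # Editor's note (not part of the original file): the buffers registered above
    # are the standard DDPM closed forms. With alpha_t = 1 - beta_t and
    # abar_t = prod_{s<=t} alpha_s, the forward process used by q_sample() below is
    #
    #     x_t = sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * eps,   eps ~ N(0, I)
    #
    # and posterior_mean_coef1/2 give the mean of the posterior q(x_{t-1} | x_t, x_0).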

    @contextmanager
    def ema_scope(self, context=None):
        if self.use_ema:
            self.model_ema.store(self.model.parameters())
            self.model_ema.copy_to(self.model)
            if context is not None:
                print(f"{context}: Switched to EMA weights")
        try:
            yield None
        finally:
            if self.use_ema:
                self.model_ema.restore(self.model.parameters())
                if context is not None:
                    print(f"{context}: Restored training weights")

    def init_from_ckpt(self, path, ignore_keys=None, only_model=False):
        sd = torch.load(path, map_location="cpu")
        if "state_dict" in list(sd.keys()):
            sd = sd["state_dict"]
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys or []:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
            sd, strict=False)
        print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
        if missing:
            print(f"Missing Keys: {missing}")
        if unexpected:
            print(f"Unexpected Keys: {unexpected}")

    def q_mean_variance(self, x_start, t):
        """
        Get the distribution q(x_t | x_0).
        :param x_start: the [N x C x ...] tensor of noiseless inputs.
        :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
        :return: A tuple (mean, variance, log_variance), all of x_start's shape.
        """
        mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
        variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
        log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
        return mean, variance, log_variance

    def predict_start_from_noise(self, x_t, t, noise):
        return (
                extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
                extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
        )

    def q_posterior(self, x_start, x_t, t):
        posterior_mean = (
                extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
                extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
        )
        posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
        posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
        return posterior_mean, posterior_variance, posterior_log_variance_clipped

    def p_mean_variance(self, x, t, clip_denoised: bool):
        model_out = self.model(x, t)
        if self.parameterization == "eps":
            x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
        elif self.parameterization == "x0":
            x_recon = model_out
        if clip_denoised:
            x_recon.clamp_(-1., 1.)

        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
        return model_mean, posterior_variance, posterior_log_variance

    @torch.no_grad()
    def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
        b, *_, device = *x.shape, x.device
        model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
        noise = noise_like(x.shape, device, repeat_noise)
        # no noise when t == 0
        nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
        return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise

    @torch.no_grad()
    def p_sample_loop(self, shape, return_intermediates=False):
        device = self.betas.device
        b = shape[0]
        img = torch.randn(shape, device=device)
        intermediates = [img]
        for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
            img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
                                clip_denoised=self.clip_denoised)
            if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
                intermediates.append(img)
        if return_intermediates:
            return img, intermediates
        return img

    @torch.no_grad()
    def sample(self, batch_size=16, return_intermediates=False):
        image_size = self.image_size
        channels = self.channels
        return self.p_sample_loop((batch_size, channels, image_size, image_size),
                                  return_intermediates=return_intermediates)

    def q_sample(self, x_start, t, noise=None):
        noise = default(noise, lambda: torch.randn_like(x_start))
        return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
                extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)

    def get_loss(self, pred, target, mean=True):
        if self.loss_type == 'l1':
            loss = (target - pred).abs()
            if mean:
                loss = loss.mean()
        elif self.loss_type == 'l2':
            if mean:
                loss = torch.nn.functional.mse_loss(target, pred)
            else:
                loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
        else:
            raise NotImplementedError(f"unknown loss type '{self.loss_type}'")

        return loss

    def p_losses(self, x_start, t, noise=None):
        noise = default(noise, lambda: torch.randn_like(x_start))
        x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
        model_out = self.model(x_noisy, t)

        loss_dict = {}
        if self.parameterization == "eps":
            target = noise
        elif self.parameterization == "x0":
            target = x_start
        else:
            raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported")

        loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])

        log_prefix = 'train' if self.training else 'val'

        loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
        loss_simple = loss.mean() * self.l_simple_weight

        loss_vlb = (self.lvlb_weights[t] * loss).mean()
        loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})

        loss = loss_simple + self.original_elbo_weight * loss_vlb

        loss_dict.update({f'{log_prefix}/loss': loss})

        return loss, loss_dict
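
    # Editor's note (not part of the original file): the total above is the
    # hybrid L_simple + lambda * L_vlb objective from the DDPM/LDM papers. With
    # the default original_elbo_weight=0., only the "simple" MSE term trains the
    # model; loss_vlb is still computed and logged for monitoring.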

    def forward(self, x, *args, **kwargs):
        # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
        # assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
        t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
        return self.p_losses(x, t, *args, **kwargs)

    def get_input(self, batch, k):
        x = batch[k]
        if len(x.shape) == 3:
            x = x[..., None]
        x = rearrange(x, 'b h w c -> b c h w')
        x = x.to(memory_format=torch.contiguous_format).float()
        return x

    def shared_step(self, batch):
        x = self.get_input(batch, self.first_stage_key)
        loss, loss_dict = self(x)
        return loss, loss_dict

    def training_step(self, batch, batch_idx):
        loss, loss_dict = self.shared_step(batch)

        self.log_dict(loss_dict, prog_bar=True,
                      logger=True, on_step=True, on_epoch=True)

        self.log("global_step", self.global_step,
                 prog_bar=True, logger=True, on_step=True, on_epoch=False)

        if self.use_scheduler:
            lr = self.optimizers().param_groups[0]['lr']
            self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)

        return loss

    @torch.no_grad()
    def validation_step(self, batch, batch_idx):
        _, loss_dict_no_ema = self.shared_step(batch)
        with self.ema_scope():
            _, loss_dict_ema = self.shared_step(batch)
            loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
        self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
        self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)

    def on_train_batch_end(self, *args, **kwargs):
        if self.use_ema:
            self.model_ema(self.model)

    def _get_rows_from_list(self, samples):
        n_imgs_per_row = len(samples)
        denoise_grid = rearrange(samples, 'n b c h w -> b n c h w')
        denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
        denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
        return denoise_grid

    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
        log = {}
        x = self.get_input(batch, self.first_stage_key)
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        x = x.to(self.device)[:N]
        log["inputs"] = x

        # get diffusion row
        diffusion_row = []
        x_start = x[:n_row]

        for t in range(self.num_timesteps):
            if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
                t = t.to(self.device).long()
                noise = torch.randn_like(x_start)
                x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
                diffusion_row.append(x_noisy)

        log["diffusion_row"] = self._get_rows_from_list(diffusion_row)

        if sample:
            # get denoise row
            with self.ema_scope("Plotting"):
                samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)

            log["samples"] = samples
            log["denoise_row"] = self._get_rows_from_list(denoise_row)

        if return_keys:
            if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
                return log
            else:
                return {key: log[key] for key in return_keys}
        return log

    def configure_optimizers(self):
        lr = self.learning_rate
        params = list(self.model.parameters())
        if self.learn_logvar:
            params = params + [self.logvar]
        opt = torch.optim.AdamW(params, lr=lr)
        return opt


class LatentDiffusionV1(DDPMV1):
    """main class"""
    def __init__(self,
                 first_stage_config,
                 cond_stage_config,
                 num_timesteps_cond=None,
                 cond_stage_key="image",
                 cond_stage_trainable=False,
                 concat_mode=True,
                 cond_stage_forward=None,
                 conditioning_key=None,
                 scale_factor=1.0,
                 scale_by_std=False,
                 *args, **kwargs):
        self.num_timesteps_cond = default(num_timesteps_cond, 1)
        self.scale_by_std = scale_by_std
        assert self.num_timesteps_cond <= kwargs['timesteps']
        # for backwards compatibility after implementation of DiffusionWrapper
        if conditioning_key is None:
            conditioning_key = 'concat' if concat_mode else 'crossattn'
        if cond_stage_config == '__is_unconditional__':
            conditioning_key = None
        ckpt_path = kwargs.pop("ckpt_path", None)
        ignore_keys = kwargs.pop("ignore_keys", [])
        super().__init__(*args, conditioning_key=conditioning_key, **kwargs)
        self.concat_mode = concat_mode
        self.cond_stage_trainable = cond_stage_trainable
        self.cond_stage_key = cond_stage_key
        try:
            self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
        except Exception:
            self.num_downs = 0
        if not scale_by_std:
            self.scale_factor = scale_factor
        else:
            self.register_buffer('scale_factor', torch.tensor(scale_factor))
        self.instantiate_first_stage(first_stage_config)
        self.instantiate_cond_stage(cond_stage_config)
        self.cond_stage_forward = cond_stage_forward
        self.clip_denoised = False
        self.bbox_tokenizer = None

        self.restarted_from_ckpt = False
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys)
            self.restarted_from_ckpt = True

    def make_cond_schedule(self, ):
        self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
        ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
        self.cond_ids[:self.num_timesteps_cond] = ids

    @rank_zero_only
    @torch.no_grad()
    def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
        # only for very first batch
        if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
            assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
            # set rescale weight to 1./std of encodings
            print("### USING STD-RESCALING ###")
            x = super().get_input(batch, self.first_stage_key)
            x = x.to(self.device)
            encoder_posterior = self.encode_first_stage(x)
            z = self.get_first_stage_encoding(encoder_posterior).detach()
            del self.scale_factor
            self.register_buffer('scale_factor', 1. / z.flatten().std())
            print(f"setting self.scale_factor to {self.scale_factor}")
            print("### USING STD-RESCALING ###")

    def register_schedule(self,
                          given_betas=None, beta_schedule="linear", timesteps=1000,
                          linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
        super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)

        self.shorten_cond_schedule = self.num_timesteps_cond > 1
        if self.shorten_cond_schedule:
            self.make_cond_schedule()

    def instantiate_first_stage(self, config):
        model = instantiate_from_config(config)
        self.first_stage_model = model.eval()
        self.first_stage_model.train = disabled_train
        for param in self.first_stage_model.parameters():
            param.requires_grad = False

    def instantiate_cond_stage(self, config):
        if not self.cond_stage_trainable:
            if config == "__is_first_stage__":
                print("Using first stage also as cond stage.")
                self.cond_stage_model = self.first_stage_model
            elif config == "__is_unconditional__":
                print(f"Training {self.__class__.__name__} as an unconditional model.")
                self.cond_stage_model = None
                # self.be_unconditional = True
            else:
                model = instantiate_from_config(config)
                self.cond_stage_model = model.eval()
                self.cond_stage_model.train = disabled_train
                for param in self.cond_stage_model.parameters():
                    param.requires_grad = False
        else:
            assert config != '__is_first_stage__'
            assert config != '__is_unconditional__'
            model = instantiate_from_config(config)
            self.cond_stage_model = model

    def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
        denoise_row = []
        for zd in tqdm(samples, desc=desc):
            denoise_row.append(self.decode_first_stage(zd.to(self.device),
                                                       force_not_quantize=force_no_decoder_quantization))
        n_imgs_per_row = len(denoise_row)
        denoise_row = torch.stack(denoise_row)  # n_log_step, n_row, C, H, W
        denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
        denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
        denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
        return denoise_grid

    def get_first_stage_encoding(self, encoder_posterior):
        if isinstance(encoder_posterior, DiagonalGaussianDistribution):
            z = encoder_posterior.sample()
        elif isinstance(encoder_posterior, torch.Tensor):
            z = encoder_posterior
        else:
            raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
        return self.scale_factor * z

    def get_learned_conditioning(self, c):
        if self.cond_stage_forward is None:
            if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
                c = self.cond_stage_model.encode(c)
                if isinstance(c, DiagonalGaussianDistribution):
                    c = c.mode()
            else:
                c = self.cond_stage_model(c)
        else:
            assert hasattr(self.cond_stage_model, self.cond_stage_forward)
            c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
        return c

    def meshgrid(self, h, w):
        y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
        x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)

        arr = torch.cat([y, x], dim=-1)
        return arr

    def delta_border(self, h, w):
        """
        :param h: height
        :param w: width
        :return: normalized distance to image border,
         with min distance = 0 at border and max dist = 0.5 at image center
        """
        lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
        arr = self.meshgrid(h, w) / lower_right_corner
        dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
        dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
        edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
        return edge_dist

    def get_weighting(self, h, w, Ly, Lx, device):
        weighting = self.delta_border(h, w)
        weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
                               self.split_input_params["clip_max_weight"], )
        weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)

        if self.split_input_params["tie_braker"]:
            L_weighting = self.delta_border(Ly, Lx)
            L_weighting = torch.clip(L_weighting,
                                     self.split_input_params["clip_min_tie_weight"],
                                     self.split_input_params["clip_max_tie_weight"])

            L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
            weighting = weighting * L_weighting
        return weighting
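
    # Editor's note (not part of the original file): these weights implement the
    # tile blending for split_input_params. For the values set in
    # ldsr_model_arch.LDSR.run() (ks=128, stride=64, clip_min_weight=0.01,
    # clip_max_weight=0.5, tie_braker=False), each 128x128 patch is feathered
    # toward its borders so that overlapping patches average smoothly after Fold.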

    def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1):  # todo load once not every time, shorten code
        """
        :param x: img of size (bs, c, h, w)
        :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
        """
        bs, nc, h, w = x.shape

        # number of crops in image
        Ly = (h - kernel_size[0]) // stride[0] + 1
        Lx = (w - kernel_size[1]) // stride[1] + 1

        if uf == 1 and df == 1:
            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
            unfold = torch.nn.Unfold(**fold_params)

            fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)

            weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
            normalization = fold(weighting).view(1, 1, h, w)  # normalizes the overlap
            weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))

        elif uf > 1 and df == 1:
            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
            unfold = torch.nn.Unfold(**fold_params)

            fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
                                dilation=1, padding=0,
                                stride=(stride[0] * uf, stride[1] * uf))
            fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)

            weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
            normalization = fold(weighting).view(1, 1, h * uf, w * uf)  # normalizes the overlap
            weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))

        elif df > 1 and uf == 1:
            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
            unfold = torch.nn.Unfold(**fold_params)

            fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
                                dilation=1, padding=0,
                                stride=(stride[0] // df, stride[1] // df))
            fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)

            weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
            normalization = fold(weighting).view(1, 1, h // df, w // df)  # normalizes the overlap
            weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))

        else:
            raise NotImplementedError

        return fold, unfold, normalization, weighting
651 |
+
|
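    # get_input pulls the raw batch, encodes it to the latent z, and assembles the
    # conditioning c (caption, class label, bbox coordinates, or another image),
    # optionally also returning the decoded reconstruction and the raw conditioning.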
    @torch.no_grad()
    def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
                  cond_key=None, return_original_cond=False, bs=None):
        x = super().get_input(batch, k)
        if bs is not None:
            x = x[:bs]
        x = x.to(self.device)
        encoder_posterior = self.encode_first_stage(x)
        z = self.get_first_stage_encoding(encoder_posterior).detach()

        if self.model.conditioning_key is not None:
            if cond_key is None:
                cond_key = self.cond_stage_key
            if cond_key != self.first_stage_key:
                if cond_key in ['caption', 'coordinates_bbox']:
                    xc = batch[cond_key]
                elif cond_key == 'class_label':
                    xc = batch
                else:
                    xc = super().get_input(batch, cond_key).to(self.device)
            else:
                xc = x
            if not self.cond_stage_trainable or force_c_encode:
                if isinstance(xc, dict) or isinstance(xc, list):
                    # import pudb; pudb.set_trace()
                    c = self.get_learned_conditioning(xc)
                else:
                    c = self.get_learned_conditioning(xc.to(self.device))
            else:
                c = xc
            if bs is not None:
                c = c[:bs]

            if self.use_positional_encodings:
                pos_x, pos_y = self.compute_latent_shifts(batch)
                ckey = __conditioning_keys__[self.model.conditioning_key]
                c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}

        else:
            c = None
            xc = None
            if self.use_positional_encodings:
                pos_x, pos_y = self.compute_latent_shifts(batch)
                c = {'pos_x': pos_x, 'pos_y': pos_y}
        out = [z, c]
        if return_first_stage_outputs:
            xrec = self.decode_first_stage(z)
            out.extend([x, xrec])
        if return_original_cond:
            out.append(xc)
        return out

    @torch.no_grad()
    def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
        if predict_cids:
            if z.dim() == 4:
                z = torch.argmax(z.exp(), dim=1).long()
            z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
            z = rearrange(z, 'b h w c -> b c h w').contiguous()

        z = 1. / self.scale_factor * z

        if hasattr(self, "split_input_params"):
            if self.split_input_params["patch_distributed_vq"]:
                ks = self.split_input_params["ks"]  # e.g. (128, 128)
                stride = self.split_input_params["stride"]  # e.g. (64, 64)
                uf = self.split_input_params["vqf"]
                bs, nc, h, w = z.shape
                if ks[0] > h or ks[1] > w:
                    ks = (min(ks[0], h), min(ks[1], w))
                    print("reducing Kernel")

                if stride[0] > h or stride[1] > w:
                    stride = (min(stride[0], h), min(stride[1], w))
                    print("reducing stride")

                fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)

                z = unfold(z)  # (bn, nc * prod(**ks), L)
                # 1. Reshape to img shape
                z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L )

                # 2. apply model loop over last dim
                if isinstance(self.first_stage_model, VQModelInterface):
                    output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
                                                                 force_not_quantize=predict_cids or force_not_quantize)
                                   for i in range(z.shape[-1])]
                else:
                    output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
                                   for i in range(z.shape[-1])]

                o = torch.stack(output_list, axis=-1)  # (bn, nc, ks[0], ks[1], L)
                o = o * weighting
                # Reverse 1. reshape to img shape
                o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
                # stitch crops together
                decoded = fold(o)
                decoded = decoded / normalization  # norm is shape (1, 1, h, w)
                return decoded
            else:
                if isinstance(self.first_stage_model, VQModelInterface):
                    return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
                else:
                    return self.first_stage_model.decode(z)

        else:
            if isinstance(self.first_stage_model, VQModelInterface):
                return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
            else:
                return self.first_stage_model.decode(z)

    # same as above but without decorator
    def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
        if predict_cids:
            if z.dim() == 4:
                z = torch.argmax(z.exp(), dim=1).long()
            z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
            z = rearrange(z, 'b h w c -> b c h w').contiguous()

        z = 1. / self.scale_factor * z

        if hasattr(self, "split_input_params"):
            if self.split_input_params["patch_distributed_vq"]:
                ks = self.split_input_params["ks"]  # e.g. (128, 128)
                stride = self.split_input_params["stride"]  # e.g. (64, 64)
                uf = self.split_input_params["vqf"]
                bs, nc, h, w = z.shape
                if ks[0] > h or ks[1] > w:
                    ks = (min(ks[0], h), min(ks[1], w))
                    print("reducing Kernel")

                if stride[0] > h or stride[1] > w:
                    stride = (min(stride[0], h), min(stride[1], w))
                    print("reducing stride")

                fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)

                z = unfold(z)  # (bn, nc * prod(**ks), L)
                # 1. Reshape to img shape
                z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L )

                # 2. apply model loop over last dim
                if isinstance(self.first_stage_model, VQModelInterface):
                    output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
                                                                 force_not_quantize=predict_cids or force_not_quantize)
                                   for i in range(z.shape[-1])]
                else:
                    output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
                                   for i in range(z.shape[-1])]

                o = torch.stack(output_list, axis=-1)  # (bn, nc, ks[0], ks[1], L)
                o = o * weighting
                # Reverse 1. reshape to img shape
                o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
                # stitch crops together
                decoded = fold(o)
                decoded = decoded / normalization  # norm is shape (1, 1, h, w)
                return decoded
            else:
                if isinstance(self.first_stage_model, VQModelInterface):
                    return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
                else:
                    return self.first_stage_model.decode(z)

        else:
            if isinstance(self.first_stage_model, VQModelInterface):
                return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
            else:
                return self.first_stage_model.decode(z)

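    # encode_first_stage mirrors the tiled decoding path: with split_input_params
    # set, the image is unfolded into overlapping crops, each crop is encoded
    # separately, and the results are blended back with the same weighting scheme.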
    @torch.no_grad()
    def encode_first_stage(self, x):
        if hasattr(self, "split_input_params"):
            if self.split_input_params["patch_distributed_vq"]:
                ks = self.split_input_params["ks"]  # e.g. (128, 128)
                stride = self.split_input_params["stride"]  # e.g. (64, 64)
                df = self.split_input_params["vqf"]
                self.split_input_params['original_image_size'] = x.shape[-2:]
                bs, nc, h, w = x.shape
                if ks[0] > h or ks[1] > w:
                    ks = (min(ks[0], h), min(ks[1], w))
                    print("reducing Kernel")

                if stride[0] > h or stride[1] > w:
                    stride = (min(stride[0], h), min(stride[1], w))
                    print("reducing stride")

                fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df)
                z = unfold(x)  # (bn, nc * prod(**ks), L)
                # Reshape to img shape
                z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L )

                output_list = [self.first_stage_model.encode(z[:, :, :, :, i])
                               for i in range(z.shape[-1])]

                o = torch.stack(output_list, axis=-1)
                o = o * weighting

                # Reverse reshape to img shape
                o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
                # stitch crops together
                decoded = fold(o)
                decoded = decoded / normalization
                return decoded

            else:
                return self.first_stage_model.encode(x)
        else:
            return self.first_stage_model.encode(x)

    def shared_step(self, batch, **kwargs):
        x, c = self.get_input(batch, self.first_stage_key)
        loss = self(x, c)
        return loss

    def forward(self, x, c, *args, **kwargs):
        t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
        if self.model.conditioning_key is not None:
            assert c is not None
            if self.cond_stage_trainable:
                c = self.get_learned_conditioning(c)
            if self.shorten_cond_schedule:  # TODO: drop this option
                tc = self.cond_ids[t].to(self.device)
                c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
        return self.p_losses(x, c, t, *args, **kwargs)

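    # apply_model routes conditioning into the UNet wrapper: plain tensors/lists are
    # wrapped into a dict under 'c_concat' or 'c_crossattn' depending on the
    # conditioning key, and with split_input_params set the noisy latent is denoised
    # crop-by-crop and the crop outputs are stitched back together.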
    def apply_model(self, x_noisy, t, cond, return_ids=False):

        if isinstance(cond, dict):
            # hybrid case, cond is expected to be a dict
            pass
        else:
            if not isinstance(cond, list):
                cond = [cond]
            key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
            cond = {key: cond}

        if hasattr(self, "split_input_params"):
            assert len(cond) == 1  # todo can only deal with one conditioning atm
            assert not return_ids
            ks = self.split_input_params["ks"]  # e.g. (128, 128)
            stride = self.split_input_params["stride"]  # e.g. (64, 64)

            h, w = x_noisy.shape[-2:]

            fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride)

            z = unfold(x_noisy)  # (bn, nc * prod(**ks), L)
            # Reshape to img shape
            z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L )
            z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]

            if self.cond_stage_key in ["image", "LR_image", "segmentation",
                                       'bbox_img'] and self.model.conditioning_key:  # todo check for completeness
                c_key = next(iter(cond.keys()))  # get key
                c = next(iter(cond.values()))  # get value
                assert (len(c) == 1)  # todo extend to list with more than one elem
                c = c[0]  # get element

                c = unfold(c)
                c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1]))  # (bn, nc, ks[0], ks[1], L )

                cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]

            elif self.cond_stage_key == 'coordinates_bbox':
                assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size'

                # assuming padding of unfold is always 0 and its dilation is always 1
                n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
                full_img_h, full_img_w = self.split_input_params['original_image_size']
                # as we are operating on latents, we need the factor from the original image size to the
                # spatial latent size to properly rescale the crops for regenerating the bbox annotations
                num_downs = self.first_stage_model.encoder.num_resolutions - 1
                rescale_latent = 2 ** (num_downs)

                # get top left positions of patches as conforming for the bbox tokenizer, therefore we
                # need to rescale the tl patch coordinates to be in between (0,1)
                tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
                                         rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
                                        for patch_nr in range(z.shape[-1])]

                # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)
                patch_limits = [(x_tl, y_tl,
                                 rescale_latent * ks[0] / full_img_w,
                                 rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates]
                # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]

                # tokenize crop coordinates for the bounding boxes of the respective patches
                patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device)
                                      for bbox in patch_limits]  # list of length l with tensors of shape (1, 2)
                print(patch_limits_tknzd[0].shape)
                # cut tknzd crop position from conditioning
                assert isinstance(cond, dict), 'cond must be dict to be fed into model'
                cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device)
                print(cut_cond.shape)

                adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd])
                adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n')
                print(adapted_cond.shape)
                adapted_cond = self.get_learned_conditioning(adapted_cond)
                print(adapted_cond.shape)
                adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1])
                print(adapted_cond.shape)

                cond_list = [{'c_crossattn': [e]} for e in adapted_cond]

            else:
                cond_list = [cond for i in range(z.shape[-1])]  # Todo make this more efficient

            # apply model by loop over crops
            output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])]
            assert not isinstance(output_list[0],
                                  tuple)  # todo can't deal with multiple model outputs, check this never happens

            o = torch.stack(output_list, axis=-1)
            o = o * weighting
            # Reverse reshape to img shape
            o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
            # stitch crops together
            x_recon = fold(o) / normalization

        else:
            x_recon = self.model(x_noisy, t, **cond)

        if isinstance(x_recon, tuple) and not return_ids:
            return x_recon[0]
        else:
            return x_recon

    def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
        return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
            extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)

    def _prior_bpd(self, x_start):
        """
        Get the prior KL term for the variational lower-bound, measured in
        bits-per-dim.
        This term can't be optimized, as it only depends on the encoder.
        :param x_start: the [N x C x ...] tensor of inputs.
        :return: a batch of [N] KL values (in bits), one per batch element.
        """
        batch_size = x_start.shape[0]
        t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
        qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
        kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
        return mean_flat(kl_prior) / np.log(2.0)

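    # p_losses implements the standard DDPM training objective: the target is the
    # injected noise for the 'eps' parameterization or the clean latent for 'x0';
    # loss_simple is the per-pixel reconstruction loss, optionally reweighted by a
    # learned per-timestep log-variance, plus a VLB term weighted by lvlb_weights.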
    def p_losses(self, x_start, cond, t, noise=None):
        noise = default(noise, lambda: torch.randn_like(x_start))
        x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
        model_output = self.apply_model(x_noisy, t, cond)

        loss_dict = {}
        prefix = 'train' if self.training else 'val'

        if self.parameterization == "x0":
            target = x_start
        elif self.parameterization == "eps":
            target = noise
        else:
            raise NotImplementedError()

        loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
        loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})

        logvar_t = self.logvar[t].to(self.device)
        loss = loss_simple / torch.exp(logvar_t) + logvar_t
        # loss = loss_simple / torch.exp(self.logvar) + self.logvar
        if self.learn_logvar:
            loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
            loss_dict.update({'logvar': self.logvar.data.mean()})

        loss = self.l_simple_weight * loss.mean()

        loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
        loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
        loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
        loss += (self.original_elbo_weight * loss_vlb)
        loss_dict.update({f'{prefix}/loss': loss})

        return loss, loss_dict

    def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
                        return_x0=False, score_corrector=None, corrector_kwargs=None):
        t_in = t
        model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)

        if score_corrector is not None:
            assert self.parameterization == "eps"
            model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)

        if return_codebook_ids:
            model_out, logits = model_out

        if self.parameterization == "eps":
            x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
        elif self.parameterization == "x0":
            x_recon = model_out
        else:
            raise NotImplementedError()

        if clip_denoised:
            x_recon.clamp_(-1., 1.)
        if quantize_denoised:
            x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
        if return_codebook_ids:
            return model_mean, posterior_variance, posterior_log_variance, logits
        elif return_x0:
            return model_mean, posterior_variance, posterior_log_variance, x_recon
        else:
            return model_mean, posterior_variance, posterior_log_variance

    @torch.no_grad()
    def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
                 return_codebook_ids=False, quantize_denoised=False, return_x0=False,
                 temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
        b, *_, device = *x.shape, x.device
        outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
                                       return_codebook_ids=return_codebook_ids,
                                       quantize_denoised=quantize_denoised,
                                       return_x0=return_x0,
                                       score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
        if return_codebook_ids:
            raise DeprecationWarning("Support dropped.")
            model_mean, _, model_log_variance, logits = outputs
        elif return_x0:
            model_mean, _, model_log_variance, x0 = outputs
        else:
            model_mean, _, model_log_variance = outputs

        noise = noise_like(x.shape, device, repeat_noise) * temperature
        if noise_dropout > 0.:
            noise = torch.nn.functional.dropout(noise, p=noise_dropout)
        # no noise when t == 0
        nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))

        if return_codebook_ids:
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
        if return_x0:
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
        else:
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise

+
@torch.no_grad()
|
1099 |
+
def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
|
1100 |
+
img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
|
1101 |
+
score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
|
1102 |
+
log_every_t=None):
|
1103 |
+
if not log_every_t:
|
1104 |
+
log_every_t = self.log_every_t
|
1105 |
+
timesteps = self.num_timesteps
|
1106 |
+
if batch_size is not None:
|
1107 |
+
b = batch_size if batch_size is not None else shape[0]
|
1108 |
+
shape = [batch_size] + list(shape)
|
1109 |
+
else:
|
1110 |
+
b = batch_size = shape[0]
|
1111 |
+
if x_T is None:
|
1112 |
+
img = torch.randn(shape, device=self.device)
|
1113 |
+
else:
|
1114 |
+
img = x_T
|
1115 |
+
intermediates = []
|
1116 |
+
if cond is not None:
|
1117 |
+
if isinstance(cond, dict):
|
1118 |
+
cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
|
1119 |
+
[x[:batch_size] for x in cond[key]] for key in cond}
|
1120 |
+
else:
|
1121 |
+
cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
|
1122 |
+
|
1123 |
+
if start_T is not None:
|
1124 |
+
timesteps = min(timesteps, start_T)
|
1125 |
+
iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
|
1126 |
+
total=timesteps) if verbose else reversed(
|
1127 |
+
range(0, timesteps))
|
1128 |
+
if type(temperature) == float:
|
1129 |
+
temperature = [temperature] * timesteps
|
1130 |
+
|
1131 |
+
for i in iterator:
|
1132 |
+
ts = torch.full((b,), i, device=self.device, dtype=torch.long)
|
1133 |
+
if self.shorten_cond_schedule:
|
1134 |
+
assert self.model.conditioning_key != 'hybrid'
|
1135 |
+
tc = self.cond_ids[ts].to(cond.device)
|
1136 |
+
cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
|
1137 |
+
|
1138 |
+
img, x0_partial = self.p_sample(img, cond, ts,
|
1139 |
+
clip_denoised=self.clip_denoised,
|
1140 |
+
quantize_denoised=quantize_denoised, return_x0=True,
|
1141 |
+
temperature=temperature[i], noise_dropout=noise_dropout,
|
1142 |
+
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
|
1143 |
+
if mask is not None:
|
1144 |
+
assert x0 is not None
|
1145 |
+
img_orig = self.q_sample(x0, ts)
|
1146 |
+
img = img_orig * mask + (1. - mask) * img
|
1147 |
+
|
1148 |
+
if i % log_every_t == 0 or i == timesteps - 1:
|
1149 |
+
intermediates.append(x0_partial)
|
1150 |
+
if callback:
|
1151 |
+
callback(i)
|
1152 |
+
if img_callback:
|
1153 |
+
img_callback(img, i)
|
1154 |
+
return img, intermediates
|
1155 |
+
|
1156 |
+
@torch.no_grad()
|
1157 |
+
def p_sample_loop(self, cond, shape, return_intermediates=False,
|
1158 |
+
x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
|
1159 |
+
mask=None, x0=None, img_callback=None, start_T=None,
|
1160 |
+
log_every_t=None):
|
1161 |
+
|
1162 |
+
if not log_every_t:
|
1163 |
+
log_every_t = self.log_every_t
|
1164 |
+
device = self.betas.device
|
1165 |
+
b = shape[0]
|
1166 |
+
if x_T is None:
|
1167 |
+
img = torch.randn(shape, device=device)
|
1168 |
+
else:
|
1169 |
+
img = x_T
|
1170 |
+
|
1171 |
+
intermediates = [img]
|
1172 |
+
if timesteps is None:
|
1173 |
+
timesteps = self.num_timesteps
|
1174 |
+
|
1175 |
+
if start_T is not None:
|
1176 |
+
timesteps = min(timesteps, start_T)
|
1177 |
+
iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
|
1178 |
+
range(0, timesteps))
|
1179 |
+
|
1180 |
+
if mask is not None:
|
1181 |
+
assert x0 is not None
|
1182 |
+
assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match
|
1183 |
+
|
1184 |
+
for i in iterator:
|
1185 |
+
ts = torch.full((b,), i, device=device, dtype=torch.long)
|
1186 |
+
if self.shorten_cond_schedule:
|
1187 |
+
assert self.model.conditioning_key != 'hybrid'
|
1188 |
+
tc = self.cond_ids[ts].to(cond.device)
|
1189 |
+
cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
|
1190 |
+
|
1191 |
+
img = self.p_sample(img, cond, ts,
|
1192 |
+
clip_denoised=self.clip_denoised,
|
1193 |
+
quantize_denoised=quantize_denoised)
|
1194 |
+
if mask is not None:
|
1195 |
+
img_orig = self.q_sample(x0, ts)
|
1196 |
+
img = img_orig * mask + (1. - mask) * img
|
1197 |
+
|
1198 |
+
if i % log_every_t == 0 or i == timesteps - 1:
|
1199 |
+
intermediates.append(img)
|
1200 |
+
if callback:
|
1201 |
+
callback(i)
|
1202 |
+
if img_callback:
|
1203 |
+
img_callback(img, i)
|
1204 |
+
|
1205 |
+
if return_intermediates:
|
1206 |
+
return img, intermediates
|
1207 |
+
return img
|
1208 |
+
|
    @torch.no_grad()
    def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
               verbose=True, timesteps=None, quantize_denoised=False,
               mask=None, x0=None, shape=None, **kwargs):
        if shape is None:
            shape = (batch_size, self.channels, self.image_size, self.image_size)
        if cond is not None:
            if isinstance(cond, dict):
                cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
                        [x[:batch_size] for x in cond[key]] for key in cond}
            else:
                cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
        return self.p_sample_loop(cond,
                                  shape,
                                  return_intermediates=return_intermediates, x_T=x_T,
                                  verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
                                  mask=mask, x0=x0)

    @torch.no_grad()
    def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):

        if ddim:
            ddim_sampler = DDIMSampler(self)
            shape = (self.channels, self.image_size, self.image_size)
            samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,
                                                         shape, cond, verbose=False, **kwargs)

        else:
            samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
                                                 return_intermediates=True, **kwargs)

        return samples, intermediates

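    # log_images assembles a dict of visualization grids for the trainer: inputs,
    # reconstructions, the conditioning, a forward-diffusion row, samples (optionally
    # with quantized x0), inpainting/outpainting demos using a centered square mask,
    # and a progressive-denoising row.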
    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
                   quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
                   plot_diffusion_rows=True, **kwargs):

        use_ddim = ddim_steps is not None

        log = {}
        z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
                                           return_first_stage_outputs=True,
                                           force_c_encode=True,
                                           return_original_cond=True,
                                           bs=N)
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        log["inputs"] = x
        log["reconstruction"] = xrec
        if self.model.conditioning_key is not None:
            if hasattr(self.cond_stage_model, "decode"):
                xc = self.cond_stage_model.decode(c)
                log["conditioning"] = xc
            elif self.cond_stage_key in ["caption"]:
                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"])
                log["conditioning"] = xc
            elif self.cond_stage_key == 'class_label':
                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
                log['conditioning'] = xc
            elif isimage(xc):
                log["conditioning"] = xc
            if ismap(xc):
                log["original_conditioning"] = self.to_rgb(xc)

        if plot_diffusion_rows:
            # get diffusion row
            diffusion_row = []
            z_start = z[:n_row]
            for t in range(self.num_timesteps):
                if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                    t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
                    t = t.to(self.device).long()
                    noise = torch.randn_like(z_start)
                    z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
                    diffusion_row.append(self.decode_first_stage(z_noisy))

            diffusion_row = torch.stack(diffusion_row)  # n_log_step, n_row, C, H, W
            diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
            diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
            diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
            log["diffusion_row"] = diffusion_grid

        if sample:
            # get denoise row
            with self.ema_scope("Plotting"):
                samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
                                                         ddim_steps=ddim_steps, eta=ddim_eta)
                # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
            x_samples = self.decode_first_stage(samples)
            log["samples"] = x_samples
            if plot_denoise_rows:
                denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
                log["denoise_row"] = denoise_grid

            if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
                    self.first_stage_model, IdentityFirstStage):
                # also display when quantizing x0 while sampling
                with self.ema_scope("Plotting Quantized Denoised"):
                    samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
                                                             ddim_steps=ddim_steps, eta=ddim_eta,
                                                             quantize_denoised=True)
                    # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
                    #                                      quantize_denoised=True)
                x_samples = self.decode_first_stage(samples.to(self.device))
                log["samples_x0_quantized"] = x_samples

            if inpaint:
                # make a simple center square
                h, w = z.shape[2], z.shape[3]
                mask = torch.ones(N, h, w).to(self.device)
                # zeros will be filled in
                mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
                mask = mask[:, None, ...]
                with self.ema_scope("Plotting Inpaint"):
                    samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
                                                 ddim_steps=ddim_steps, x0=z[:N], mask=mask)
                x_samples = self.decode_first_stage(samples.to(self.device))
                log["samples_inpainting"] = x_samples
                log["mask"] = mask

                # outpaint
                with self.ema_scope("Plotting Outpaint"):
                    samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
                                                 ddim_steps=ddim_steps, x0=z[:N], mask=mask)
                x_samples = self.decode_first_stage(samples.to(self.device))
                log["samples_outpainting"] = x_samples

        if plot_progressive_rows:
            with self.ema_scope("Plotting Progressives"):
                img, progressives = self.progressive_denoising(c,
                                                               shape=(self.channels, self.image_size, self.image_size),
                                                               batch_size=N)
            prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
            log["progressive_row"] = prog_row

        if return_keys:
            if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
                return log
            else:
                return {key: log[key] for key in return_keys}
        return log

    def configure_optimizers(self):
        lr = self.learning_rate
        params = list(self.model.parameters())
        if self.cond_stage_trainable:
            print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
            params = params + list(self.cond_stage_model.parameters())
        if self.learn_logvar:
            print('Diffusion model optimizing logvar')
            params.append(self.logvar)
        opt = torch.optim.AdamW(params, lr=lr)
        if self.use_scheduler:
            assert 'target' in self.scheduler_config
            scheduler = instantiate_from_config(self.scheduler_config)

            print("Setting up LambdaLR scheduler...")
            scheduler = [
                {
                    'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
                    'interval': 'step',
                    'frequency': 1
                }]
            return [opt], scheduler
        return opt

    @torch.no_grad()
    def to_rgb(self, x):
        x = x.float()
        if not hasattr(self, "colorize"):
            self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
        x = nn.functional.conv2d(x, weight=self.colorize)
        x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
        return x


+
class DiffusionWrapperV1(pl.LightningModule):
|
1389 |
+
def __init__(self, diff_model_config, conditioning_key):
|
1390 |
+
super().__init__()
|
1391 |
+
self.diffusion_model = instantiate_from_config(diff_model_config)
|
1392 |
+
self.conditioning_key = conditioning_key
|
1393 |
+
assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm']
|
1394 |
+
|
1395 |
+
def forward(self, x, t, c_concat: list = None, c_crossattn: list = None):
|
1396 |
+
if self.conditioning_key is None:
|
1397 |
+
out = self.diffusion_model(x, t)
|
1398 |
+
elif self.conditioning_key == 'concat':
|
1399 |
+
xc = torch.cat([x] + c_concat, dim=1)
|
1400 |
+
out = self.diffusion_model(xc, t)
|
1401 |
+
elif self.conditioning_key == 'crossattn':
|
1402 |
+
cc = torch.cat(c_crossattn, 1)
|
1403 |
+
out = self.diffusion_model(x, t, context=cc)
|
1404 |
+
elif self.conditioning_key == 'hybrid':
|
1405 |
+
xc = torch.cat([x] + c_concat, dim=1)
|
1406 |
+
cc = torch.cat(c_crossattn, 1)
|
1407 |
+
out = self.diffusion_model(xc, t, context=cc)
|
1408 |
+
elif self.conditioning_key == 'adm':
|
1409 |
+
cc = c_crossattn[0]
|
1410 |
+
out = self.diffusion_model(x, t, y=cc)
|
1411 |
+
else:
|
1412 |
+
raise NotImplementedError()
|
1413 |
+
|
1414 |
+
return out
|
1415 |
+
|
1416 |
+
|
1417 |
+
class Layout2ImgDiffusionV1(LatentDiffusionV1):
|
1418 |
+
# TODO: move all layout-specific hacks to this class
|
1419 |
+
def __init__(self, cond_stage_key, *args, **kwargs):
|
1420 |
+
assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"'
|
1421 |
+
super().__init__(*args, cond_stage_key=cond_stage_key, **kwargs)
|
1422 |
+
|
1423 |
+
def log_images(self, batch, N=8, *args, **kwargs):
|
1424 |
+
logs = super().log_images(*args, batch=batch, N=N, **kwargs)
|
1425 |
+
|
1426 |
+
key = 'train' if self.training else 'validation'
|
1427 |
+
dset = self.trainer.datamodule.datasets[key]
|
1428 |
+
mapper = dset.conditional_builders[self.cond_stage_key]
|
1429 |
+
|
1430 |
+
bbox_imgs = []
|
1431 |
+
map_fn = lambda catno: dset.get_textual_label(dset.get_category_id(catno))
|
1432 |
+
for tknzd_bbox in batch[self.cond_stage_key][:N]:
|
1433 |
+
bboximg = mapper.plot(tknzd_bbox.detach().cpu(), map_fn, (256, 256))
|
1434 |
+
bbox_imgs.append(bboximg)
|
1435 |
+
|
1436 |
+
cond_img = torch.stack(bbox_imgs, dim=0)
|
1437 |
+
logs['bbox_image'] = cond_img
|
1438 |
+
return logs
|
1439 |
+
|
1440 |
+
ldm.models.diffusion.ddpm.DDPMV1 = DDPMV1
|
1441 |
+
ldm.models.diffusion.ddpm.LatentDiffusionV1 = LatentDiffusionV1
|
1442 |
+
ldm.models.diffusion.ddpm.DiffusionWrapperV1 = DiffusionWrapperV1
|
1443 |
+
ldm.models.diffusion.ddpm.Layout2ImgDiffusionV1 = Layout2ImgDiffusionV1
|
extensions-builtin/LDSR/vqvae_quantize.py
ADDED
@@ -0,0 +1,147 @@
# Vendored from https://raw.githubusercontent.com/CompVis/taming-transformers/24268930bf1dce879235a7fddd0b2355b84d7ea6/taming/modules/vqvae/quantize.py,
# where the license is as follows:
#
# Copyright (c) 2020 Patrick Esser and Robin Rombach and Björn Ommer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.

import torch
import torch.nn as nn
import numpy as np
from einops import rearrange


class VectorQuantizer2(nn.Module):
    """
    Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly
    avoids costly matrix multiplications and allows for post-hoc remapping of indices.
    """

    # NOTE: due to a bug the beta term was applied to the wrong term. for
    # backwards compatibility we use the buggy version by default, but you can
    # specify legacy=False to fix it.
    def __init__(self, n_e, e_dim, beta, remap=None, unknown_index="random",
                 sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.e_dim = e_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.e_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                  f"Using {self.unknown_index} for unknown indices.")
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z, temp=None, rescale_logits=False, return_logits=False):
        assert temp is None or temp == 1.0, "Only for interface compatible with Gumbel"
        assert rescale_logits is False, "Only for interface compatible with Gumbel"
        assert return_logits is False, "Only for interface compatible with Gumbel"
        # reshape z -> (batch, height, width, channel) and flatten
        z = rearrange(z, 'b c h w -> b h w c').contiguous()
        z_flattened = z.view(-1, self.e_dim)
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z

        d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
            torch.sum(self.embedding.weight ** 2, dim=1) - 2 * \
            torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n'))

        min_encoding_indices = torch.argmin(d, dim=1)
        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + \
                   torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * \
                   torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = rearrange(z_q, 'b h w c -> b c h w').contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(
                z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

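    # get_codebook_entry maps flat codebook indices back to their embedding vectors
    # and, when a target shape is given, restores the (batch, channel, height, width)
    # layout; it is used when decoding from predicted code ids.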
    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
extensions-builtin/Lora/__pycache__/extra_networks_lora.cpython-310.pyc
ADDED
Binary file (2.69 kB).
extensions-builtin/Lora/__pycache__/lora.cpython-310.pyc
ADDED
Binary file (497 Bytes).
extensions-builtin/Lora/__pycache__/lora_patches.cpython-310.pyc
ADDED
Binary file (545 Bytes).
extensions-builtin/Lora/__pycache__/network.cpython-310.pyc
ADDED
Binary file (6.43 kB).
extensions-builtin/Lora/__pycache__/networks.cpython-310.pyc
ADDED
Binary file (6.55 kB).
extensions-builtin/Lora/__pycache__/preload.cpython-310.pyc
ADDED
Binary file (703 Bytes).
extensions-builtin/Lora/__pycache__/ui_edit_user_metadata.cpython-310.pyc
ADDED
Binary file (7.66 kB).
extensions-builtin/Lora/__pycache__/ui_extra_networks_lora.cpython-310.pyc
ADDED
Binary file (3.22 kB).
extensions-builtin/Lora/extra_networks_lora.py
ADDED
@@ -0,0 +1,67 @@
from modules import extra_networks, shared
import networks


class ExtraNetworkLora(extra_networks.ExtraNetwork):
    def __init__(self):
        super().__init__('lora')

        self.errors = {}
        """mapping of network names to the number of errors the network had during operation"""

    def activate(self, p, params_list):
        additional = shared.opts.sd_lora

        self.errors.clear()

        if additional != "None" and additional in networks.available_networks and not any(x for x in params_list if x.items[0] == additional):
            p.all_prompts = [x + f"<lora:{additional}:{shared.opts.extra_networks_default_multiplier}>" for x in p.all_prompts]
            params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))

        names = []
        te_multipliers = []
        unet_multipliers = []
        dyn_dims = []
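        # Each prompt tag has the form <lora:name[:te[:unet[:dyn]]]>: positional
        # values set the text-encoder and UNet multipliers (unet defaults to te),
        # and named arguments te=, unet= and dyn= override the positional ones.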
        for params in params_list:
            assert params.items

            names.append(params.positional[0])

            te_multiplier = float(params.positional[1]) if len(params.positional) > 1 else 1.0
            te_multiplier = float(params.named.get("te", te_multiplier))

            unet_multiplier = float(params.positional[2]) if len(params.positional) > 2 else te_multiplier
            unet_multiplier = float(params.named.get("unet", unet_multiplier))

            dyn_dim = int(params.positional[3]) if len(params.positional) > 3 else None
            dyn_dim = int(params.named["dyn"]) if "dyn" in params.named else dyn_dim

            te_multipliers.append(te_multiplier)
            unet_multipliers.append(unet_multiplier)
            dyn_dims.append(dyn_dim)

        networks.load_networks(names, te_multipliers, unet_multipliers, dyn_dims)

        if shared.opts.lora_add_hashes_to_infotext:
            network_hashes = []
            for item in networks.loaded_networks:
                shorthash = item.network_on_disk.shorthash
                if not shorthash:
                    continue

                alias = item.mentioned_name
                if not alias:
                    continue

                alias = alias.replace(":", "").replace(",", "")

                network_hashes.append(f"{alias}: {shorthash}")

            if network_hashes:
                p.extra_generation_params["Lora hashes"] = ", ".join(network_hashes)

    def deactivate(self, p):
        if self.errors:
            p.comment("Networks with errors: " + ", ".join(f"{k} ({v})" for k, v in self.errors.items()))

        self.errors.clear()
extensions-builtin/Lora/lora.py
ADDED
@@ -0,0 +1,9 @@
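# Backward-compatibility shim: older extensions import the `lora` module directly,
# so its public names are re-exported here from the newer `networks` module.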
import networks

list_available_loras = networks.list_available_networks

available_loras = networks.available_networks
available_lora_aliases = networks.available_network_aliases
available_lora_hash_lookup = networks.available_network_hash_lookup
forbidden_lora_aliases = networks.forbidden_network_aliases
loaded_loras = networks.loaded_networks
extensions-builtin/Lora/lora_logger.py
ADDED
@@ -0,0 +1,33 @@
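# ColoredFormatter wraps the level name in ANSI escape sequences so log levels are
# color-coded on terminals that support them; the stream handler is attached only
# once so repeated imports don't produce duplicate log lines.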
import sys
import copy
import logging


class ColoredFormatter(logging.Formatter):
    COLORS = {
        "DEBUG": "\033[0;36m",  # CYAN
        "INFO": "\033[0;32m",  # GREEN
        "WARNING": "\033[0;33m",  # YELLOW
        "ERROR": "\033[0;31m",  # RED
        "CRITICAL": "\033[0;37;41m",  # WHITE ON RED
        "RESET": "\033[0m",  # RESET COLOR
    }

    def format(self, record):
        colored_record = copy.copy(record)
        levelname = colored_record.levelname
        seq = self.COLORS.get(levelname, self.COLORS["RESET"])
        colored_record.levelname = f"{seq}{levelname}{self.COLORS['RESET']}"
        return super().format(colored_record)


logger = logging.getLogger("lora")
logger.propagate = False


if not logger.handlers:
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(
        ColoredFormatter("[%(name)s]-%(levelname)s: %(message)s")
    )
    logger.addHandler(handler)
extensions-builtin/Lora/lora_patches.py
ADDED
@@ -0,0 +1,6 @@
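# No-op stub: the patching that LoraPatches performs in upstream webui appears to
# be handled elsewhere in this fork, so only the interface is kept for
# compatibility with code that constructs and undoes the patches.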
class LoraPatches:
    def __init__(self):
        pass

    def undo(self):
        pass
extensions-builtin/Lora/network.py
ADDED
@@ -0,0 +1,190 @@
from __future__ import annotations
import os
from collections import namedtuple
import enum

import torch.nn as nn
import torch.nn.functional as F

from modules import sd_models, cache, errors, hashes, shared

NetworkWeights = namedtuple('NetworkWeights', ['network_key', 'sd_key', 'w', 'sd_module'])

metadata_tags_order = {"ss_sd_model_name": 1, "ss_resolution": 2, "ss_clip_skip": 3, "ss_num_train_images": 10, "ss_tag_frequency": 20}


class SdVersion(enum.Enum):
    Unknown = 1
    SD1 = 2
    SD2 = 3
    SDXL = 4


class NetworkOnDisk:
    def __init__(self, name, filename):
        self.name = name
        self.filename = filename
        self.metadata = {}
        self.is_safetensors = os.path.splitext(filename)[1].lower() == ".safetensors"

        def read_metadata():
            metadata = sd_models.read_metadata_from_safetensors(filename)
            metadata.pop('ssmd_cover_images', None)  # those are cover images, and they are too big to display in UI as text

            return metadata

        if self.is_safetensors:
            try:
                self.metadata = cache.cached_data_for_file('safetensors-metadata', "lora/" + self.name, filename, read_metadata)
            except Exception as e:
                errors.display(e, f"reading lora {filename}")

        if self.metadata:
            m = {}
            for k, v in sorted(self.metadata.items(), key=lambda x: metadata_tags_order.get(x[0], 999)):
                m[k] = v

            self.metadata = m

        self.alias = self.metadata.get('ss_output_name', self.name)

        self.hash = None
        self.shorthash = None
        self.set_hash(
            self.metadata.get('sshs_model_hash') or
            hashes.sha256_from_cache(self.filename, "lora/" + self.name, use_addnet_hash=self.is_safetensors) or
            ''
        )

        self.sd_version = self.detect_version()

    def detect_version(self):
        if str(self.metadata.get('ss_base_model_version', "")).startswith("sdxl_"):
            return SdVersion.SDXL
        elif str(self.metadata.get('ss_v2', "")) == "True":
            return SdVersion.SD2
        elif len(self.metadata):
            return SdVersion.SD1

        return SdVersion.Unknown

    def set_hash(self, v):
        self.hash = v
        self.shorthash = self.hash[0:12]

        if self.shorthash:
            import networks
            networks.available_network_hash_lookup[self.shorthash] = self

    def read_hash(self):
        if not self.hash:
            self.set_hash(hashes.sha256(self.filename, "lora/" + self.name, use_addnet_hash=self.is_safetensors) or '')

    def get_alias(self):
        import networks
        if shared.opts.lora_preferred_name == "Filename" or self.alias.lower() in networks.forbidden_network_aliases:
            return self.name
        else:
            return self.alias


class Network:  # LoraModule
    def __init__(self, name, network_on_disk: NetworkOnDisk):
        self.name = name
        self.network_on_disk = network_on_disk
        self.te_multiplier = 1.0
        self.unet_multiplier = 1.0
        self.dyn_dim = None
        self.modules = {}
        self.bundle_embeddings = {}
        self.mtime = None

        self.mentioned_name = None
        """the text that was used to add the network to prompt - can be either name or an alias"""

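# ModuleType subclasses act as factories: each inspects a NetworkWeights bundle and
# returns a NetworkModule if it recognizes the weight layout. NetworkModule records
# the wrapped layer's functional op (conv2d / linear / layer_norm / group_norm) and
# its kwargs so forward() can re-run the layer with LoRA-adjusted weights.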
106 |
+
class ModuleType:
|
107 |
+
def create_module(self, net: Network, weights: NetworkWeights) -> Network | None:
|
108 |
+
return None
|
109 |
+
|
110 |
+
|
111 |
+
class NetworkModule:
|
112 |
+
def __init__(self, net: Network, weights: NetworkWeights):
|
113 |
+
self.network = net
|
114 |
+
self.network_key = weights.network_key
|
115 |
+
self.sd_key = weights.sd_key
|
116 |
+
self.sd_module = weights.sd_module
|
117 |
+
|
118 |
+
if hasattr(self.sd_module, 'weight'):
|
119 |
+
self.shape = self.sd_module.weight.shape
|
120 |
+
|
121 |
+
self.ops = None
|
122 |
+
self.extra_kwargs = {}
|
123 |
+
if isinstance(self.sd_module, nn.Conv2d):
|
124 |
+
self.ops = F.conv2d
|
125 |
+
self.extra_kwargs = {
|
126 |
+
'stride': self.sd_module.stride,
|
127 |
+
'padding': self.sd_module.padding
|
128 |
+
}
|
129 |
+
elif isinstance(self.sd_module, nn.Linear):
|
130 |
+
self.ops = F.linear
|
131 |
+
elif isinstance(self.sd_module, nn.LayerNorm):
|
132 |
+
self.ops = F.layer_norm
|
133 |
+
self.extra_kwargs = {
|
134 |
+
'normalized_shape': self.sd_module.normalized_shape,
|
135 |
+
'eps': self.sd_module.eps
|
136 |
+
}
|
137 |
+
elif isinstance(self.sd_module, nn.GroupNorm):
|
138 |
+
self.ops = F.group_norm
|
139 |
+
self.extra_kwargs = {
|
140 |
+
'num_groups': self.sd_module.num_groups,
|
141 |
+
'eps': self.sd_module.eps
|
142 |
+
}
|
143 |
+
|
144 |
+
self.dim = None
|
145 |
+
self.bias = weights.w.get("bias")
|
146 |
+
self.alpha = weights.w["alpha"].item() if "alpha" in weights.w else None
|
147 |
+
self.scale = weights.w["scale"].item() if "scale" in weights.w else None
|
148 |
+
|
149 |
+
def multiplier(self):
|
150 |
+
if 'transformer' in self.sd_key[:20]:
|
151 |
+
return self.network.te_multiplier
|
152 |
+
else:
|
153 |
+
return self.network.unet_multiplier
|
154 |
+
|
155 |
+
def calc_scale(self):
|
156 |
+
if self.scale is not None:
|
157 |
+
return self.scale
|
158 |
+
if self.dim is not None and self.alpha is not None:
|
159 |
+
return self.alpha / self.dim
|
160 |
+
|
161 |
+
return 1.0
|
162 |
+
|
163 |
+
def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None):
|
164 |
+
if self.bias is not None:
|
165 |
+
updown = updown.reshape(self.bias.shape)
|
166 |
+
updown += self.bias.to(orig_weight.device, dtype=updown.dtype)
|
167 |
+
updown = updown.reshape(output_shape)
|
168 |
+
|
169 |
+
if len(output_shape) == 4:
|
170 |
+
updown = updown.reshape(output_shape)
|
171 |
+
|
172 |
+
if orig_weight.size().numel() == updown.size().numel():
|
173 |
+
updown = updown.reshape(orig_weight.shape)
|
174 |
+
|
175 |
+
if ex_bias is not None:
|
176 |
+
ex_bias = ex_bias * self.multiplier()
|
177 |
+
|
178 |
+
return updown * self.calc_scale() * self.multiplier(), ex_bias
|
179 |
+
|
180 |
+
def calc_updown(self, target):
|
181 |
+
raise NotImplementedError()
|
182 |
+
|
183 |
+
def forward(self, x, y):
|
184 |
+
"""A general forward implementation for all modules"""
|
185 |
+
if self.ops is None:
|
186 |
+
raise NotImplementedError()
|
187 |
+
else:
|
188 |
+
updown, ex_bias = self.calc_updown(self.sd_module.weight)
|
189 |
+
return y + self.ops(x, weight=updown, bias=ex_bias, **self.extra_kwargs)
|
190 |
+
|
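For context, `calc_updown` is the hook concrete adapter types override; everything else in `NetworkModule` (alpha/dim scaling, bias handling, shape fix-ups) is shared. Below is a minimal sketch of how a classic LoRA module for linear layers would plug into this contract; it builds on the `NetworkModule` class above, and the class name and the `lora_up.weight`/`lora_down.weight` keys are illustrative assumptions (kohya-style naming), not part of this commit:

class NetworkModuleLora(NetworkModule):  # hypothetical example, not in this diff
    def __init__(self, net: Network, weights: NetworkWeights):
        super().__init__(net, weights)
        self.up = weights.w["lora_up.weight"]      # (out_features, rank)
        self.down = weights.w["lora_down.weight"]  # (rank, in_features)
        self.dim = self.down.shape[0]              # rank, consumed by calc_scale()

    def calc_updown(self, orig_weight):
        # delta-W = up @ down, moved to the target weight's device/dtype;
        # finalize_updown() then applies alpha/dim scaling and the multiplier.
        up = self.up.to(orig_weight.device, dtype=orig_weight.dtype)
        down = self.down.to(orig_weight.device, dtype=orig_weight.dtype)
        updown = up @ down
        output_shape = [up.size(0), down.size(1)]
        return self.finalize_updown(updown, orig_weight, output_shape)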
extensions-builtin/Lora/networks.py
ADDED
@@ -0,0 +1,219 @@
import os
import re

import lora_patches
import functools
import network

import torch
from typing import Union

from modules import shared, sd_models, errors, scripts
from ldm_patched.modules.utils import load_torch_file
from ldm_patched.modules.sd import load_lora_for_models


@functools.lru_cache(maxsize=5)
def load_lora_state_dict(filename):
    return load_torch_file(filename, safe_load=True)


def convert_diffusers_name_to_compvis(key, is_sd2):
    pass


def assign_network_names_to_compvis_modules(sd_model):
    pass


def load_network(name, network_on_disk):
    net = network.Network(name, network_on_disk)
    net.mtime = os.path.getmtime(network_on_disk.filename)

    return net


def purge_networks_from_memory():
    pass


def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None):
    global lora_state_dict_cache

    current_sd = sd_models.model_data.get_sd_model()
    if current_sd is None:
        return

    loaded_networks.clear()

    networks_on_disk = [available_networks.get(name, None) if name.lower() in forbidden_network_aliases else available_network_aliases.get(name, None) for name in names]
    if any(x is None for x in networks_on_disk):
        list_available_networks()
        networks_on_disk = [available_networks.get(name, None) if name.lower() in forbidden_network_aliases else available_network_aliases.get(name, None) for name in names]

    for i, (network_on_disk, name) in enumerate(zip(networks_on_disk, names)):
        try:
            net = load_network(name, network_on_disk)
        except Exception as e:
            errors.display(e, f"loading network {network_on_disk.filename}")
            continue
        net.mentioned_name = name
        network_on_disk.read_hash()
        loaded_networks.append(net)

    compiled_lora_targets = []
    for a, b, c in zip(networks_on_disk, unet_multipliers, te_multipliers):
        compiled_lora_targets.append([a.filename, b, c])

    compiled_lora_targets_hash = str(compiled_lora_targets)

    if current_sd.current_lora_hash == compiled_lora_targets_hash:
        return

    current_sd.current_lora_hash = compiled_lora_targets_hash
    current_sd.forge_objects.unet = current_sd.forge_objects_original.unet
    current_sd.forge_objects.clip = current_sd.forge_objects_original.clip

    for filename, strength_model, strength_clip in compiled_lora_targets:
        lora_sd = load_lora_state_dict(filename)
        current_sd.forge_objects.unet, current_sd.forge_objects.clip = load_lora_for_models(
            current_sd.forge_objects.unet, current_sd.forge_objects.clip, lora_sd, strength_model, strength_clip,
            filename=filename)

    current_sd.forge_objects_after_applying_lora = current_sd.forge_objects.shallow_copy()
    return


def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]):
    pass


def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]):
    pass


def network_forward(org_module, input, original_forward):
    pass


def network_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]):
    pass


def network_Linear_forward(self, input):
    pass


def network_Linear_load_state_dict(self, *args, **kwargs):
    pass


def network_Conv2d_forward(self, input):
    pass


def network_Conv2d_load_state_dict(self, *args, **kwargs):
    pass


def network_GroupNorm_forward(self, input):
    pass


def network_GroupNorm_load_state_dict(self, *args, **kwargs):
    pass


def network_LayerNorm_forward(self, input):
    pass


def network_LayerNorm_load_state_dict(self, *args, **kwargs):
    pass


def network_MultiheadAttention_forward(self, *args, **kwargs):
    pass


def network_MultiheadAttention_load_state_dict(self, *args, **kwargs):
    pass


def list_available_networks():
    available_networks.clear()
    available_network_aliases.clear()
    forbidden_network_aliases.clear()
    available_network_hash_lookup.clear()
    forbidden_network_aliases.update({"none": 1, "Addams": 1})

    os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)

    candidates = list(shared.walk_files(shared.cmd_opts.lora_dir, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
    for filename in candidates:
        if os.path.isdir(filename):
            continue

        name = os.path.splitext(os.path.basename(filename))[0]
        try:
            entry = network.NetworkOnDisk(name, filename)
        except OSError:  # should catch FileNotFoundError and PermissionError etc.
            errors.report(f"Failed to load network {name} from {filename}", exc_info=True)
            continue

        available_networks[name] = entry

        if entry.alias in available_network_aliases:
            forbidden_network_aliases[entry.alias.lower()] = 1

        available_network_aliases[name] = entry
        available_network_aliases[entry.alias] = entry


re_network_name = re.compile(r"(.*)\s*\([0-9a-fA-F]+\)")


def infotext_pasted(infotext, params):
    if "AddNet Module 1" in [x[1] for x in scripts.scripts_txt2img.infotext_fields]:
        return  # if the other extension is active, it will handle those fields, no need to do anything

    added = []

    for k in params:
        if not k.startswith("AddNet Model "):
            continue

        num = k[13:]

        if params.get("AddNet Module " + num) != "LoRA":
            continue

        name = params.get("AddNet Model " + num)
        if name is None:
            continue

        m = re_network_name.match(name)
        if m:
            name = m.group(1)

        multiplier = params.get("AddNet Weight A " + num, "1.0")

        added.append(f"<lora:{name}:{multiplier}>")

    if added:
        params["Prompt"] += "\n" + "".join(added)


originals: lora_patches.LoraPatches = None

extra_network_lora = None

available_networks = {}
available_network_aliases = {}
loaded_networks = []
loaded_bundle_embeddings = {}
networks_in_memory = {}
available_network_hash_lookup = {}
forbidden_network_aliases = {}

list_available_networks()
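As a usage note, `infotext_pasted` above translates AddNet-style generation parameters into the webui's own `<lora:name:multiplier>` prompt syntax. A rough illustration with made-up values, assuming the additional-networks extension is not installed (otherwise the guard at the top returns early):

params = {
    "Prompt": "a photo of a cat",
    "AddNet Module 1": "LoRA",
    "AddNet Model 1": "myLora(a1b2c3d4e5f6)",  # hypothetical model name and hash
    "AddNet Weight A 1": "0.8",
}
# infotext_pasted("", params) strips the trailing "(hash)" via re_network_name
# and appends the tag on a new line, leaving:
# params["Prompt"] == "a photo of a cat\n<lora:myLora:0.8>"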
extensions-builtin/Lora/preload.py
ADDED
@@ -0,0 +1,8 @@
import os
from modules import paths
from modules.paths_internal import normalized_filepath


def preload(parser):
    parser.add_argument("--lora-dir", type=normalized_filepath, help="Path to directory with Lora networks.", default=os.path.join(paths.models_path, 'Lora'))
    parser.add_argument("--lyco-dir-backcompat", type=normalized_filepath, help="Path to directory with LyCORIS networks (for backwards compatibility; can also use --lyco-dir).", default=os.path.join(paths.models_path, 'LyCORIS'))
extensions-builtin/Lora/scripts/__pycache__/lora_script.cpython-310.pyc
ADDED
Binary file (4.54 kB)
extensions-builtin/Lora/scripts/lora_script.py
ADDED
@@ -0,0 +1,100 @@
import re

import gradio as gr
from fastapi import FastAPI

import network
import networks
import lora  # noqa:F401
import lora_patches
import extra_networks_lora
import ui_extra_networks_lora
from modules import script_callbacks, ui_extra_networks, extra_networks, shared


def unload():
    networks.originals.undo()


def before_ui():
    ui_extra_networks.register_page(ui_extra_networks_lora.ExtraNetworksPageLora())

    networks.extra_network_lora = extra_networks_lora.ExtraNetworkLora()
    extra_networks.register_extra_network(networks.extra_network_lora)


networks.originals = lora_patches.LoraPatches()

script_callbacks.on_model_loaded(networks.assign_network_names_to_compvis_modules)
script_callbacks.on_script_unloaded(unload)
script_callbacks.on_before_ui(before_ui)
script_callbacks.on_infotext_pasted(networks.infotext_pasted)


shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
    "sd_lora": shared.OptionInfo("None", "Add network to prompt", gr.Dropdown, lambda: {"choices": ["None", *networks.available_networks]}, refresh=networks.list_available_networks),
    "lora_preferred_name": shared.OptionInfo("Alias from file", "When adding to prompt, refer to Lora by", gr.Radio, {"choices": ["Alias from file", "Filename"]}),
    "lora_add_hashes_to_infotext": shared.OptionInfo(True, "Add Lora hashes to infotext"),
    "lora_show_all": shared.OptionInfo(False, "Always show all networks on the Lora page").info("otherwise, those detected as for incompatible version of Stable Diffusion will be hidden"),
    "lora_hide_unknown_for_versions": shared.OptionInfo([], "Hide networks of unknown versions for model versions", gr.CheckboxGroup, {"choices": ["SD1", "SD2", "SDXL"]}),
    "lora_in_memory_limit": shared.OptionInfo(0, "Number of Lora networks to keep cached in memory", gr.Number, {"precision": 0}),
    "lora_not_found_warning_console": shared.OptionInfo(False, "Lora not found warning in console"),
    "lora_not_found_gradio_warning": shared.OptionInfo(False, "Lora not found warning popup in webui"),
}))


shared.options_templates.update(shared.options_section(('compatibility', "Compatibility"), {
    "lora_functional": shared.OptionInfo(False, "Lora/Networks: use old method that takes longer when you have multiple Loras active and produces same results as kohya-ss/sd-webui-additional-networks extension"),
}))


def create_lora_json(obj: network.NetworkOnDisk):
    return {
        "name": obj.name,
        "alias": obj.alias,
        "path": obj.filename,
        "metadata": obj.metadata,
    }


def api_networks(_: gr.Blocks, app: FastAPI):
    @app.get("/sdapi/v1/loras")
    async def get_loras():
        return [create_lora_json(obj) for obj in networks.available_networks.values()]

    @app.post("/sdapi/v1/refresh-loras")
    async def refresh_loras():
        return networks.list_available_networks()


script_callbacks.on_app_started(api_networks)

re_lora = re.compile("<lora:([^:]+):")


def infotext_pasted(infotext, d):
    hashes = d.get("Lora hashes")
    if not hashes:
        return

    hashes = [x.strip().split(':', 1) for x in hashes.split(",")]
    hashes = {x[0].strip().replace(",", ""): x[1].strip() for x in hashes}

    def network_replacement(m):
        alias = m.group(1)
        shorthash = hashes.get(alias)
        if shorthash is None:
            return m.group(0)

        network_on_disk = networks.available_network_hash_lookup.get(shorthash)
        if network_on_disk is None:
            return m.group(0)

        return f'<lora:{network_on_disk.get_alias()}:'

    d["Prompt"] = re.sub(re_lora, network_replacement, d["Prompt"])


script_callbacks.on_infotext_pasted(infotext_pasted)

shared.opts.onchange("lora_in_memory_limit", networks.purge_networks_from_memory)
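One note on the second `infotext_pasted` above: it renames `<lora:...>` tags by hash rather than by name, so a prompt shared from another machine resolves to the local copy of the same file even if it was saved under a different filename. An illustration with hypothetical values:

d = {
    "Prompt": "portrait <lora:their_name:0.7>",
    "Lora hashes": "their_name: a1b2c3d4e5f6",  # made-up shorthash
}
# If a local network with shorthash a1b2c3d4e5f6 exists and its get_alias()
# returns "my_name", infotext_pasted("", d) rewrites the prompt to:
# "portrait <lora:my_name:0.7>"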