Add openpose model
- app_base.py +10 -0
- assets/README.md +1 -0
- assets/people.jpg +3 -0
- model.py +30 -4
app_base.py CHANGED

@@ -122,6 +122,16 @@ def create_demo(model: Model) -> gr.Blocks:
             0,
             True,
         ],
+        [
+            "assets/people.jpg",
+            "A couple, 4k photo, highly detailed",
+            "openpose",
+            "Photographic",
+            5.0,
+            1.0,
+            0,
+            True,
+        ],
     ]
 
     with gr.Blocks() as demo:
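For context, each row in this examples list maps positionally onto the demo's input components via gr.Examples. A minimal sketch of that wiring follows; the component variable names and labels are assumptions for illustration, not the variables actually used in app_base.py:

# Illustrative sketch: how example rows like the one added above feed
# gr.Examples. Component names/labels here are hypothetical.
import gradio as gr

examples = [
    # One value per input component, in order: image, prompt, adapter name,
    # style, then the remaining numeric settings, seed, and preprocess flag.
    [
        "assets/people.jpg",
        "A couple, 4k photo, highly detailed",
        "openpose",
        "Photographic",
        5.0,
        1.0,
        0,
        True,
    ],
]

with gr.Blocks() as demo:
    image = gr.Image(type="filepath")
    prompt = gr.Textbox(label="Prompt")
    adapter_name = gr.Dropdown(label="Adapter")
    style = gr.Dropdown(label="Style")
    guidance_scale = gr.Slider(label="Guidance scale")
    adapter_scale = gr.Slider(label="Adapter conditioning scale")
    seed = gr.Number(label="Seed", precision=0)
    apply_preprocess = gr.Checkbox(label="Apply preprocess")
    gr.Examples(
        examples=examples,
        inputs=[image, prompt, adapter_name, style, guidance_scale,
                adapter_scale, seed, apply_preprocess],
    )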
assets/README.md CHANGED

@@ -5,3 +5,4 @@ These images were from the following URL:
 - https://huggingface.co/Adapter/t2iadapter/resolve/main/figs_SDXLV1.0/org_lin.jpg
 - https://huggingface.co/Adapter/t2iadapter/resolve/main/figs_SDXLV1.0/org_mid.jpg
 - https://huggingface.co/Adapter/t2iadapter/resolve/main/figs_SDXLV1.0/org_zeo.jpg
+- https://huggingface.co/Adapter/t2iadapter/resolve/main/people.jpg
assets/people.jpg ADDED (binary, stored via Git LFS)
model.py CHANGED

@@ -2,12 +2,14 @@ import gc
 import os
 from abc import ABC, abstractmethod
 
+import numpy as np
 import PIL.Image
 import torch
 from controlnet_aux import (
     CannyDetector,
     LineartDetector,
     MidasDetector,
+    OpenposeDetector,
     PidiNetDetector,
     ZoeDetector,
 )
@@ -83,6 +85,7 @@ ADAPTER_REPO_IDS = {
     "lineart": "TencentARC/t2i-adapter-lineart-sdxl-1.0",
     "depth-midas": "TencentARC/t2i-adapter-depth-midas-sdxl-1.0",
     "depth-zoe": "TencentARC/t2i-adapter-depth-zoe-sdxl-1.0",
+    "openpose": "TencentARC/t2i-adapter-openpose-sdxl-1.0",
     # "recolor": "TencentARC/t2i-adapter-recolor-sdxl-1.0",
 }
 ADAPTER_NAMES = list(ADAPTER_REPO_IDS.keys())
@@ -114,7 +117,8 @@ class LineartPreprocessor(Preprocessor):
         self.model = LineartDetector.from_pretrained("lllyasviel/Annotators")
 
     def to(self, device: torch.device | str) -> Preprocessor:
-        return self.model.to(device)
+        self.model.to(device)
+        return self
 
     def __call__(self, image: PIL.Image.Image) -> PIL.Image.Image:
         return self.model(image, detect_resolution=384, image_resolution=1024)
@@ -127,18 +131,35 @@ class MidasPreprocessor(Preprocessor):
         )
 
     def to(self, device: torch.device | str) -> Preprocessor:
-        return self.model.to(device)
+        self.model.to(device)
+        return self
 
     def __call__(self, image: PIL.Image.Image) -> PIL.Image.Image:
         return self.model(image, detect_resolution=512, image_resolution=1024)
 
 
+class OpenposePreprocessor(Preprocessor):
+    def __init__(self):
+        self.model = OpenposeDetector.from_pretrained("lllyasviel/Annotators")
+
+    def to(self, device: torch.device | str) -> Preprocessor:
+        self.model.to(device)
+        return self
+
+    def __call__(self, image: PIL.Image.Image) -> PIL.Image.Image:
+        out = self.model(image, detect_resolution=512, image_resolution=1024)
+        out = np.array(out)[:, :, ::-1]
+        out = PIL.Image.fromarray(np.uint8(out))
+        return out
+
+
 class PidiNetPreprocessor(Preprocessor):
     def __init__(self):
         self.model = PidiNetDetector.from_pretrained("lllyasviel/Annotators")
 
     def to(self, device: torch.device | str) -> Preprocessor:
-        return self.model.to(device)
+        self.model.to(device)
+        return self
 
     def __call__(self, image: PIL.Image.Image) -> PIL.Image.Image:
         return self.model(image, detect_resolution=512, image_resolution=1024, apply_filter=True)
@@ -159,7 +180,8 @@ class ZoePreprocessor(Preprocessor):
         )
 
     def to(self, device: torch.device | str) -> Preprocessor:
-        return self.model.to(device)
+        self.model.to(device)
+        return self
 
     def __call__(self, image: PIL.Image.Image) -> PIL.Image.Image:
         return self.model(image, gamma_corrected=True, image_resolution=1024)
@@ -175,6 +197,7 @@ if PRELOAD_PREPROCESSORS_IN_GPU_MEMORY:
         "lineart": LineartPreprocessor().to(device),
         "depth-midas": MidasPreprocessor().to(device),
         "depth-zoe": ZoePreprocessor().to(device),
+        "openpose": OpenposePreprocessor().to(device),
         "recolor": RecolorPreprocessor().to(device),
     }
 
@@ -188,6 +211,7 @@ elif PRELOAD_PREPROCESSORS_IN_CPU_MEMORY:
         "lineart": LineartPreprocessor(),
         "depth-midas": MidasPreprocessor(),
         "depth-zoe": ZoePreprocessor(),
+        "openpose": OpenposePreprocessor(),
         "recolor": RecolorPreprocessor(),
     }
 
@@ -207,6 +231,8 @@ else:
         return MidasPreprocessor()
     elif adapter_name == "depth-zoe":
         return ZoePreprocessor()
+    elif adapter_name == "openpose":
+        return OpenposePreprocessor()
     elif adapter_name == "recolor":
         return RecolorPreprocessor()
     else:
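For context, the new preprocessor can be exercised on its own before going through the demo. A minimal sketch, assuming this commit's model.py is on the import path and controlnet_aux is installed; note that to() now returns the Preprocessor itself (so calls can be chained), and that __call__ reverses the channel order of the detector's output, presumably to match what the openpose SDXL adapter expects:

# Standalone usage sketch for OpenposePreprocessor; the output filename is
# hypothetical.
import PIL.Image
import torch

from model import OpenposePreprocessor

device = "cuda" if torch.cuda.is_available() else "cpu"
# to() returns self, so construction and device placement can be chained.
preprocessor = OpenposePreprocessor().to(device)

image = PIL.Image.open("assets/people.jpg")
pose = preprocessor(image)  # PIL image of the detected pose skeleton
pose.save("people_pose.png")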