init
app.py CHANGED
@@ -199,7 +199,9 @@ class ControlNetDepthDesignModelMulti:
                               weight_name="ip-adapter_sd15.bin")
         self.pipe.set_ip_adapter_scale(0.4)
         self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
+        print(self.pipe.device)
         self.pipe = self.pipe.to(device)
+        print(self.pipe.device)
         self.guide_pipe = StableDiffusionXLPipeline.from_pretrained("segmind/SSD-1B",
                                                                     torch_dtype=dtype, use_safetensors=True, variant="fp16")
         self.guide_pipe = self.guide_pipe.to(device)
@@ -213,6 +215,17 @@ class ControlNetDepthDesignModelMulti:
         self.depth_feature_extractor, self.depth_estimator = get_depth_pipeline()
         self.depth_estimator = self.depth_estimator.to(device)
 
+        if torch.cuda.is_available():
+            # Print the number of available GPUs
+            print("Available GPU devices:")
+            for i in range(torch.cuda.device_count()):
+                print(f"Device {i}: {torch.cuda.get_device_name(i)}")
+        else:
+            print("No GPU devices available. Using CPU.")
+
+        print(self.depth_estimator.device)
+        print(self.pipe.device)
+
     @spaces.GPU
     def generate_design(self, empty_room_image: Image, prompt: str, guidance_scale: int = 10, num_steps: int = 50, strength: float =0.9, img_size: int = 640) -> Image:
         """
@@ -226,16 +239,6 @@ class ControlNetDepthDesignModelMulti:
         If the size is not the same the submission will fail.
         """
         print(prompt)
-        if torch.cuda.is_available():
-            # Print the number of available GPUs
-            print("Available GPU devices:")
-            for i in range(torch.cuda.device_count()):
-                print(f"Device {i}: {torch.cuda.get_device_name(i)}")
-        else:
-            print("No GPU devices available. Using CPU.")
-
-        print(self.depth_estimator.device)
-        print(self.pipe.device)
         flush()
         self.generator = torch.Generator(device=device).manual_seed(self.seed)
 
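Taken together, the commit adds print(self.pipe.device) before and after self.pipe.to(device) and moves the CUDA diagnostics out of the @spaces.GPU-decorated generate_design into the model's setup code. For reference, the relocated diagnostics block boils down to the standalone sketch below; the module-level device variable is an assumption standing in for whatever app.py defines elsewhere, since that definition is not part of this diff.

import torch

# Assumed stand-in for the module-level device used throughout app.py.
device = "cuda" if torch.cuda.is_available() else "cpu"

if torch.cuda.is_available():
    # Report every visible CUDA device by index and name, as in the added block.
    print("Available GPU devices:")
    for i in range(torch.cuda.device_count()):
        print(f"Device {i}: {torch.cuda.get_device_name(i)}")
else:
    print("No GPU devices available. Using CPU.")

Running this on a machine without CUDA simply prints the fallback message, matching the else branch in the diff.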