Upload 3 files
- dc.py +1 -0
- requirements.txt +6 -7
dc.py CHANGED

@@ -186,6 +186,7 @@ class GuiSD:
         dtype_model = torch.bfloat16 if model_type == "FLUX" else torch.float16

         if not os.path.exists(model_name):
+            print("debug", model_name, vae_model, task, controlnet_model)
             _ = download_diffuser_repo(
                 repo_name=model_name,
                 model_type=model_type,
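For context, the only change to dc.py is the new debug print, which logs the model inputs just before the guarded Hub download runs. A minimal sketch of that path, assuming download_diffuser_repo ultimately defers to huggingface_hub; the helper name maybe_download_model and the snapshot_download call are illustrative, not taken from dc.py:

import os
from huggingface_hub import snapshot_download

def maybe_download_model(model_name, vae_model, task, controlnet_model, model_type):
    # Only reach out to the Hub when model_name is not already a local checkpoint/path.
    if not os.path.exists(model_name):
        # Mirrors the added debug line: log what is about to be resolved.
        print("debug", model_name, vae_model, task, controlnet_model)
        # Assumed download step; the real download_diffuser_repo also receives model_type.
        snapshot_download(repo_id=model_name)
    return model_name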
requirements.txt CHANGED

@@ -1,6 +1,6 @@
-stablepy==0.6.
-diffusers==0.31.0
-transformers==4.47.1
+stablepy==0.6.2
+#diffusers==0.31.0
+#transformers==4.47.1
 accelerate
 invisible_watermark
 datasets
@@ -8,10 +8,9 @@ torch==2.4.0
 numpy<2
 gdown
 opencv-python
-
-
-
-huggingface_hub==0.29.3
+huggingface_hub
+hf_xet
+hf_transfer
 scikit-build-core
 https://github.com/abetlen/llama-cpp-python/releases/download/v0.3.4-cu124/llama_cpp_python-0.3.4-cp310-cp310-linux_x86_64.whl
 git+https://github.com/Maximilian-Winter/llama-cpp-agent
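The requirements change unpins huggingface_hub and brings in the hf_xet and hf_transfer download backends in place of the old huggingface_hub==0.29.3 pin. A small usage sketch of how those extras are typically enabled, assuming the Space downloads models through huggingface_hub's standard API; the repo id below is a placeholder:

import os

# hf_transfer is opt-in: huggingface_hub only uses it when this flag is set
# before the library is imported.
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

from huggingface_hub import snapshot_download

# Recent huggingface_hub releases use the hf_xet backend automatically for
# Xet-enabled repos once the hf_xet package is installed; no code change needed.
snapshot_download(repo_id="some-org/some-model")  # placeholder repo id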