Commit 1893705
Parent(s): db52967

Install required modules

Files changed:
- __init__.py        +1 -0
- app.py             +12 -1
- modules/layers.py  +1 -0
- requirements.txt   +23 -8
__init__.py
ADDED
@@ -0,0 +1 @@
+from . import modules
app.py
CHANGED
@@ -4,6 +4,11 @@ from PIL import Image
 import os
 import gradio as gr
 from huggingface_hub import hf_hub_download, snapshot_download
+import shlex
+import subprocess
+
+subprocess.run(shlex.split('pip install flash-attn --no-build-isolation'),
+               env=os.environ | {'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"})
 
 def resolve_hf_path(path):
     if isinstance(path, str) and path.startswith("hf://"):
@@ -44,6 +49,7 @@ from inference.registry import INFERENCES
 
 config_path = os.path.join("config", "ace_plus_fft.yaml")
 cfg = Config(load=True, cfg_file=config_path)
+model_cfg = Config(load=True, cfg_file="config/ace_plus_fft.yaml")
 
 # Instantiate the ACEInference object.
 ace_infer = ACEInference(cfg)
@@ -93,17 +99,22 @@ def face_swap_app(target_img, face_img):
     face_img = face_img.convert("RGB")
 
     edit_mask = create_face_mask(face_img)
+    print(edit_mask)
 
     output_img, edit_image, change_image, mask, seed = ace_infer(
         reference_image=target_img,
         edit_image=face_img,
         edit_mask=edit_mask,
-        prompt="
+        prompt="{image}, the person faces the camera.",
         output_height=1024,
         output_width=1024,
         sampler='flow_euler',
         sample_steps=28,
         guide_scale=50,
+        repainting_scale=1.0,
+        use_change=True,
+        keep_pixels=True,
+        keep_pixels_rate=0.8,
         seed=-1
     )
     return output_img
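The new startup hook above installs flash-attn at import time with the CUDA extension build skipped. A minimal sketch of the same idea, wrapped so the pip call only runs when the package is actually missing; the guard function and check=True are additions for illustration, not part of the commit:

import os
import shlex
import subprocess

def ensure_flash_attn():
    """Install flash-attn at startup only if it is not already importable."""
    try:
        import flash_attn  # noqa: F401  -- already present, nothing to do
        return
    except ImportError:
        pass
    # Same install command as the commit; FLASH_ATTENTION_SKIP_CUDA_BUILD
    # avoids compiling the CUDA extension, and check=True surfaces a failed
    # install instead of silently continuing.
    subprocess.run(
        shlex.split("pip install flash-attn --no-build-isolation"),
        env=os.environ | {"FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
        check=True,
    )

ensure_flash_attn()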
modules/layers.py
CHANGED
@@ -18,6 +18,7 @@ try:
     )
     FLASHATTN_IS_AVAILABLE = True
 except ImportError:
+    print("Importing error!")
     FLASHATTN_IS_AVAILABLE = False
     flash_attn_varlen_func = None
 
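The added print only reports that some import failed. An alternative sketch that logs the concrete exception while keeping the same fallback behaviour; the exact import inside the try block is an assumption here, since the diff only shows its closing parenthesis:

try:
    from flash_attn import flash_attn_varlen_func  # assumed import; diff truncates it
    FLASHATTN_IS_AVAILABLE = True
except ImportError as exc:
    # Report which import actually broke, then fall back exactly as before.
    print(f"flash-attn unavailable, using the default attention path: {exc}")
    FLASHATTN_IS_AVAILABLE = False
    flash_attn_varlen_func = None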
requirements.txt
CHANGED
@@ -1,10 +1,25 @@
+huggingface_hub
 diffusers
-
-
-
+transformers==4.49.0
+torch==2.4.0
+xformers==0.0.27.post2
 torchvision
-
-
-
-
-
+gradio==4.44.1
+opencv-python==4.10.0.84
+albumentations
+beautifulsoup4
+bitsandbytes
+einops
+imagehash
+modelscope[framework]
+ms-swift
+open-clip-torch
+opencv-transforms>=0.0.6
+oss2>=2.15.0
+pycocotools
+scikit-image
+scikit-learn
+sentencepiece
+tiktoken
+torchsde
+transformers-stream-generator
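The pins above (torch==2.4.0, xformers==0.0.27.post2, transformers==4.49.0, gradio==4.44.1) only help if they match what actually ends up in the Space's environment. A small, hypothetical startup check along these lines (not part of the commit) can surface a mismatch early:

from importlib.metadata import version, PackageNotFoundError

# Pins copied from requirements.txt; extend with other entries as needed.
PINNED = {
    "torch": "2.4.0",
    "xformers": "0.0.27.post2",
    "transformers": "4.49.0",
    "gradio": "4.44.1",
}

for name, expected in PINNED.items():
    try:
        installed = version(name)
    except PackageNotFoundError:
        print(f"{name} is not installed (expected {expected})")
        continue
    if installed != expected:
        print(f"{name} version mismatch: installed {installed}, pinned {expected}")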