Merge branch 'main' of https://huggingface.co/spaces/akhaliq/BlendGAN into main
Changed files:
- 000000.png +0 -0
- 000001.png +0 -0
- 100001.png +0 -0
- app.py +17 -26
- packages.txt +4 -0
- psp_encoder/psp_encoders.py +1 -1
- requirements.txt +2 -1
- style_transfer_folder.py +23 -31
000000.png
ADDED

000001.png
ADDED

100001.png
ADDED
app.py
CHANGED
@@ -1,29 +1,20 @@
 import os
-os.system("git clone https://github.com/onion-liu/BlendGAN.git")
-os.system("gdown https://drive.google.com/uc?id=1eF04jKMLAb9DvzI72m8Akn5ykWf3EafE")
-os.system("gdown https://drive.google.com/uc?id=14nevG94hNkkwaoK5eJLF1iv78cv5O8fN")
-from PIL import Image
-import torch
 import gradio as gr
-
-
-
-
-
-
-)
-
-
-
-size=512, device="cuda",side_by_side=False
-)
-def inference(img, ver):
-    os.system("""python style_transfer_folder.py --size 1024 --ckpt ./pretrained_models/blendgan.pt --psp_encoder_ckpt ./pretrained_models/psp_encoder.pt --style_img_path /content/BlendGAN/style/ --input_img_path /content/BlendGAN/input/ --outdir results/style_transfer/""")
-    return out
+from PIL import Image
+
+os.system("wget https://www.dropbox.com/s/fgupbov77x4rrru/blendgan.pt")
+os.system("wget https://www.dropbox.com/s/v8q0dd3r4u20659/psp_encoder.pt")
+
+def inference(content, style, index):
+    content.save('content.png')
+    style.save('style.png')
+    os.system("""python style_transfer_folder.py --size 1024 --add_weight_index """+str(int(index))+""" --ckpt ./blendgan.pt --psp_encoder_ckpt ./psp_encoder.pt --style_img_path style.png --input_img_path content.png""")
+    return "out.jpg"
 
-title = "
-description = "Gradio Demo for
-article = "<p style='text-align: center'><a href='https://
-
-
-
+title = "BlendGAN"
+description = "Gradio Demo for BlendGAN: Implicitly GAN Blending for Arbitrary Stylized Face Generation. To use it, simply upload your images, or click one of the examples to load them. Read more at the links below. Please use a cropped portrait picture for best results similar to the examples below."
+article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2110.11728' target='_blank'>BlendGAN: Implicitly GAN Blending for Arbitrary Stylized Face Generation</a> | <a href='https://github.com/onion-liu/BlendGAN' target='_blank'>Github Repo</a></p><p style='text-align: center'>samples from repo: <img src='https://user-images.githubusercontent.com/6346064/142623312-3e6f09aa-ce88-465c-b956-a8b4db95b4da.gif' alt='animation'/> <img src='https://user-images.githubusercontent.com/6346064/142621044-086cde48-8604-467b-8c43-8768b6670ec2.gif' alt='animation'/></p>"
+
+examples=[['000000.png','100001.png',6]]
+gr.Interface(inference, [gr.inputs.Image(type="pil"),gr.inputs.Image(type="pil"),gr.inputs.Slider(minimum=1, maximum=30, step=1, default=6, label="Weight Index")
+], gr.outputs.Image(type="file"),title=title,description=description,article=article,enable_queue=True,examples=examples,allow_flagging=False).launch()
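Note: gr.inputs, gr.outputs, and enable_queue are the Gradio 2.x API used at the time of this commit. Note also that os.system discards the child process's exit status, so a failed style-transfer run would still return a stale out.jpg. A minimal, more defensive sketch of the same call using subprocess.run from the standard library (flag values mirror the command above; this is an alternative, not what the Space ships):

import subprocess

def run_style_transfer(content_path, style_path, index, size=1024):
    # Build the argument list explicitly: no shell-quoting issues, and
    # check=True raises CalledProcessError on a non-zero exit status.
    cmd = [
        "python", "style_transfer_folder.py",
        "--size", str(size),
        "--add_weight_index", str(int(index)),
        "--ckpt", "./blendgan.pt",
        "--psp_encoder_ckpt", "./psp_encoder.pt",
        "--style_img_path", style_path,
        "--input_img_path", content_path,
    ]
    subprocess.run(cmd, check=True)
    return "out.jpg"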
packages.txt
ADDED
@@ -0,0 +1,4 @@
+wget
+ffmpeg
+libsm6
+libxext6
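On Hugging Face Spaces, packages.txt lists Debian packages installed with apt before the app starts: wget is used by app.py to fetch the checkpoints, while ffmpeg, libsm6, and libxext6 are common runtime dependencies of OpenCV.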
psp_encoder/psp_encoders.py
CHANGED
@@ -141,7 +141,7 @@ class PSPEncoder(Module):
@@ -141,7 +141,7 @@ class PSPEncoder(Module):
         print('Loading psp encoders weights from irse50!')
         encoder_ckpt = torch.load(encoder_ckpt_path, map_location='cpu')
         self.encoder.load_state_dict(get_keys(encoder_ckpt, 'encoder'), strict=True)
-        self.latent_avg = encoder_ckpt['latent_avg']
+        self.latent_avg = encoder_ckpt['latent_avg']
 
         self.face_pool = torch.nn.AdaptiveAvgPool2d((256, 256))
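get_keys here filters the checkpoint's state dict down to the entries belonging to one submodule. In the upstream pSp code it is a small prefix-stripping helper; a sketch of what it typically looks like (an assumption, check the repo for the exact version):

def get_keys(d, name):
    # Unwrap a checkpoint saved as {'state_dict': ...} and keep only the
    # entries whose keys start with `name.`, stripping that prefix so the
    # result can be passed straight to load_state_dict().
    if 'state_dict' in d:
        d = d['state_dict']
    return {k[len(name) + 1:]: v for k, v in d.items() if k.startswith(name)}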
requirements.txt
CHANGED
@@ -1,4 +1,5 @@
 torch
 numpy
 opencv-python-headless
-
+torchvision
+Pillow
style_transfer_folder.py
CHANGED
@@ -20,7 +20,7 @@ torch.cuda.manual_seed_all(seed)
@@ -20,7 +20,7 @@ torch.cuda.manual_seed_all(seed)
 
 
 if __name__ == '__main__':
-    device = '
+    device = 'cpu'
 
     parser = argparse.ArgumentParser()
 
@@ -38,9 +38,7 @@ if __name__ == '__main__':
 
     args = parser.parse_args()
 
-
-    if not os.path.exists(outdir):
-        os.makedirs(outdir, exist_ok=True)
+
 
     args.latent = 512
     args.n_mlp = 8
@@ -58,35 +56,29 @@ if __name__ == '__main__':
     psp_encoder = PSPEncoder(args.psp_encoder_ckpt, output_size=args.size).to(device)
     psp_encoder.eval()
 
-    input_img_paths = sorted(glob.glob(os.path.join(args.input_img_path, '*.*')))
-    style_img_paths = sorted(glob.glob(os.path.join(args.style_img_path, '*.*')))[:]
-
     num = 0
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            out = np.concatenate([img_in, img_style, img_out], axis=1)
-            # out = img_out
-            cv2.imwrite(f'{args.outdir}/{name_in}_v_{name_style}.jpg', out)
+
+    print(num)
+    num += 1
+
+    img_in = cv2.imread(args.input_img_path)
+    img_in_ten = cv2ten(img_in, device)
+    img_in = cv2.resize(img_in, (args.size, args.size))
+
+
+    img_style = cv2.imread(args.style_img_path)
+    img_style_ten = cv2ten(img_style, device)
+    img_style = cv2.resize(img_style, (args.size, args.size))
+
+    with torch.no_grad():
+        sample_style = g_ema.get_z_embed(img_style_ten)
+        sample_in = psp_encoder(img_in_ten)
+        img_out_ten, _ = g_ema([sample_in], z_embed=sample_style, add_weight_index=args.add_weight_index,
+                               input_is_latent=True, return_latents=False, randomize_noise=False)
+        img_out = ten2cv(img_out_ten)
+    #out = np.concatenate([img_in, img_style, img_out], axis=1)
+    cv2.imwrite('out.jpg', img_out)
 
     print('Done!')
 
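The new single-image path relies on cv2ten and ten2cv from the repo's utils to hop between OpenCV images and generator tensors. Roughly, they convert BGR uint8 HxWxC arrays to [-1, 1] float 1xCxHxW tensors and back; a sketch under that assumption (the exact normalization lives in the BlendGAN repo):

import numpy as np
import torch

def cv2ten(img, device):
    # BGR uint8 HxWxC -> RGB float32 1xCxHxW scaled to [-1, 1]
    rgb = img[:, :, ::-1].astype(np.float32) / 127.5 - 1.0
    ten = torch.from_numpy(rgb.copy()).permute(2, 0, 1).unsqueeze(0)
    return ten.to(device)

def ten2cv(ten):
    # 1xCxHxW float in [-1, 1] -> BGR uint8 HxWxC, clipped to the valid range
    img = ten.squeeze(0).permute(1, 2, 0).cpu().numpy()
    img = ((img + 1.0) * 127.5).clip(0, 255).astype(np.uint8)
    return img[:, :, ::-1].copy()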