WiNE-iNEFF committed on
Commit 216945a · verified · 1 Parent(s): b46af82

Update app.py

Files changed (1)
  1. app.py +126 -126
app.py CHANGED
@@ -1,127 +1,127 @@
- import gradio as gr
- import torch, torchvision
- from torchvision import transforms
- import torch.nn.functional as F
- import numpy as np
- import PIL
- from PIL import Image, ImageColor
- from diffusers import DiffusionPipeline
- from diffusers import DDIMScheduler, DDPMScheduler, DEISMultistepScheduler, LCMScheduler, PNDMScheduler, UniPCMultistepScheduler
- import base64
- from urllib.parse import quote_plus
- from io import BytesIO
-
- device = ("mps" if torch.backends.mps.is_available() else "cuda" if torch.cuda.is_available() else "cpu")
-
-
- class MSPipeline(DiffusionPipeline):
-     def __init__(self, unet, scheduler):
-         super().__init__()
-         self.register_modules(unet=unet, scheduler=scheduler)
-
-     @torch.no_grad()
-     def __call__(self, batch_size = 1, num_inference_steps = 1000):
-         x = torch.randn(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size).to(device)
-
-         self.scheduler.set_timesteps(num_inference_steps)
-
-         for t in self.progress_bar(self.scheduler.timesteps):
-             #x = self.scheduler.scale_model_input(x, t)
-             model_output = self.unet(x, t).sample
-             x = self.scheduler.step(model_output, t, x).prev_sample
-
-         x = x.cpu().permute(0, 2, 3, 1).clip(0, 1).numpy() #* 255
-         x = self.numpy_to_pil(x)
-
-         return x
-
-
- def clear_pix(x):
-     datas = []
-     for pixel in list(x.getdata()):
-         r,g,b,a = pixel
-         if a == 0 or a < 150:
-             datas.append((0,0,0,0))
-         else:
-             datas.append((r,g,b,255))
-     x.putdata(datas)
-     return x
-
-
- def show_3D(image, print_link = False):
-     if isinstance(image, PIL.Image.Image):
-         buffer = BytesIO()
-         image.save(buffer, format="PNG")
-         skin_base64 = base64.b64encode(buffer.getvalue()).decode("utf-8")
-     else:
-         with open(image, "rb") as f:
-             skin_base64 = base64.b64encode(f.read()).decode("utf-8")
-
-     data = f"data:image/png;base64,{skin_base64}"
-     quoted = quote_plus(data)
-     url = f"https://wine-ineff.github.io/SkinViewFrame/skin.html?color=c2b4ff&data={quoted}"
-
-     if print_link == True:
-         print(url)
-     return url
-
-
- def generate(schedulers, inference_steps, images_num):
-     pipe = MSPipeline.from_pretrained("WiNE-iNEFF/Mineskin-Diffusion-v1.0", use_safetensors=True).to(device)
-
-     if schedulers == "DDIMScheduler":
-         pipe.scheduler = DDIMScheduler.from_pretrained("WiNE-iNEFF/Mineskin-Diffusion-v1.0", subfolder="scheduler")
-     elif schedulers == "DDPMScheduler":
-         pipe.scheduler = DDPMScheduler.from_pretrained("WiNE-iNEFF/Mineskin-Diffusion-v1.0", subfolder="scheduler")
-     elif schedulers == "DEISMultistepScheduler":
-         pipe.scheduler = DEISMultistepScheduler.from_pretrained("WiNE-iNEFF/Mineskin-Diffusion-v1.0", subfolder="scheduler")
-     elif schedulers == "LCMScheduler":
-         pipe.scheduler = LCMScheduler.from_pretrained("WiNE-iNEFF/Mineskin-Diffusion-v1.0", subfolder="scheduler")
-     elif schedulers == "PNDMScheduler":
-         pipe.scheduler = PNDMScheduler.from_pretrained("WiNE-iNEFF/Mineskin-Diffusion-v1.0", subfolder="scheduler")
-     elif schedulers == "UniPCMultistepScheduler":
-         pipe.scheduler = UniPCMultistepScheduler.from_pretrained("WiNE-iNEFF/Mineskin-Diffusion-v1.0", subfolder="scheduler")
-
-     images = pipe(batch_size = images_num, num_inference_steps = inference_steps)
-     return images, update_iframe(images)
-
-
- def update_iframe(images):
-     iframe_html = "<div style='display: grid; grid-template-columns: repeat(2, 1fr); gap: 10px'>"
-     for img in images:
-         iframe_url = show_3D(clear_pix(img), print_link=True)
-         iframe_html += f"<iframe style='min-width: 100%;' src='{iframe_url}'></iframe>"
-     iframe_html += "</div>"
-     return iframe_html
-
-
- interface = gr.Blocks()
- with interface:
-     gr.HTML(f"""
-         <h1 style="min-width: 100%; text-align: center;">Mineskin Diffusion</h1>
-         <p>This space run with {f"<b>{device.upper()}</b>." if device != "cpu" else f"<b>{device.upper()}</b>. This space will be work very slow. Pleasy use a <a href='https://colab.research.google.com/#fileId=https%3A//huggingface.co/WiNE-iNEFF/Mineskin-Diffusion-v1.0/MineskinDiffusion.ipynb' style='text-decoration: none; color: orange;'>Google Colab notebook</a> for get fully experience of model or dublicate this space with your devices"}</p>
-         """)
-     with gr.Tabs():
-         with gr.TabItem("v1.0"):
-             with gr.Column():
-                 scheduler_type = gr.Dropdown(
-                     ["DDIMScheduler", "DDPMScheduler", "DEISMultistepScheduler", "LCMScheduler", "PNDMScheduler", "UniPCMultistepScheduler"],
-                     label = "Type of scheduler"
-                 )
-
-                 with gr.Row():
-                     inference_steps = gr.Number(
-                         label = "Amount of denoising steps",
-                         value = 30, minimum = 5, maximum = 1000
-                     )
-                     images_num = gr.Number(
-                         label = "Amount of skins",
-                         value = 4, minimum = 1, maximum = 4
-                     )
-
-                 gallery = gr.Gallery(columns=4, object_fit="scale-down", min_width=76)
-                 iframe_output = gr.HTML()
-                 gen_btn = gr.Button("Generate")
-                 gen_btn.click(fn=generate, outputs=[gallery, iframe_output], inputs=[scheduler_type, inference_steps, images_num])
-
+ import gradio as gr
+ import torch, torchvision
+ from torchvision import transforms
+ import torch.nn.functional as F
+ import numpy as np
+ import PIL
+ from PIL import Image, ImageColor
+ from diffusers import DiffusionPipeline
+ from diffusers import DDIMScheduler, DDPMScheduler, DEISMultistepScheduler, LCMScheduler, PNDMScheduler, UniPCMultistepScheduler
+ import base64
+ from urllib.parse import quote_plus
+ from io import BytesIO
+
+ device = ("mps" if torch.backends.mps.is_available() else "cuda" if torch.cuda.is_available() else "cpu")
+
+
+ class MSPipeline(DiffusionPipeline):
+     def __init__(self, unet, scheduler):
+         super().__init__()
+         self.register_modules(unet=unet, scheduler=scheduler)
+
+     @torch.no_grad()
+     def __call__(self, batch_size = 1, num_inference_steps = 1000):
+         x = torch.randn(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size).to(device)
+
+         self.scheduler.set_timesteps(num_inference_steps)
+
+         for t in self.progress_bar(self.scheduler.timesteps):
+             #x = self.scheduler.scale_model_input(x, t)
+             model_output = self.unet(x, t).sample
+             x = self.scheduler.step(model_output, t, x).prev_sample
+
+         x = x.cpu().permute(0, 2, 3, 1).clip(0, 1).numpy() #* 255
+         x = self.numpy_to_pil(x)
+
+         return x
+
+
+ def clear_pix(x):
+     datas = []
+     for pixel in list(x.getdata()):
+         r,g,b,a = pixel
+         if a == 0 or a < 150:
+             datas.append((0,0,0,0))
+         else:
+             datas.append((r,g,b,255))
+     x.putdata(datas)
+     return x
+
+
+ def show_3D(image, print_link = False):
+     if isinstance(image, PIL.Image.Image):
+         buffer = BytesIO()
+         image.save(buffer, format="PNG")
+         skin_base64 = base64.b64encode(buffer.getvalue()).decode("utf-8")
+     else:
+         with open(image, "rb") as f:
+             skin_base64 = base64.b64encode(f.read()).decode("utf-8")
+
+     data = f"data:image/png;base64,{skin_base64}"
+     quoted = quote_plus(data)
+     url = f"https://wine-ineff.github.io/SkinViewFrame/skin.html?color=c2b4ff&data={quoted}"
+
+     if print_link == True:
+         print(url)
+     return url
+
+
+ def generate(schedulers, inference_steps, images_num):
+     pipe = MSPipeline.from_pretrained("WiNE-iNEFF/Mineskin-Diffusion-v1.0", use_safetensors=True).to(device)
+
+     if schedulers == "DDIMScheduler":
+         pipe.scheduler = DDIMScheduler.from_pretrained("WiNE-iNEFF/Mineskin-Diffusion-v1.0", subfolder="scheduler")
+     elif schedulers == "DDPMScheduler":
+         pipe.scheduler = DDPMScheduler.from_pretrained("WiNE-iNEFF/Mineskin-Diffusion-v1.0", subfolder="scheduler")
+     elif schedulers == "DEISMultistepScheduler":
+         pipe.scheduler = DEISMultistepScheduler.from_pretrained("WiNE-iNEFF/Mineskin-Diffusion-v1.0", subfolder="scheduler")
+     elif schedulers == "LCMScheduler":
+         pipe.scheduler = LCMScheduler.from_pretrained("WiNE-iNEFF/Mineskin-Diffusion-v1.0", subfolder="scheduler")
+     elif schedulers == "PNDMScheduler":
+         pipe.scheduler = PNDMScheduler.from_pretrained("WiNE-iNEFF/Mineskin-Diffusion-v1.0", subfolder="scheduler")
+     elif schedulers == "UniPCMultistepScheduler":
+         pipe.scheduler = UniPCMultistepScheduler.from_pretrained("WiNE-iNEFF/Mineskin-Diffusion-v1.0", subfolder="scheduler")
+
+     images = pipe(batch_size = images_num, num_inference_steps = inference_steps)
+     return images, update_iframe(images)
+
+
+ def update_iframe(images):
+     iframe_html = "<div style='display: grid; grid-template-columns: repeat(2, 1fr); gap: 10px'>"
+     for img in images:
+         iframe_url = show_3D(clear_pix(img), print_link=True)
+         iframe_html += f"<iframe style='min-width: 100%;' src='{iframe_url}'></iframe>"
+     iframe_html += "</div>"
+     return iframe_html
+
+
+ interface = gr.Blocks()
+ with interface:
+     gr.HTML(f"""
+         <h1 style="min-width: 100%; text-align: center;">Mineskin Diffusion</h1>
+         <p>This space run with {f"<b>{device.upper()}</b>." if device != "cpu" else f"<b>{device.upper()}</b>. This space will be work very slow. Pleasy use a <a href='https://colab.research.google.com/#fileId=https://huggingface.co/WiNE-iNEFF/Mineskin-Diffusion-v1.0/blob/main/MineskinDiffusion.ipynb' style='text-decoration: none; color: orange;'>Google Colab notebook</a> for get fully experience of model or dublicate this space with your devices"}</p>
+         """)
+     with gr.Tabs():
+         with gr.TabItem("v1.0"):
+             with gr.Column():
+                 scheduler_type = gr.Dropdown(
+                     ["DDIMScheduler", "DDPMScheduler", "DEISMultistepScheduler", "LCMScheduler", "PNDMScheduler", "UniPCMultistepScheduler"],
+                     label = "Type of scheduler"
+                 )
+
+                 with gr.Row():
+                     inference_steps = gr.Number(
+                         label = "Amount of denoising steps",
+                         value = 30, minimum = 5, maximum = 1000
+                     )
+                     images_num = gr.Number(
+                         label = "Amount of skins",
+                         value = 4, minimum = 1, maximum = 4
+                     )
+
+                 gallery = gr.Gallery(columns=4, object_fit="scale-down", min_width=76)
+                 iframe_output = gr.HTML()
+                 gen_btn = gr.Button("Generate")
+                 gen_btn.click(fn=generate, outputs=[gallery, iframe_output], inputs=[scheduler_type, inference_steps, images_num])
+
  interface.launch()
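
For reference, a minimal sketch of sampling skins from this pipeline without the Gradio UI. It is not part of the commit; it assumes the MSPipeline, clear_pix, and device definitions from app.py above are in scope (for example, pasted into the same script before interface.launch()), and the batch size, step count, and output filenames are illustrative only.

# Sketch (not part of the commit): head-less sampling with the same checkpoint,
# reusing MSPipeline, clear_pix, and device defined in app.py above.
from diffusers import DDIMScheduler

pipe = MSPipeline.from_pretrained("WiNE-iNEFF/Mineskin-Diffusion-v1.0", use_safetensors=True).to(device)
pipe.scheduler = DDIMScheduler.from_pretrained("WiNE-iNEFF/Mineskin-Diffusion-v1.0", subfolder="scheduler")

skins = pipe(batch_size=2, num_inference_steps=30)  # list of PIL images, as in generate()
for i, skin in enumerate(skins):
    clear_pix(skin).save(f"skin_{i}.png")  # illustrative output path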