Update app.py
app.py CHANGED
@@ -2,50 +2,33 @@ from PIL import Image
 import cv2
 import gradio as gr
 import numpy as np
-import torch
+import torch, os, random
 from accelerate import Accelerator
 from transformers import pipeline
 from diffusers.utils import load_image
 from diffusers import KandinskyV22PriorPipeline, KandinskyV22Pipeline
 
 accelerator = Accelerator(cpu=True)
-
-generator = torch.Generator(device="cpu").manual_seed(4096)
 pope_prior = accelerator.prepare(KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float32))
 pope_prior = pope_prior.to("cpu")
-
 pope = accelerator.prepare(KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float32))
 pope = pope.to("cpu")
+generator = torch.Generator(device="cpu").manual_seed(random.randint(1, 4876364))
 
 def plex(img, cook, one, two, three):
     goof = load_image(img).resize((512, 512))
-    # We pass the prompt and negative prompt through the prior to generate image embeddings
     prompt = cook
     negative_prior_prompt = "lowres,text,bad quality,low quality,jpeg artifacts,ugly,bad hands,bad face,blurry,bad eyes,watermark,signature"
     img_emb = pope_prior(prompt=prompt, guidance_scale=0.85, num_inference_steps=5, generator=generator)
     negative_emb = pope_prior(prompt=negative_prior_prompt, guidance_scale=1, num_inference_steps=5, generator=generator)
-
-    # run text2img pipeline
-    imags = pope(
-        image_embeds=img_emb.image_embeds,
-        negative_image_embeds=negative_emb.image_embeds,
-        num_inference_steps=10,
-        generator=generator,
-        height=512,
-        width=512,
-    ).images[0]
-
-    ## return imags
+    imags = pope(image_embeds=img_emb.image_embeds, negative_image_embeds=negative_emb.image_embeds, num_inference_steps=10, generator=generator, height=512, width=512).images[0]
     images_texts = [cook, goof, imags]
-
-    # specify the weights for each condition in images_texts
     weights = [one, two, three]
-
-    # We can leave the prompt empty
     primpt = ""
     prior_out = pope_prior.interpolate(images_texts, weights, num_inference_steps=5)
     imas = pope(**prior_out, height=512, width=512, num_inference_steps=25).images[0]
     return imas
 
 iface = gr.Interface(fn=plex, inputs=[gr.Image(label="drop", type="pil"), gr.Textbox(label="prompt"), gr.Slider(label="Text Guide", minimum=0.01, step=0.01, maximum=1, value=0.5), gr.Slider(label="Your Image Guide", minimum=0.01, step=0.01, maximum=1, value=0.5), gr.Slider(label="Generated Image Guide", minimum=0.01, step=0.01, maximum=1, value=0.3)], outputs=gr.Image(), title="Ksky22 Cntrl Gdd Interp", description="ksky22 Cntrl Gdd Interp")
-iface.
+iface.queue(max_size=1)
+iface.launch(max_threads=1)
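
A note on the pope(**prior_out, ...) call that is unchanged by this commit: KandinskyV22PriorPipeline.interpolate returns a prior output whose image_embeds and negative_image_embeds fields the decoder pipeline consumes. Below is a minimal sketch of the explicit equivalent, assuming the pope_prior, pope, images_texts, and weights objects defined in app.py above; the field names follow the diffusers Kandinsky prior output and have not been re-verified against the diffusers version pinned by this Space.

# Sketch: what pope(**prior_out, height=512, width=512, num_inference_steps=25) expands to.
# Assumes pope_prior, pope, images_texts, and weights exist as in app.py above.
prior_out = pope_prior.interpolate(images_texts, weights, num_inference_steps=5)
imas = pope(
    image_embeds=prior_out.image_embeds,                    # weighted interpolation of the text and image conditions
    negative_image_embeds=prior_out.negative_image_embeds,  # unconditional embedding supplied by the prior
    height=512,
    width=512,
    num_inference_steps=25,
).images[0]
imas.save("interpolated.png")  # hypothetical output path, for illustration only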