Update app.py
app.py CHANGED
@@ -5,7 +5,6 @@ import torch
 
 controlnet = ControlNetModel.from_pretrained("ioclab/control_v1p_sd15_brightness", torch_dtype=torch.float32, use_safetensors=True)
 
-
 pipe = StableDiffusionControlNetPipeline.from_pretrained(
     "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float32,
 )
@@ -16,19 +15,22 @@ pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
 pipe.enable_model_cpu_offload()
 
 
-def infer(prompt, negative_prompt, num_inference_steps,
-
+def infer(prompt, negative_prompt, conditioning_image, num_inference_steps, size, guidance_scale, seed):
+
     conditioning_image = Image.fromarray(conditioning_image)
-
+    conditioning_image = conditioning_image.convert('L')
+
+    generator = torch.Generator(device="cpu").manual_seed(seed)
 
     output_image = pipe(
         prompt,
         conditioning_image,
-        height=
-        width=
+        height=size,
+        width=size,
         num_inference_steps=num_inference_steps,
         generator=generator,
         negative_prompt=negative_prompt,
+        guidance_scale=guidance_scale,
         controlnet_conditioning_scale=1.0,
     ).images[0]
 
@@ -50,14 +52,36 @@ with gr.Blocks() as demo:
             negative_prompt = gr.Textbox(
                 label="Negative Prompt",
             )
-            num_inference_steps = gr.Slider(
-                10, 40, 20,
-                step=1,
-                label="Steps",
-            )
             conditioning_image = gr.Image(
                 label="Conditioning Image",
             )
+            with gr.Accordion('Advanced options', open=False):
+                with gr.Row():
+                    num_inference_steps = gr.Slider(
+                        10, 40, 20,
+                        step=1,
+                        label="Steps",
+                    )
+                    size = gr.Slider(
+                        256, 768, 512,
+                        step=128,
+                        label="Size",
+                    )
+                with gr.Row():
+                    guidance_scale = gr.Slider(
+                        label='Guidance Scale',
+                        minimum=0.1,
+                        maximum=30.0,
+                        value=7.0,
+                        step=0.1
+                    )
+                    seed = gr.Slider(
+                        label='Seed',
+                        minimum=-1,
+                        maximum=2147483647,
+                        step=1,
+                        randomize=True
+                    )
             submit_btn = gr.Button(
                 value="Submit",
                 variant="primary"
@@ -70,7 +94,7 @@ with gr.Blocks() as demo:
     submit_btn.click(
         fn=infer,
         inputs=[
-            prompt, negative_prompt, num_inference_steps,
+            prompt, negative_prompt, conditioning_image, num_inference_steps, size, guidance_scale, seed
         ],
         outputs=output
     )
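For context, a minimal standalone sketch of the diffusers call that the updated infer() performs. The model IDs, scheduler, and call arguments mirror the diff; the prompt, negative prompt, seed, and conditioning-image path are placeholders for illustration only.

# Sketch of the pipeline call made by the updated infer(); values marked
# below as placeholders are not from app.py.
import torch
from PIL import Image
from diffusers import (
    ControlNetModel,
    StableDiffusionControlNetPipeline,
    UniPCMultistepScheduler,
)

controlnet = ControlNetModel.from_pretrained(
    "ioclab/control_v1p_sd15_brightness",
    torch_dtype=torch.float32,
    use_safetensors=True,
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    torch_dtype=torch.float32,
)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()  # requires accelerate; moves submodules to CPU between uses

# app.py converts the conditioning image to single-channel grayscale ('L')
# before handing it to the pipeline. The file name here is a placeholder.
conditioning_image = Image.open("brightness_map.png").convert("L")

generator = torch.Generator(device="cpu").manual_seed(1234)  # placeholder seed
output_image = pipe(
    "a photograph of a cozy living room",   # placeholder prompt
    conditioning_image,
    height=512,
    width=512,
    num_inference_steps=20,
    generator=generator,
    negative_prompt="low quality, blurry",  # placeholder negative prompt
    guidance_scale=7.0,
    controlnet_conditioning_scale=1.0,
).images[0]
output_image.save("output.png")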
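The signature change and the inputs list change go together: in Gradio Blocks, click() passes each input component's current value to fn positionally, in the order given by inputs, so the list must stay in sync with infer()'s parameters. A simplified sketch of that wiring, with widget ranges taken from the diff, the layout reduced, and the body of infer() stubbed out:

# Wiring sketch only; infer() is a stub, not the real pipeline call.
import gradio as gr

def infer(prompt, negative_prompt, conditioning_image, num_inference_steps, size, guidance_scale, seed):
    # gr.Image delivers the upload as a numpy array by default, which is why
    # app.py rebuilds a PIL image with Image.fromarray() before calling the pipe.
    return conditioning_image  # stub: echo the conditioning image back

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    negative_prompt = gr.Textbox(label="Negative Prompt")
    conditioning_image = gr.Image(label="Conditioning Image")
    with gr.Accordion('Advanced options', open=False):
        num_inference_steps = gr.Slider(10, 40, 20, step=1, label="Steps")
        size = gr.Slider(256, 768, 512, step=128, label="Size")
        guidance_scale = gr.Slider(0.1, 30.0, 7.0, step=0.1, label="Guidance Scale")
        seed = gr.Slider(-1, 2147483647, step=1, label="Seed", randomize=True)
    output = gr.Image(label="Output")
    submit_btn = gr.Button(value="Submit", variant="primary")
    submit_btn.click(
        fn=infer,
        inputs=[prompt, negative_prompt, conditioning_image, num_inference_steps, size, guidance_scale, seed],
        outputs=output,
    )

demo.launch()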