pass image in default format
app.py
CHANGED
@@ -36,7 +36,7 @@ generator = torch.manual_seed(0)
 def infer(prompt, negative_prompt, image):
     # implement your inference function here
 
-    cond_input = conditioning_image_transforms(
+    cond_input = conditioning_image_transforms(image)
     #cond_input = T.ToPILImage(cond_input)
 
     output = pipe(
@@ -62,7 +62,7 @@ gr.Interface(
             max_lines=1,
             placeholder="low quality",
         ),
-        gr.Image(
+        gr.Image(),
     ],
     outputs=gr.Gallery().style(grid=[2], height="auto"),
     title="Generate controlled outputs with Categorical Conditioning on Waifu Diffusion 1.5 beta 2.",
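
For context: gr.Image() with no arguments passes the uploaded or drawn image to the callback as a NumPy array (type="numpy"), which is why the conditioning transform can now take it directly, matching the commit message "pass image in default format". Below is a minimal, self-contained sketch of that data flow; the transform composition and target resolution are assumptions for illustration only, since the real conditioning_image_transforms is defined elsewhere in app.py and does not appear in this diff.

# Sketch of the data flow this commit relies on; not the app's actual transform.
import numpy as np
import torchvision.transforms as T

# Assumed stand-in for the app's conditioning_image_transforms.
conditioning_image_transforms = T.Compose([
    T.ToTensor(),           # converts the HxWxC uint8 array to a CxHxW float tensor in [0, 1]
    T.Resize((512, 512)),   # assumed target resolution
])

# Stand-in for what gr.Image() delivers to infer() by default.
dummy_image = np.zeros((480, 640, 3), dtype=np.uint8)
cond_input = conditioning_image_transforms(dummy_image)
print(cond_input.shape)  # torch.Size([3, 512, 512])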