Cognomen committed on
Commit
d92c1cc
Β·
1 Parent(s): 9c7a312
Files changed (1) hide show
  1. app.py +2 -3
app.py CHANGED
@@ -30,6 +30,7 @@ generator = torch.manual_seed(0)
30
  # inference function takes prompt, negative prompt and image
31
  def infer(prompt, negative_prompt, image):
32
  # implement your inference function here
 
33
 
34
  cond_input = conditioning_image_transforms(image)
35
 
@@ -44,14 +45,12 @@ def infer(prompt, negative_prompt, image):
44
  return output[0]
45
 
46
  # you need to pass inputs and outputs according to inference function
47
- gr.Interface(fn = infer, inputs = ["text", "text", "image"], outputs = "image").launch()
48
-
49
  title = "Categorical Conditioning Controlnet for One-Shot Image Stylization."
50
  description = "This is a demo on ControlNet which generates images based on the style of the conditioning input."
51
  # you need to pass your examples according to your inputs
52
  # each inner list is one example, each element in the list corresponding to a component in the `inputs`.
53
  examples = [["1girl, green hair, sweater, looking at viewer, upper body, beanie, outdoors, watercolor, night, turtleneck", "low quality", "wikipe_cond_1.png"]]
54
- gr.Interface(fn = infer, inputs = ["text", "text", "image"], outputs = "image",
55
  title = title, description = description, examples = examples, theme='gradio/soft').launch()
56
 
57
 
 
30
  # inference function takes prompt, negative prompt and image
31
  def infer(prompt, negative_prompt, image):
32
  # implement your inference function here
33
+
34
 
35
  cond_input = conditioning_image_transforms(image)
36
 
 
45
  return output[0]
46
 
47
  # you need to pass inputs and outputs according to inference function
 
 
48
  title = "Categorical Conditioning Controlnet for One-Shot Image Stylization."
49
  description = "This is a demo on ControlNet which generates images based on the style of the conditioning input."
50
  # you need to pass your examples according to your inputs
51
  # each inner list is one example, each element in the list corresponding to a component in the `inputs`.
52
  examples = [["1girl, green hair, sweater, looking at viewer, upper body, beanie, outdoors, watercolor, night, turtleneck", "low quality", "wikipe_cond_1.png"]]
53
+ gr.Interface(fn = infer, inputs = ["text", "text", gr.Image(type="pil") ], outputs = "image",
54
  title = title, description = description, examples = examples, theme='gradio/soft').launch()
55
 
56