Files changed (2)
  1. README.md +1 -1
  2. app.py +36 -2
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🌍
 colorFrom: purple
 colorTo: red
 sdk: gradio
-sdk_version: 3.50.2
+sdk_version: 5.38.0
 app_file: app.py
 pinned: false
 license: apache-2.0
app.py CHANGED
@@ -19,6 +19,15 @@ css = "#col-container {margin:0 auto; max-width:960px;}"
 
 # Background generation via Replicate
 def _gen_bg(prompt: str):
+    """
+    Generate a background image using Replicate's imagen-4-fast model.
+
+    Args:
+        prompt: Text description for the background to generate
+
+    Returns:
+        PIL.Image: Generated background image in RGB format
+    """
     url = replicate.run(
         "google/imagen-4-fast",
         input={"prompt": prompt or "cinematic background", "aspect_ratio": "1:1"},
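The hunk ends at the replicate.run call, while the new docstring promises a PIL image in RGB. A minimal sketch of how such a helper typically finishes, assuming the model returns a downloadable URL (the download step below is an assumption, not part of this diff):

    import io
    import replicate
    import requests
    from PIL import Image

    def _gen_bg(prompt: str):
        # Ask Replicate for a square background; fall back to a generic prompt if empty.
        url = replicate.run(
            "google/imagen-4-fast",
            input={"prompt": prompt or "cinematic background", "aspect_ratio": "1:1"},
        )
        # Assumed completion: fetch the returned URL and decode it as RGB,
        # matching the return type stated in the new docstring.
        resp = requests.get(str(url), timeout=60)
        resp.raise_for_status()
        return Image.open(io.BytesIO(resp.content)).convert("RGB")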
@@ -28,6 +37,21 @@ def _gen_bg(prompt: str):
 
 # Main processing function
 def process_image_and_text(subject_image, adapter_dict, prompt, _unused1, _unused2, size=ADAPTER_SIZE, rank=10.0):
+    """
+    Process subject and adapter images with text prompt to generate inpainted result.
+
+    Args:
+        subject_image: PIL.Image of the subject to be placed
+        adapter_dict: Either a PIL.Image or dict with 'image' and 'mask' keys for background/sketch
+        prompt: Text description for the generation
+        _unused1: Unused parameter (placeholder)
+        _unused2: Unused parameter (placeholder)
+        size: Target size for processing (default: ADAPTER_SIZE)
+        rank: Rank parameter for the model (default: 10.0)
+
+    Returns:
+        tuple: (output_image, raw_image) - both are the same PIL.Image result
+    """
     seed, guidance_scale, steps = 42, 2.5, 28
 
     adapter_image = adapter_dict["image"] if isinstance(adapter_dict, dict) else adapter_dict
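A hypothetical call matching the documented signature, useful when reading the hunk: the _unused1/_unused2 positional slots still have to be filled (here with None), and the second argument may be either a plain PIL image or the sketch dict described in the docstring; the file names are placeholders:

    from PIL import Image

    subject = Image.open("subject.png").convert("RGB")  # placeholder path
    sketch = {"image": Image.open("background.png").convert("RGB"), "mask": None}

    # Per the new docstring, both returned values are the same PIL.Image result.
    output_img, raw_img = process_image_and_text(
        subject, sketch, "product shot in a sunlit living room", None, None
    )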
@@ -111,7 +135,7 @@ with gr.Blocks(css=css, title="ZenCtrl Inpainting") as demo:
 
     gr.HTML(header_html)
     gr.Markdown(
-        "**Generate context-aware images of your subject with ZenCtrls inpainting playground.** Upload a subject + optional mask, write a prompt, and hit **Generate**. \n"
+        "**Generate context-aware images of your subject with ZenCtrl's inpainting playground.** Upload a subject + optional mask, write a prompt, and hit **Generate**. \n"
         "Open *Advanced Settings* for an AI-generated background. \n\n"
         "**Note:** The model was trained mainly on interior scenes and other *rigid* objects. Results on people or highly deformable items may contain visual distortions."
     )
@@ -146,12 +170,22 @@ with gr.Blocks(css=css, title="ZenCtrl Inpainting") as demo:
 
     def _load_and_show(subj_path, bg_path, prompt_text):
         """
+        Load example images and prompt for display in the interface.
+
         Takes the three values coming from an Examples row
         and returns FOUR objects – one for every output widget:
             1. subject PIL image             -> subj_img
             2. dict for the sketch component -> ref_img
             3. prompt string                 -> promptbox
             4. pre-rendered result PIL       -> output_img
+
+        Args:
+            subj_path: Path to subject image file
+            bg_path: Path to background image file
+            prompt_text: Example prompt text
+
+        Returns:
+            tuple: (subject_image, sketch_dict, prompt, output_image)
         """
         out_path = subj_path.replace(".png", "_out.png")  # your saved result
         return (
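The hunk cuts off at the opening of the return tuple; a minimal sketch of the four values the docstring lists, with the exact shape of the sketch-component dict treated as an assumption:

    from PIL import Image

    def _load_and_show(subj_path, bg_path, prompt_text):
        out_path = subj_path.replace(".png", "_out.png")  # pre-rendered result stored next to the subject
        return (
            Image.open(subj_path).convert("RGB"),                         # 1. subject image   -> subj_img
            {"image": Image.open(bg_path).convert("RGB"), "mask": None},  # 2. sketch dict     -> ref_img
            prompt_text,                                                  # 3. prompt string   -> promptbox
            Image.open(out_path).convert("RGB"),                          # 4. rendered result -> output_img
        )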
@@ -203,4 +237,4 @@ with gr.Blocks(css=css, title="ZenCtrl Inpainting") as demo:
 
 # ---------------- Launch ---------------------------------------
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch(mcp_server=True)
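The two visible changes are related, though the diff itself does not say so: passing mcp_server=True asks Gradio to expose the app's handlers over the Model Context Protocol in addition to the web UI, and that option only exists in Gradio 5.x, which is presumably why README.md bumps sdk_version from 3.50.2 to 5.38.0 in the same change.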
 