pablo committed
Commit ede7254 · 1 Parent(s): d70699a
Files changed (1): app.py (+7, -4)
app.py CHANGED
@@ -12,6 +12,8 @@ import cv2
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
 # Inpainting pipeline
+
+
 unet = UNet2DConditionModel.from_pretrained("pablodawson/ldm3d-inpainting", cache_dir="cache", subfolder="unet", in_channels=9, low_cpu_mem_usage=False, ignore_mismatched_sizes=True)
 pipe = StableDiffusionLDM3DInpaintPipeline.from_pretrained("Intel/ldm3d-4c", cache_dir="cache" ).to(device)
 
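Context for the unet line above (explanatory note, not part of the commit): an inpainting UNet takes a 9-channel input because the noisy latents, the inpainting mask, and the latents of the masked image are concatenated along the channel axis, which is why in_channels=9 is passed together with low_cpu_mem_usage=False and ignore_mismatched_sizes=True so the base checkpoint's 4-channel conv_in can be re-sized. A minimal sketch of that channel arithmetic, assuming the standard Stable Diffusion inpainting layout (the exact ordering for LDM3D is an assumption):

import torch

# Assumed layout, following the standard Stable Diffusion inpainting pipeline:
latents = torch.randn(1, 4, 64, 64)               # noisy image latents
mask = torch.rand(1, 1, 64, 64)                   # downsampled inpainting mask
masked_image_latents = torch.randn(1, 4, 64, 64)  # latents of the masked input image

# Concatenate along the channel axis: 4 + 1 + 4 = 9 channels, matching in_channels=9.
unet_input = torch.cat([latents, mask, masked_image_latents], dim=1)
print(unet_input.shape)  # torch.Size([1, 9, 64, 64])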
@@ -50,7 +52,7 @@ def estimate_depth(image):
 
     output = prediction.cpu().numpy()
 
-    output= 255 * output/np.max(output)
+    output= 255 * (output - np.min(output))/(np.max(output) - np.min(output))
 
     return Image.fromarray(output.astype("uint8"))
 
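Why this hunk changes the scaling (explanatory note, not part of the commit): 255 * output / np.max(output) only fills the 0-255 range when the depth map's minimum is exactly zero, and MiDaS-style relative depth generally has a non-zero minimum, so the replacement applies full min-max normalization. A small sketch with made-up values:

import numpy as np

depth = np.array([[10.0, 20.0], [30.0, 40.0]])  # hypothetical relative-depth values

old = 255 * depth / np.max(depth)                                      # 63.75 .. 255, never reaches 0
new = 255 * (depth - np.min(depth)) / (np.max(depth) - np.min(depth))  # 0 .. 255, full range

print(old.astype("uint8"))  # [[ 63 127]
                            #  [191 255]]
print(new.astype("uint8"))  # [[  0  85]
                            #  [170 255]]

Note that both forms still divide by zero on a constant depth map; the new expression widens the dynamic range but does not add a guard for that edge case.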
@@ -70,11 +72,12 @@ def predict(dict, depth, prompt="", negative_prompt="", guidance_scale=7.5, step
 
     init_image = cv2.resize(dict["image"], (512, 512))
 
-    if (depth == None):
+    if (depth is None):
         depth_image = estimate_depth(init_image)
     else:
         depth_image = depth
-
+        depth_image = Image.fromarray(depth_image[:,:,0].astype("uint8"))
+
     scheduler = getattr(diffusers, scheduler_class_name)
     pipe.scheduler = scheduler.from_pretrained("Intel/ldm3d-4c", subfolder="scheduler")
 
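Explanatory note on this hunk (not part of the commit): depth comes from a gr.Image(type="numpy") input, so it is either None or an (H, W, 3) ndarray. Comparing an ndarray with == None broadcasts to an element-wise boolean array, which makes the if statement raise "the truth value of an array is ambiguous"; is None returns a single bool. The added line then collapses the uploaded RGB-encoded depth to one channel and wraps it as a PIL image, matching what estimate_depth() returns. A minimal sketch, with the array shape assumed:

import numpy as np
from PIL import Image

depth = np.zeros((512, 512, 3), dtype=np.uint8)  # stand-in for the uploaded depth map

# (depth == None) would be a (512, 512, 3) boolean array, ambiguous inside `if`;
# the identity check gives a single bool as intended.
print(depth is None)  # False

depth_image = Image.fromarray(depth[:, :, 0].astype("uint8"))  # single-channel PIL image
print(depth_image.mode, depth_image.size)  # L (512, 512)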
@@ -130,7 +133,7 @@ with image_blocks as demo:
     with gr.Column():
         image = gr.Image(source='upload', tool='sketch', elem_id="image_upload", type="numpy", label="Upload",height=400)
         depth = gr.Image(source='upload', elem_id="depth_upload", type="numpy", label="Upload",height=400)
-
+
         with gr.Row(elem_id="prompt-container", mobile_collapse=False, equal_height=True):
            with gr.Row():
                prompt = gr.Textbox(placeholder="Your prompt (what you want in place of what is erased)", show_label=False, elem_id="prompt")
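The last hunk only adjusts whitespace, but the components around it show where predict()'s first two arguments come from: in the Gradio 3.x API this app appears to target, gr.Image(tool='sketch') passes a dict holding the uploaded picture and the drawn mask, which is why the handler indexes dict["image"]. A hedged sketch of that assumed payload; the shapes and the "mask" layout are assumptions:

import cv2
import numpy as np

# Assumed structure of what gr.Image(source='upload', tool='sketch', type="numpy")
# hands to predict() in Gradio 3.x.
sketch_payload = {
    "image": np.zeros((768, 1024, 3), dtype=np.uint8),  # the uploaded photo
    "mask": np.zeros((768, 1024, 3), dtype=np.uint8),   # non-zero where the user painted
}

init_image = cv2.resize(sketch_payload["image"], (512, 512))  # same call as in predict()
print(init_image.shape)  # (512, 512, 3)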
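One more pattern worth calling out from the predict() hunk's context lines: the scheduler is looked up on the diffusers module by its class name and instantiated from the scheduler subfolder of the Intel/ldm3d-4c repo. A minimal sketch of that lookup; the scheduler_class_name value here is a hypothetical example, presumably chosen in the UI:

import diffusers

scheduler_class_name = "DDIMScheduler"                    # hypothetical example value
scheduler_cls = getattr(diffusers, scheduler_class_name)  # e.g. diffusers.DDIMScheduler
scheduler = scheduler_cls.from_pretrained("Intel/ldm3d-4c", subfolder="scheduler")
# pipe.scheduler = scheduler  # the app then swaps it into the pipeline

A common alternative is scheduler_cls.from_config(pipe.scheduler.config), which reuses the pipeline's existing scheduler configuration instead of fetching it from the repo again.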