OmPrakashSingh1704 committed on
Commit 4e8bbff · 1 Parent(s): 3462e51
Files changed (2)
  1. app.py +2 -2
  2. options/Banner_Model/Image2Image_2.py +1 -1
app.py CHANGED
@@ -137,8 +137,8 @@ with gr.Blocks() as demo:
     img = gr.Image()
     prompt = gr.Textbox(label="Enter the text to get a good start")
     btn = gr.Button()
-    size = gr.Slider(label="Size", minimum=256, maximum=MAX_IMAGE_SIZE, value=1024)
-    num_inference_steps = gr.Slider(label="num_inference_steps", minimum=1, maximum=100, step=8, value=20)
+    size = gr.Slider(label="Size", minimum=256, maximum=MAX_IMAGE_SIZE, step=8, value=1024)
+    num_inference_steps = gr.Slider(label="num_inference_steps", minimum=1, maximum=100, value=20)
     out_img = gr.Image()
     btn.click(Banner.Image2Image_2, [prompt, img,size,num_inference_steps], out_img)

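For orientation, below is a minimal, self-contained sketch of the Blocks section this hunk edits, using the post-commit slider settings: step=8 moves from the step-count slider to the Size slider, which keeps the requested dimensions divisible by 8 (Stable Diffusion-style pipelines expect that, since the VAE downsamples by a factor of 8). MAX_IMAGE_SIZE and Banner.Image2Image_2 exist in the real app.py; a placeholder constant and a stub callback stand in here so the snippet runs on its own.

import gradio as gr

MAX_IMAGE_SIZE = 1024  # placeholder; the real constant is defined elsewhere in app.py


def image2image_stub(prompt, img, size, num_inference_steps):
    """Stand-in for Banner.Image2Image_2: echoes the input image unchanged."""
    return img


with gr.Blocks() as demo:
    img = gr.Image()
    prompt = gr.Textbox(label="Enter the text to get a good start")
    btn = gr.Button()
    # Post-commit settings: Size snaps to multiples of 8; num_inference_steps no
    # longer jumps in increments of 8.
    size = gr.Slider(label="Size", minimum=256, maximum=MAX_IMAGE_SIZE, step=8, value=1024)
    num_inference_steps = gr.Slider(label="num_inference_steps", minimum=1, maximum=100, value=20)
    out_img = gr.Image()
    btn.click(image2image_stub, [prompt, img, size, num_inference_steps], out_img)

if __name__ == "__main__":
    demo.launch()
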
options/Banner_Model/Image2Image_2.py CHANGED
@@ -23,5 +23,5 @@ def I2I_2(image, prompt,size,num_inference_steps):
     image.resize((size,size))
     image=processor(image)
     generator = torch.Generator(device=device).manual_seed(0)
-    image = pipe(prompt, num_inference_steps=num_inference_steps, generator=generator, image=image).images[0]
+    image = pipe(prompt, num_inference_steps=num_inference_steps, generator=generator, image=image,negative_prompt="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality").images[0]
     return image
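
As a hedged, standalone illustration of how the added negative_prompt feeds into a diffusers image-to-image call: the Space's actual pipe, processor, and device are defined elsewhere in Image2Image_2.py and may use a different (e.g. ControlNet) pipeline, so the pipeline class, checkpoint id, and file names below are assumptions made only for this sketch. Note also that if image is a PIL image, resize() returns a new image rather than resizing in place, so the sketch assigns the result back.

import torch
from PIL import Image
from diffusers import StableDiffusionImg2ImgPipeline

# Illustrative checkpoint only; the Space's own pipeline may differ.
MODEL_ID = "stabilityai/stable-diffusion-2-1"

# The negative prompt string added in this commit.
NEGATIVE_PROMPT = (
    "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, "
    "fewer digits, cropped, worst quality, low quality"
)

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)


def i2i_sketch(image: Image.Image, prompt: str, size: int, num_inference_steps: int) -> Image.Image:
    # PIL's resize() returns a new image, so assign the result back.
    image = image.resize((size, size))
    generator = torch.Generator(device=device).manual_seed(0)
    return pipe(
        prompt,
        image=image,
        num_inference_steps=num_inference_steps,
        generator=generator,
        negative_prompt=NEGATIVE_PROMPT,
    ).images[0]


# Example usage (input file name is hypothetical):
# result = i2i_sketch(Image.open("banner.png").convert("RGB"), "a clean product banner", 512, 20)
# result.save("banner_out.png")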