# FaceGEN / app.py
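# Gradio demo that samples cartoon faces from an unconditional DDPM pipeline
# ("FrozenScar/cartoon_face") hosted on the Hugging Face Hub.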
import gradio as gr
import numpy as np
import random
from diffusers import DDPMPipeline, DDPMScheduler
import torch
device = "cuda" if torch.cuda.is_available() else "cpu"

# DDPM noise scheduler with the standard 1000 training timesteps
noise_scheduler = DDPMScheduler(num_train_timesteps=1000)

if torch.cuda.is_available():
    # GPU: load the pretrained pipeline in fp16 and enable xformers attention
    pipe = DDPMPipeline.from_pretrained(
        "FrozenScar/cartoon_face",
        torch_dtype=torch.float16,
        variant="fp16",
        use_safetensors=True,
        scheduler=noise_scheduler,
    )
    pipe.enable_xformers_memory_efficient_attention()
else:
    # CPU fallback: load in full precision
    pipe = DDPMPipeline.from_pretrained("FrozenScar/cartoon_face", scheduler=noise_scheduler, use_safetensors=True)
pipe = pipe.to(device)
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024
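# MAX_SEED bounds the randomly drawn seed in infer(); MAX_IMAGE_SIZE is only
# referenced by the commented-out width/height sliders further below.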
def infer(num_inference_steps, prompt=None, negative_prompt=None, seed=0, randomize_seed=True, width=512, height=512, guidance_scale=0.0):
    # The pipeline is an unconditional DDPM, so prompt, negative_prompt, width,
    # height, and guidance_scale are accepted only for UI compatibility and ignored.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)
    image = pipe(generator=generator, num_inference_steps=num_inference_steps).images[0]
    return image
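# Quick local check without the UI (hypothetical usage, not part of the app):
#   img = infer(num_inference_steps=6)
#   img.save("sample.png")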
examples = [
    "OK broo",
    "Nothing brooo",
]
css="""
#col-container {
margin: 0 auto;
max-width: 520px;
}
"""
if torch.cuda.is_available():
    power_device = "GPU"
else:
    power_device = "CPU"
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(f"""
        # FACE GENERATOR
        Currently running on {power_device}.
        """)

        with gr.Row():
            num_inference_steps = gr.Slider(
                label="Number of inference steps",
                minimum=1,
                maximum=20,
                step=1,
                value=6,
            )
            run_button = gr.Button("Run", scale=0)

        result = gr.Image(label="Result", show_label=False)
        # with gr.Accordion("Advanced Settings", open=False):
        #     negative_prompt = gr.Text(
        #         label="Negative prompt",
        #         max_lines=1,
        #         placeholder="Enter a negative prompt",
        #         visible=False,
        #     )
        #     seed = gr.Slider(
        #         label="Seed",
        #         minimum=0,
        #         maximum=MAX_SEED,
        #         step=1,
        #         value=0,
        #     )
        #     randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
        #     with gr.Row():
        #         width = gr.Slider(
        #             label="Width",
        #             minimum=256,
        #             maximum=MAX_IMAGE_SIZE,
        #             step=32,
        #             value=512,
        #         )
        #         height = gr.Slider(
        #             label="Height",
        #             minimum=256,
        #             maximum=MAX_IMAGE_SIZE,
        #             step=32,
        #             value=512,
        #         )
        #     with gr.Row():
        #         guidance_scale = gr.Slider(
        #             label="Guidance scale",
        #             minimum=0.0,
        #             maximum=10.0,
        #             step=0.1,
        #             value=0.0,
        #         )
        #         num_inference_steps = gr.Slider(
        #             label="Number of inference steps",
        #             minimum=1,
        #             maximum=120,
        #             step=1,
        #             value=2,
        #         )
        # gr.Examples(
        #     examples = examples,
        #     inputs = [prompt]
        # )
        run_button.click(
            fn=infer,
            inputs=[num_inference_steps],
            outputs=[result],
        )

demo.queue().launch()
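# To try this locally (assuming torch, diffusers, and gradio are installed):
#   python app.py
# Gradio serves the demo at http://127.0.0.1:7860 by default.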