# generative_photography/app_bokehK.py
# Gradio demo: generate a video of a described scene while varying the bokeh blur (K) value.
import gradio as gr
import tempfile
import json
from inference_bokehK import load_models, run_inference, OmegaConf
import torch
# Initialize models once at startup
cfg = OmegaConf.load("configs/inference_genphoto/adv3_256_384_genphoto_relora_bokehK.yaml")
pipeline, device = load_models(cfg)
def generate_video(base_scene, bokehK_list):
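    """Generate a video for `base_scene` while sweeping the five bokeh K (blur) values in `bokehK_list`.

    `bokehK_list` is a JSON-formatted string, e.g. "[2.5, 6.3, 10.1, 17.2, 24.0]".
    Returns the path to the generated video file.
    """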
try:
        # Validate input: expect a JSON list of exactly 5 numeric bokeh K values
        bokeh_values = json.loads(bokehK_list)
        if len(bokeh_values) != 5 or not all(isinstance(v, (int, float)) for v in bokeh_values):
            raise ValueError("Exactly 5 numeric Bokeh K values required, e.g. [2.5, 6.3, 10.1, 17.2, 24.0]")
# Run inference
video_path = run_inference(
pipeline=pipeline,
tokenizer=pipeline.tokenizer,
text_encoder=pipeline.text_encoder,
base_scene=base_scene,
bokehK_list=bokehK_list,
device=device
)
return video_path
except Exception as e:
raise gr.Error(f"Generation failed: {str(e)}")
# Example inputs
examples = [
[
"A young boy wearing an orange jacket is standing on a crosswalk, waiting to cross the street.",
"[2.5, 6.3, 10.1, 17.2, 24.0]"
],
[
"A display of frozen desserts, including cupcakes and donuts, is arranged in a row on a counter.",
"[20.0, 18.5, 15.0, 10.5, 5.0]"
]
]
with gr.Blocks(title="Bokeh Effect Generator") as demo:
gr.Markdown("#Dynamic Bokeh Effect Generation")
with gr.Row():
with gr.Column():
scene_input = gr.Textbox(
label="Scene Description",
placeholder="Describe the scene you want to generate..."
)
bokeh_input = gr.Textbox(
label="Bokeh Blur Values",
placeholder="Enter 5 comma-separated values from 1-30 (e.g., [2.44, 8.3, 10.1, 17.2, 24.0])"
)
submit_btn = gr.Button("Generate Video", variant="primary")
with gr.Column():
video_output = gr.Video(label="Generated Video")
            # Reserved for error messages; errors are currently surfaced via gr.Error in generate_video
            error_output = gr.Textbox(label="Error Messages", visible=False)
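    # cache_examples=True pre-computes and stores the outputs for the examples below,
    # so clicking an example returns the cached video instead of re-running inference.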
gr.Examples(
examples=examples,
inputs=[scene_input, bokeh_input],
outputs=[video_output],
fn=generate_video,
cache_examples=True
)
submit_btn.click(
fn=generate_video,
inputs=[scene_input, bokeh_input],
outputs=[video_output],
)
if __name__ == "__main__":
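    # share=True requests a temporary public gradio.live URL in addition to the local server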
demo.launch(share=True)