import gradio as gr
import tempfile
import json
import torch

from inference_focal_length import load_models, run_inference, OmegaConf

# Initialize models once at startup
cfg = OmegaConf.load("configs/inference_genphoto/adv3_256_384_genphoto_relora_focal_length.yaml")
pipeline, device = load_models(cfg)

def generate_video(base_scene, focal_length_list):
    try:
        # Validate input
        if len(json.loads(focal_length_list)) != 5:
            raise ValueError("Exactly 5 focal_length values required")

        # Run inference
        video_path = run_inference(
            pipeline=pipeline,
            tokenizer=pipeline.tokenizer,
            text_encoder=pipeline.text_encoder,
            base_scene=base_scene,
            focal_length_list=focal_length_list,
            device=device
        )
        return video_path
    except Exception as e:
        raise gr.Error(f"Generation failed: {str(e)}")

# Example inputs
examples = [
    [
        "A small office cubicle with a desk, computer, and chair.",
        "[25.1, 36.1, 47.1, 58.1, 69.1]"
    ],
    [
        "A large, white couch is placed in a living room, with a mirror above it. The couch is covered with various items, including a blue box, a pink towel, and a pair of shoes.",
        "[55.0, 46.0, 37.0, 28.0, 25.0]"
    ]
]

with gr.Blocks(title="Focal Length Effect Generator") as demo:
    gr.Markdown("# Dynamic Focal Length Effect Generation")

    with gr.Row():
        with gr.Column():
            scene_input = gr.Textbox(
                label="Scene Description",
                placeholder="Describe the scene you want to generate..."
            )
            focal_length_input = gr.Textbox(
                label="Focal Length Values",
                placeholder="Enter a JSON list of 5 focal lengths between 24 and 70, e.g. [25.1, 30.2, 33.3, 40.8, 54.0]"
            )
            submit_btn = gr.Button("Generate Video", variant="primary")
        with gr.Column():
            video_output = gr.Video(label="Generated Video")
            error_output = gr.Textbox(label="Error Messages", visible=False)

    # cache_examples=True precomputes example outputs by running generate_video on them
    gr.Examples(
        examples=examples,
        inputs=[scene_input, focal_length_input],
        outputs=[video_output],
        fn=generate_video,
        cache_examples=True
    )

    submit_btn.click(
        fn=generate_video,
        inputs=[scene_input, focal_length_input],
        outputs=[video_output],
    )

if __name__ == "__main__":
    demo.launch(share=True)