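"""Gradio app: text-to-video generation with HunyuanVideo on a Hugging Face Space."""
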
import spaces
import torch
from diffusers import FlowMatchEulerDiscreteScheduler, HunyuanVideoPipeline, HunyuanVideoTransformer3DModel
from diffusers.utils import export_to_video
import os
import time
from datetime import datetime
import gradio as gr
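

# @spaces.GPU asks Hugging Face Spaces (ZeroGPU) to attach a GPU for the
# duration of each call, so the model is loaded and run only on demand.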
@spaces.GPU
def generate_video(
prompt,
resolution,
video_length,
seed,
num_inference_steps,
guidance_scale,
    flow_shift,
    embedded_guidance_scale,
):
    seed = None if seed == -1 else int(seed)
width, height = resolution.split("x")
width, height = int(width), int(height)
model = "hunyuanvideo-community/HunyuanVideo"
transformer = HunyuanVideoTransformer3DModel.from_pretrained(
model,
subfolder="transformer",
device_map="balanced",
        torch_dtype=torch.bfloat16,  # bfloat16 is the recommended dtype for the transformer
)
print(f"transformer device: {transformer.device}")
    # Load the pipeline
pipeline = HunyuanVideoPipeline.from_pretrained(
model,
transformer=transformer,
        torch_dtype=torch.bfloat16,
device_map="balanced",
)
print(f"pipeline device: {pipeline.device}")
    # Tiled VAE decoding keeps peak memory manageable at 720p outputs.
    pipeline.vae.enable_tiling()
    # Note: pipeline.to("cuda") is intentionally not called; device_map="balanced"
    # has already placed the weights, and diffusers rejects .to() on device-mapped pipelines.
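
    # Wire the "Flow Shift" slider into the flow-matching scheduler. This is a
    # sketch that assumes the default scheduler is FlowMatchEulerDiscreteScheduler
    # (as in current diffusers), whose `shift` can be overridden via from_config().
    pipeline.scheduler = FlowMatchEulerDiscreteScheduler.from_config(
        pipeline.scheduler.config, shift=flow_shift
    )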
    # Generate the video. In this diffusers pipeline, `guidance_scale` is
    # HunyuanVideo's embedded (distilled) guidance, so the UI's embedded value
    # is forwarded here; the distilled checkpoint does not use classic CFG.
    generator = None if seed is None else torch.Generator("cpu").manual_seed(seed)
    video = pipeline(
        prompt=prompt,
        height=height,
        width=width,
        num_frames=video_length,
        num_inference_steps=num_inference_steps,
        guidance_scale=embedded_guidance_scale,
        generator=generator,
    ).frames[0]
    # Save the video under gradio_outputs/
save_path = os.path.join(os.getcwd(), "gradio_outputs")
os.makedirs(save_path, exist_ok=True)
    # Colons are not portable in file names, so the timestamp uses dashes only.
    time_flag = datetime.fromtimestamp(time.time()).strftime("%Y-%m-%d-%H-%M-%S")
    video_path = f"{save_path}/{time_flag}_seed{seed}_{prompt[:100].replace('/','')}.mp4"
    export_to_video(video, video_path, fps=24)
print(f'Sample saved to: {video_path}')
return video_path


def create_demo():
with gr.Blocks() as demo:
gr.Markdown("# Hunyuan Video Generation")
with gr.Row():
with gr.Column():
prompt = gr.Textbox(label="Prompt", value="A cat walks on the grass, realistic style.")
with gr.Row():
resolution = gr.Dropdown(
choices=[
# 720p
("1280x720 (16:9, 720p)", "1280x720"),
("720x1280 (9:16, 720p)", "720x1280"),
("1104x832 (4:3, 720p)", "1104x832"),
("832x1104 (3:4, 720p)", "832x1104"),
("960x960 (1:1, 720p)", "960x960"),
# 540p
("960x544 (16:9, 540p)", "960x544"),
("544x960 (9:16, 540p)", "544x960"),
("832x624 (4:3, 540p)", "832x624"),
("624x832 (3:4, 540p)", "624x832"),
("720x720 (1:1, 540p)", "720x720"),
],
value="1280x720",
label="Resolution"
)
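                    # At the export rate of 24 fps, 65 frames is roughly 2 s and
                    # 129 roughly 5 s; both follow HunyuanVideo's 4k+1 frame rule.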
video_length = gr.Dropdown(
label="Video Length",
choices=[
("2s(65f)", 65),
("5s(129f)", 129),
],
value=129,
)
num_inference_steps = gr.Slider(1, 100, value=50, step=1, label="Number of Inference Steps")
show_advanced = gr.Checkbox(label="Show Advanced Options", value=False)
with gr.Row(visible=False) as advanced_row:
with gr.Column():
                        seed = gr.Number(value=-1, label="Seed (-1 for random)", precision=0)
guidance_scale = gr.Slider(1.0, 20.0, value=1.0, step=0.5, label="Guidance Scale")
flow_shift = gr.Slider(0.0, 10.0, value=7.0, step=0.1, label="Flow Shift")
embedded_guidance_scale = gr.Slider(1.0, 20.0, value=6.0, step=0.5, label="Embedded Guidance Scale")
                show_advanced.change(fn=lambda x: gr.update(visible=x), inputs=[show_advanced], outputs=[advanced_row])
generate_btn = gr.Button("Generate")
with gr.Column():
output = gr.Video(label="Generated Video")
generate_btn.click(
            fn=generate_video,
inputs=[
prompt,
resolution,
video_length,
seed,
num_inference_steps,
guidance_scale,
flow_shift,
embedded_guidance_scale
],
outputs=output
)
return demo


if __name__ == "__main__":
print("Starting Gradio server...")
demo = create_demo()
demo.launch() |