import os
import gradio as gr
from huggingface_hub import InferenceClient

# ─── STEP 1: read your token from the Spaces secrets
HF_TOKEN = os.environ.get("HF_HUB_TOKEN")
if HF_TOKEN is None:
    raise RuntimeError("❌ Please add your HF_HUB_TOKEN under Settings → Variables and secrets")

# ─── STEP 2: initialize the client (no repo_id arg any more)
client = InferenceClient(token=HF_TOKEN)

# ─── STEP 3: the generation function
def generate_video(prompt: str) -> str:
    try:
        # pass the prompt as the first argument and pick the model via `model=...`;
        # `text_to_video` returns the raw video bytes directly
        video_bytes = client.text_to_video(
            prompt,
            model="ali-vilab/text-to-video-ms-1.7b",
        )
        out_path = "output.mp4"
        with open(out_path, "wb") as f:
            f.write(video_bytes)
        return out_path
    except Exception as e:
        # re-raise so Gradio logs it for you
        raise RuntimeError(f"Video generation failed: {e}")
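
# ─── Optional: quick local sanity check (a sketch added for illustration, not part
# of the original Space; the prompt below is only an example). Uncomment to test
# the generation function directly before wiring up the UI.
# if __name__ == "__main__":
#     print(generate_video("an astronaut riding a horse on the moon"))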

# ─── STEP 4: wire it up in Gradio
with gr.Blocks() as demo:
    gr.Markdown("## 🎬 Video Generator")
    with gr.Row():
        inp = gr.Textbox(placeholder="Type your prompt here", label="Enter your prompt")
        vid = gr.Video(label="Generated Video")
    btn = gr.Button("Generate")
    btn.click(fn=generate_video, inputs=inp, outputs=vid)

# ─── STEP 5: launch
demo.launch()
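
# ─── A possible way to call the running Space programmatically with gradio_client
# (a sketch under assumptions: the Space ID below is a placeholder, and the API name
# is assumed to follow Gradio's default of naming the endpoint after the function).
# from gradio_client import Client
# remote = Client("your-username/your-space-name")
# video_path = remote.predict("a prompt here", api_name="/generate_video")
# print(video_path)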