import os
import gradio as gr
from huggingface_hub import InferenceClient

# ─── STEP 1: read your token from the Spaces secrets
HF_TOKEN = os.environ.get("HF_HUB_TOKEN")
if HF_TOKEN is None:
    raise RuntimeError("Please add your HF_HUB_TOKEN under Settings → Variables and secrets")

# ─── STEP 2: initialize the client (no repo_id arg any more)
client = InferenceClient(token=HF_TOKEN)
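# the client sends requests to the Hugging Face Inference API; the model is chosen per call in generate_video below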

# ─── STEP 3: the generation function
def generate_video(prompt: str) -> str:
    try:
        # pass the prompt as the first arg and the model ID via `model=…`;
        # text_to_video returns the raw video bytes
        video_bytes = client.text_to_video(
            prompt,
            model="ali-vilab/text-to-video-ms-1.7b",
        )
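        # the Space's working directory is writable (though ephemeral), so saving the clip locally is fine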
        out_path = "output.mp4"
        with open(out_path, "wb") as f:
            f.write(video_bytes)
        return out_path
    except Exception as e:
        # re-raise so Gradio logs it for you
        raise RuntimeError(f"Video generation failed: {e}")

# ─── STEP 4: wire it up in Gradio
with gr.Blocks() as demo:
    gr.Markdown("## 🎬 Video Generator")
    with gr.Row():
        inp = gr.Textbox(placeholder="Type your prompt here", label="Enter your prompt")
        vid = gr.Video(label="Generated Video")
    btn = gr.Button("Generate")
    btn.click(fn=generate_video, inputs=inp, outputs=vid)
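    # clicking the button passes the Textbox value to generate_video and shows the returned file path in the Video component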

# ─── STEP 5: launch
demo.launch()
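# note: make sure huggingface_hub (and gradio, if not already provided by the Space's SDK) is listed in requirements.txt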