"""Gradio Space: generate a short video from a text prompt via the HF Inference API."""

import os

import gradio as gr
from huggingface_hub import InferenceClient

# ─── STEP 1: read your token from the Spaces secrets
HF_TOKEN = os.environ.get("HF_HUB_TOKEN")
if HF_TOKEN is None:
    raise RuntimeError("❌ Please add your HF_HUB_TOKEN under Settings → Variables and secrets")

# Text-to-video checkpoint used for every request; change here to try another model.
MODEL_ID = "ali-vilab/text-to-video-ms-1.7b"

# ─── STEP 2: initialize the client (no repo_id arg any more)
client = InferenceClient(token=HF_TOKEN)


# ─── STEP 3: the generation function
def generate_video(prompt: str) -> str:
    """Generate a video for *prompt* and return the path of the saved MP4 file.

    Args:
        prompt: free-form text description of the desired video.

    Returns:
        Path to the generated ``output.mp4`` in the working directory.

    Raises:
        RuntimeError: if the Inference API call or the file write fails.
    """
    try:
        # FIX: text_to_video takes the prompt as its first positional argument
        # and the model id via the `model=` keyword — there is no `inputs=`
        # parameter. It also returns the raw video bytes directly (not a dict),
        # so the old `result["video"]` indexing would have crashed.
        video_bytes = client.text_to_video(prompt, model=MODEL_ID)
        out_path = "output.mp4"
        with open(out_path, "wb") as f:
            f.write(video_bytes)
        return out_path
    except Exception as e:
        # Chain the original exception so Gradio logs the full root cause.
        raise RuntimeError(f"Video generation failed: {e}") from e


# ─── STEP 4: wire it up in Gradio
with gr.Blocks() as demo:
    gr.Markdown("## 🎬 Video Generator")
    with gr.Row():
        inp = gr.Textbox(placeholder="Type your prompt here", label="Enter your prompt")
        vid = gr.Video(label="Generated Video")
    btn = gr.Button("Generate")
    btn.click(fn=generate_video, inputs=inp, outputs=vid)

# ─── STEP 5: launch (guarded so importing this module doesn't start the server)
if __name__ == "__main__":
    demo.launch()