# Hugging Face Space app (Space status at capture time: Sleeping)
import os

from huggingface_hub import InferenceClient
import gradio as gr

# ─── STEP 1 ───
# Token configuration.
# SECURITY: never commit a real token to source. Prefer the Spaces secrets UI,
# which injects HF_HUB_TOKEN into the environment. The placeholder below is
# used only as a fallback when no secret is configured; setdefault avoids
# clobbering a real secret that is already set.
os.environ.setdefault("HF_HUB_TOKEN", "YOUR_HF_TOKEN_HERE")

# ─── STEP 2 ───
# Initialize the InferenceClient.
# NOTE: InferenceClient takes `model=`, not `repo_id=` — the original
# `repo_id=` keyword raises TypeError at startup.
client = InferenceClient(
    model="ali-vilab/text-to-video-ms-1.7b",
    token=os.environ["HF_HUB_TOKEN"],
)
# ─── STEP 3 ───
# Define the function that takes a prompt, calls the API, and writes out an MP4.
def generate_video(prompt: str) -> str:
    """Generate a video from *prompt* via the Inference API and save it.

    Args:
        prompt: Text description of the video to generate. Must be non-empty.

    Returns:
        Path of the written MP4 file ("out.mp4").

    Raises:
        ValueError: If the prompt is empty or whitespace-only.
    """
    if not prompt or not prompt.strip():
        raise ValueError("Prompt must not be empty.")
    result = client.text_to_video(prompt)
    # huggingface_hub's text_to_video returns the raw video bytes directly;
    # tolerate a {'video': b'...'} dict in case an older/alternate backend
    # wraps the payload (the original code assumed only the dict shape,
    # which fails on the documented bytes return).
    video_bytes = result["video"] if isinstance(result, dict) else result
    out_path = "out.mp4"
    with open(out_path, "wb") as f:
        f.write(video_bytes)
    return out_path
# ─── STEP 4 ───
# Build the Gradio UI: a single text prompt in, a rendered video out.
prompt_box = gr.Textbox(label="Enter your prompt here")
video_out = gr.Video(label="Generated Video")

demo = gr.Interface(
    fn=generate_video,
    inputs=prompt_box,
    outputs=video_out,
    title="Video Generator",
)

# Bind to all interfaces on port 7860 (the standard Spaces serving port).
demo.launch(server_name="0.0.0.0", server_port=7860)