# NOTE(review): the six lines below are artifacts scraped from the Hugging Face
# Spaces web page (status badges, file size, commit hashes, gutter line numbers).
# They are not Python and raised a SyntaxError; kept commented out for reference.
# Spaces: Running Running
# File size: 1,586 Bytes
# 9d668ad 3039940 25ef180 aa5e404 8b6a74d 0bb12d1 9d668ad c8ca6fe a4dc2bc 3039940 c8ca6fe eeb4c53 aa5e404 2893544 a1cfbda 2893544 a1cfbda 2893544 25ef180 aa5e404 5612db5 c8ca6fe aa5e404 88697bb 8b6a74d c8ca6fe a52084a c8ca6fe 4bde8da 8b6a74d
# 1 2 3 4 5 6 7 8 9 10 ... 49
import spaces
import torch,os,imageio
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video
from PIL import Image
from glob import glob
from pathlib import Path
import numpy as np
# Select the computation device: prefer an available CUDA GPU, otherwise CPU.
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"
def save_video(frames, save_path, fps, quality=9):
    """Encode a sequence of frames into a video file.

    Args:
        frames: iterable of images (PIL images or array-likes) to encode.
        save_path: destination file path for the video.
        fps: frames per second of the output video.
        quality: imageio encoder quality setting (default 9).
    """
    # Context manager guarantees the writer is closed (and the file finalized)
    # even if a frame conversion or append fails — the original leaked the
    # writer on error.
    with imageio.get_writer(save_path, fps=fps, quality=quality) as writer:
        for frame in frames:
            writer.append_data(np.asarray(frame))
# Function to generate the video
@spaces.GPU(duration=100)
def Video(image):
    """Generate a short video from a single still image using SVD-XT 1.1.

    Args:
        image: input image as a NumPy array (e.g. from a Gradio image component).

    Returns:
        str: path of the generated .mp4 file inside the ./outputs folder.
    """
    pipeline = StableVideoDiffusionPipeline.from_pretrained(
        "stabilityai/stable-video-diffusion-img2vid-xt-1-1", torch_dtype=torch.float16
    )
    # BUG FIX: the original condition was inverted — CPU offloading needs a CUDA
    # device to offload *from*, so it must be enabled on the GPU path, not the
    # CPU one. Offloading also manages device placement itself, so we only call
    # .to(device) on the plain-CPU path.
    # NOTE(review): float16 inference on CPU may be slow/unsupported — confirm.
    if device == "cuda":
        pipeline.enable_model_cpu_offload()
    else:
        pipeline = pipeline.to(device)
    # Resize to 1024x576, the resolution SVD-XT expects.
    pil_image = Image.fromarray(image)
    pil_image = pil_image.resize((1024, 576))
    # Fixed seed for reproducible generation.
    generator = torch.manual_seed(42)
    output_folder = "outputs"
    os.makedirs(output_folder, exist_ok=True)
    # Name the output after the count of existing videos so runs don't collide.
    base_count = len(glob(os.path.join(output_folder, "*.mp4")))
    video_path = os.path.join(output_folder, f"{base_count:06d}.mp4")
    # Generate the video frames (decode in chunks of 8 to bound memory use).
    frames = pipeline(pil_image, decode_chunk_size=8, generator=generator).frames[0]
    # Export the frames to a video file (removed stray ' |' that broke the
    # original return statement).
    export_to_video(frames, video_path, fps=7)
    return video_path