Commit a52084a · cpu_2
Parent(s): a4dc2bc
options/Video_model/Model.py
CHANGED
@@ -6,8 +6,8 @@ from PIL import Image
 device="cuda" if torch.cuda.is_available() else "cpu"
 
 pipeline = StableVideoDiffusionPipeline.from_pretrained(
-    "stabilityai/stable-video-diffusion-img2vid-xt-1-1", torch_dtype=torch.
-)
+    "stabilityai/stable-video-diffusion-img2vid-xt-1-1", torch_dtype=torch.float32
+).to(device)
 pipeline.enable_model_cpu_offload()
 
 def Video(image):
@@ -15,6 +15,6 @@ def Video(image):
     image = image.resize((1024, 576))
 
     generator = torch.manual_seed(42)
-    frames = pipeline(image, decode_chunk_size=8, generator=generator
+    frames = pipeline(image, decode_chunk_size=8, generator=generator).frames[0]
     export_to_video(frames, "generated.mp4", fps=7)
     return "generated.mp4"
options/Video_model/__pycache__/Model.cpython-310.pyc
CHANGED
Binary files a/options/Video_model/__pycache__/Model.cpython-310.pyc and b/options/Video_model/__pycache__/Model.cpython-310.pyc differ
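
For context, here is a minimal sketch of what options/Video_model/Model.py looks like after this commit. The import block and line 14 are not visible in the diff hunks, so the imports below are an assumption inferred from the names used in the code; everything else is taken directly from the new side of the diff, with comments marking what this commit changed.

# Sketch of options/Video_model/Model.py after commit a52084a ("cpu_2").
# The imports are an assumption based on the identifiers in the diff.
import torch
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import export_to_video
from PIL import Image

device = "cuda" if torch.cuda.is_available() else "cpu"

# This commit loads the weights in float32 and moves the pipeline to `device`
# explicitly, in line with the "cpu_2" commit message.
pipeline = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt-1-1", torch_dtype=torch.float32
).to(device)
pipeline.enable_model_cpu_offload()

def Video(image):
    # (Line 14 of the original file is not shown in the diff.)
    # Stable Video Diffusion expects a 1024x576 conditioning image.
    image = image.resize((1024, 576))

    generator = torch.manual_seed(42)
    # The pipeline output batches videos; .frames[0] (added in this commit)
    # selects the frame list of the single generated clip.
    frames = pipeline(image, decode_chunk_size=8, generator=generator).frames[0]
    export_to_video(frames, "generated.mp4", fps=7)
    return "generated.mp4"

Called with a PIL image, Video() writes generated.mp4 to the working directory and returns its path.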