# base dependencies for Wan and Gradio

# there is no flash-attention binary yet for PyTorch 2.7,
# so we pin PyTorch 2.6
torch==2.6.0
torchvision==0.21.0
torchdata==0.10.1
torchao==0.9.0

numpy>=1.26.4

# some important dependencies
ftfy

# peft==0.15.1
peft

# it is recommended to always use the latest version
diffusers @ git+https://github.com/huggingface/diffusers.git@main

flash-attn @ https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
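# note: per its filename, the wheel above targets CUDA 12, torch 2.6,
# Python 3.10 (cp310), and Linux x86_64; other environments likely need
# a matching wheel from the same release page, or a build from source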

transformers>=4.45.2

accelerate
safetensors

imageio
imageio-ffmpeg

gradio==5.38.2

# more dependencies, specific to OmniAvatar
librosa==0.10.2.post1
tqdm
scipy==1.14.0
xfuser==0.4.1
einops
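
# a typical install, assuming a virtual environment is already active:
#   pip install -r requirements.txt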