# internal-v0 / requirements.txt
# carlosh93's picture
# updating new version with supabase and vlm
# ed8368e
absl-py==2.2.2
accelerate==1.9.0
aiofiles==23.2.1
aiohappyeyeballs==2.6.1
aiohttp==3.11.16
aiosignal==1.3.2
annotated-types==0.7.0
anyio==4.9.0
astunparse==1.6.3
async-timeout==5.0.1
attrs==25.3.0
av==15.1.0
bcrypt==4.3.0
beautifulsoup4==4.13.3
bitsandbytes==0.46.1
blis==1.3.0
catalogue==2.0.10
certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
cloudpathlib==0.21.1
confection==0.1.5
cycler==0.12.1
cymem==2.0.11
datasets==3.5.0
decord==0.6.0
deep-translator==1.11.4
deplacy==2.1.0
dill==0.3.8
einops==0.8.1
et_xmlfile==2.0.0
exceptiongroup==1.2.2
fastapi==0.115.12
ffmpy==0.5.0
filelock==3.18.0
flatbuffers==25.2.10
frozenlist==1.5.0
fsspec==2024.12.0
gast==0.6.0
gdown==5.2.0
google-pasta==0.2.0
gradio==5.23.3
gradio_client==1.8.0
gradio_modal==0.0.4
groovy==0.1.2
grpcio==1.71.0
h11==0.14.0
h5py==3.13.0
httpcore==1.0.7
httpx==0.28.1
huggingface-hub==0.30.1
idna==3.10
Jinja2==3.1.6
keras==3.9.2
langcodes==3.5.0
language_data==1.3.0
libclang==18.1.1
marisa-trie==1.2.1
Markdown==3.8
markdown-it-py==3.0.0
MarkupSafe==3.0.2
mdurl==0.1.2
ml_dtypes==0.5.1
mpmath==1.3.0
multidict==6.3.2
multiprocess==0.70.16
murmurhash==1.0.13
namex==0.0.8
networkx==3.4.2
numpy==2.1.3
nvidia-cublas-cu12==12.6.4.1
nvidia-cuda-cupti-cu12==12.6.80
nvidia-cuda-nvrtc-cu12==12.6.77
nvidia-cuda-runtime-cu12==12.6.77
nvidia-cudnn-cu12==9.5.1.17
nvidia-cufft-cu12==11.3.0.4
nvidia-cufile-cu12==1.11.1.6
nvidia-curand-cu12==10.3.7.77
nvidia-cusolver-cu12==11.7.1.2
nvidia-cusparse-cu12==12.5.4.2
nvidia-cusparselt-cu12==0.6.3
nvidia-nccl-cu12==2.26.2
nvidia-nvjitlink-cu12==12.6.85
nvidia-nvtx-cu12==12.6.77
opencv-python==4.11.0.86
openpyxl==3.1.5
opt_einsum==3.4.0
optree==0.15.0
orjson==3.10.16
packaging==24.2
pandas==2.2.3
pillow==11.1.0
pillow_heif==1.0.0
preshed==3.0.10
propcache==0.3.1
protobuf==5.29.4
psutil==7.0.0
pyarrow==19.0.1
# NOTE(review): left unpinned unlike the rest of this file — pin to tested versions if reproducibility matters
pydantic
pydantic_core
pydub==0.25.1
Pygments==2.19.1
PySocks==1.7.1
pythainlp==5.1.2
python-dateutil==2.9.0.post0
python-dotenv==1.1.0
python-multipart==0.0.20
pytz==2025.2
pyuca==1.2
PyYAML==6.0.2
qwen-vl-utils==0.0.8
regex==2024.11.6
requests==2.32.3
retina-face==0.0.17
rich==14.0.0
ruff==0.11.4
safehttpx==0.1.6
safetensors==0.5.3
semantic-version==2.10.0
shellingham==1.5.4
six==1.17.0
smart_open==7.3.0.post1
sniffio==1.3.1
soupsieve==2.6
spacy==3.8.7
spacy-legacy==3.0.12
spacy-loggers==1.0.5
spacy-thai==0.7.8
spacy-udpipe==1.0.0
srsly==2.5.1
starlette==0.46.1
sympy==1.14.0
tensorboard==2.19.0
tensorboard-data-server==0.7.2
tensorflow==2.19.0
tensorflow-io-gcs-filesystem==0.37.1
termcolor==3.0.1
tf_keras==2.19.0
thinc==8.3.6
timm==1.0.19
tokenizers==0.21.2
tomlkit==0.13.2
torch==2.7.1
https://github.com/Dao-AILab/flash-attention/releases/download/v2.8.3/flash_attn-2.8.3+cu12torch2.7cxx11abiTRUE-cp310-cp310-linux_x86_64.whl
torchao==0.13.0
torchvision==0.22.1
tqdm==4.67.1
transformers==4.53.3
triton==3.3.1
typer==0.15.2
# NOTE(review): left unpinned unlike the rest of this file — pin to tested versions if reproducibility matters
typing-inspection
typing_extensions
tzdata==2025.2
ufal.udpipe==1.3.1.1
urllib3==2.3.0
uvicorn==0.34.0
wasabi==1.1.3
weasel==0.4.1
websockets==15.0.1
Werkzeug==3.1.3
wrapt==1.17.2
xxhash==3.5.0
yarl==1.19.0
supabase==2.18.1
supabase_auth==2.12.3
supabase_functions==0.10.1
# flash_attn==2.8.1