# This file was autogenerated by uv via the following command:
#    uv pip compile pyproject.toml -o requirements.txt
absl-py==2.1.0
    # via mesop
aiohappyeyeballs==2.4.0
    # via aiohttp
aiohttp==3.10.11
    # via
    #   llama-index-core
    #   llama-index-legacy
aiosignal==1.3.1
    # via aiohttp
annotated-types==0.7.0
    # via pydantic
anyio==4.4.0
    # via
    #   httpx
    #   openai
async-timeout==4.0.3
    # via aiohttp
attrs==24.2.0
    # via aiohttp
beautifulsoup4==4.12.3
    # via llama-index-readers-file
blinker==1.8.2
    # via flask
bm25s==0.1.10
    # via llama-index-retrievers-bm25
cachetools==5.5.0
    # via google-auth
certifi==2024.8.30
    # via
    #   httpcore
    #   httpx
    #   requests
charset-normalizer==3.3.2
    # via requests
click==8.1.7
    # via
    #   flask
    #   nltk
dataclasses-json==0.6.7
    # via
    #   llama-index-core
    #   llama-index-legacy
deepdiff==6.7.1
    # via mesop
deprecated==1.2.14
    # via
    #   llama-index-core
    #   llama-index-legacy
dirtyjson==1.0.8
    # via
    #   llama-index-core
    #   llama-index-legacy
distro==1.9.0
    # via openai
exceptiongroup==1.2.2
    # via anyio
flask==3.0.3
    # via mesop
frozenlist==1.4.1
    # via
    #   aiohttp
    #   aiosignal
fsspec==2024.6.1
    # via
    #   llama-index-core
    #   llama-index-legacy
google-ai-generativelanguage==0.6.4
    # via google-generativeai
google-api-core==2.19.1
    # via
    #   google-ai-generativelanguage
    #   google-api-python-client
    #   google-generativeai
google-api-python-client==2.142.0
    # via google-generativeai
google-auth==2.34.0
    # via
    #   google-ai-generativelanguage
    #   google-api-core
    #   google-api-python-client
    #   google-auth-httplib2
    #   google-generativeai
google-auth-httplib2==0.2.0
    # via google-api-python-client
google-generativeai==0.5.4
    # via
    #   docbot (pyproject.toml)
    #   llama-index-embeddings-google
    #   llama-index-llms-gemini
googleapis-common-protos==1.63.2
    # via
    #   google-api-core
    #   grpcio-status
greenlet==3.0.3
    # via sqlalchemy
grpcio==1.66.0
    # via
    #   google-api-core
    #   grpcio-status
grpcio-status==1.62.3
    # via google-api-core
gunicorn==23.0.0
    # via docbot (pyproject.toml)
h11==0.14.0
    # via httpcore
httpcore==1.0.5
    # via httpx
httplib2==0.22.0
    # via
    #   google-api-python-client
    #   google-auth-httplib2
httpx==0.27.0
    # via
    #   llama-cloud
    #   llama-index-core
    #   llama-index-legacy
    #   openai
idna==3.8
    # via
    #   anyio
    #   httpx
    #   requests
    #   yarl
itsdangerous==2.2.0
    # via flask
jinja2==3.1.4
    # via flask
jiter==0.5.0
    # via openai
joblib==1.4.2
    # via nltk
llama-cloud==0.0.15
    # via llama-index-indices-managed-llama-cloud
llama-index==0.10.68
    # via docbot (pyproject.toml)
llama-index-agent-openai==0.2.9
    # via
    #   llama-index
    #   llama-index-program-openai
llama-index-cli==0.1.13
    # via llama-index
llama-index-core==0.10.68.post1
    # via
    #   llama-index
    #   llama-index-agent-openai
    #   llama-index-cli
    #   llama-index-embeddings-google
    #   llama-index-embeddings-openai
    #   llama-index-indices-managed-llama-cloud
    #   llama-index-llms-gemini
    #   llama-index-llms-openai
    #   llama-index-multi-modal-llms-openai
    #   llama-index-program-openai
    #   llama-index-question-gen-openai
    #   llama-index-readers-file
    #   llama-index-readers-llama-parse
    #   llama-index-retrievers-bm25
    #   llama-parse
llama-index-embeddings-google==0.1.6
    # via docbot (pyproject.toml)
llama-index-embeddings-openai==0.1.11
    # via
    #   llama-index
    #   llama-index-cli
llama-index-indices-managed-llama-cloud==0.2.7
    # via llama-index
llama-index-legacy==0.9.48.post3
    # via llama-index
llama-index-llms-gemini==0.2.0
    # via docbot (pyproject.toml)
llama-index-llms-openai==0.1.31
    # via
    #   llama-index
    #   llama-index-agent-openai
    #   llama-index-cli
    #   llama-index-multi-modal-llms-openai
    #   llama-index-program-openai
    #   llama-index-question-gen-openai
llama-index-multi-modal-llms-openai==0.1.9
    # via llama-index
llama-index-program-openai==0.1.7
    # via
    #   llama-index
    #   llama-index-question-gen-openai
llama-index-question-gen-openai==0.1.3
    # via llama-index
llama-index-readers-file==0.1.33
    # via llama-index
llama-index-readers-llama-parse==0.1.6
    # via llama-index
llama-index-retrievers-bm25==0.2.2
    # via docbot (pyproject.toml)
llama-parse==0.4.9
    # via llama-index-readers-llama-parse
markupsafe==2.1.5
    # via
    #   jinja2
    #   werkzeug
marshmallow==3.22.0
    # via dataclasses-json
mesop==1.0.0
    # via docbot (pyproject.toml)
msgpack==1.0.8
    # via mesop
multidict==6.0.5
    # via
    #   aiohttp
    #   yarl
mypy-extensions==1.0.0
    # via typing-inspect
nest-asyncio==1.6.0
    # via
    #   docbot (pyproject.toml)
    #   llama-index-core
    #   llama-index-legacy
networkx==3.3
    # via
    #   llama-index-core
    #   llama-index-legacy
nltk==3.9.1
    # via
    #   llama-index-core
    #   llama-index-legacy
numpy==1.26.4
    # via
    #   bm25s
    #   llama-index-core
    #   llama-index-legacy
    #   pandas
    #   scipy
openai==1.42.0
    # via
    #   llama-index-agent-openai
    #   llama-index-legacy
    #   llama-index-llms-openai
ordered-set==4.1.0
    # via deepdiff
packaging==24.1
    # via
    #   gunicorn
    #   marshmallow
pandas==2.2.2
    # via
    #   llama-index-core
    #   llama-index-legacy
pillow==10.4.0
    # via
    #   llama-index-core
    #   llama-index-llms-gemini
propcache==0.3.0
    # via yarl
proto-plus==1.24.0
    # via
    #   google-ai-generativelanguage
    #   google-api-core
protobuf==4.25.4
    # via
    #   google-ai-generativelanguage
    #   google-api-core
    #   google-generativeai
    #   googleapis-common-protos
    #   grpcio-status
    #   mesop
    #   proto-plus
pyasn1==0.6.0
    # via
    #   pyasn1-modules
    #   rsa
pyasn1-modules==0.4.0
    # via google-auth
pydantic==2.8.2
    # via
    #   google-generativeai
    #   llama-cloud
    #   llama-index-core
    #   mesop
    #   openai
pydantic-core==2.20.1
    # via pydantic
pyparsing==3.1.4
    # via httplib2
pypdf==4.3.1
    # via llama-index-readers-file
pystemmer==2.2.0.1
    # via llama-index-retrievers-bm25
python-dateutil==2.9.0.post0
    # via pandas
python-dotenv==1.0.1
    # via mesop
pytz==2024.1
    # via pandas
pyyaml==6.0.2
    # via llama-index-core
regex==2024.7.24
    # via
    #   nltk
    #   tiktoken
requests==2.32.3
    # via
    #   google-api-core
    #   llama-index-core
    #   llama-index-legacy
    #   tiktoken
rsa==4.9
    # via google-auth
scipy==1.14.1
    # via bm25s
six==1.16.0
    # via python-dateutil
sniffio==1.3.1
    # via
    #   anyio
    #   httpx
    #   openai
soupsieve==2.6
    # via beautifulsoup4
sqlalchemy==2.0.32
    # via
    #   llama-index-core
    #   llama-index-legacy
striprtf==0.0.26
    # via llama-index-readers-file
tenacity==8.5.0
    # via
    #   llama-index-core
    #   llama-index-legacy
tiktoken==0.7.0
    # via
    #   llama-index-core
    #   llama-index-legacy
tqdm==4.66.5
    # via
    #   google-generativeai
    #   llama-index-core
    #   nltk
    #   openai
typing-extensions==4.12.2
    # via
    #   anyio
    #   google-generativeai
    #   llama-index-core
    #   llama-index-legacy
    #   openai
    #   pydantic
    #   pydantic-core
    #   pypdf
    #   sqlalchemy
    #   typing-inspect
typing-inspect==0.9.0
    # via
    #   dataclasses-json
    #   llama-index-core
    #   llama-index-legacy
tzdata==2024.1
    # via pandas
uritemplate==4.1.1
    # via google-api-python-client
urllib3==2.2.2
    # via requests
watchdog==4.0.2
    # via mesop
werkzeug==3.0.6
    # via
    #   flask
    #   mesop
wrapt==1.16.0
    # via
    #   deprecated
    #   llama-index-core
yarl==1.18.3
    # via aiohttp