URGENT: CVEs/Vulns Present, Update Dependencies

#155
Files changed (1)
  1. Dockerfile +8 -8
Dockerfile CHANGED
@@ -1,4 +1,4 @@
-FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu22.04
+FROM nvidia/cuda:12.8.0-cudnn-devel-ubuntu24.04
 
 ENV DEBIAN_FRONTEND=noninteractive
 RUN apt-get update && \
@@ -23,23 +23,24 @@ RUN apt-get update && \
     libxmlsec1-dev \
     libffi-dev \
     liblzma-dev \
-    ffmpeg \
-    nvidia-driver-515
+    ffmpeg \
+    nvidia-driver-570
 
-RUN useradd -m -u 1000 user
-USER user
+# Check if user with UID 1000 exists, if not create it
+RUN id -u 1000 &>/dev/null || useradd -m -u 1000 user
+USER 1000
 ENV HOME=/home/user \
     PATH=/home/user/.local/bin:${PATH}
 WORKDIR ${HOME}/app
 
 RUN curl https://pyenv.run | bash
 ENV PATH=${HOME}/.pyenv/shims:${HOME}/.pyenv/bin:${PATH}
-ARG PYTHON_VERSION=3.10.13
+ARG PYTHON_VERSION=3.11
 RUN pyenv install ${PYTHON_VERSION} && \
     pyenv global ${PYTHON_VERSION} && \
     pyenv rehash && \
     pip install --no-cache-dir -U pip setuptools wheel && \
-    pip install "huggingface-hub" "hf-transfer" "gradio[oauth]>=4.28.0" "gradio_huggingfacehub_search==0.0.7" "APScheduler"
+    pip install "huggingface-hub" "hf-transfer" "gradio[oauth]>=4.28.0" "gradio_huggingfacehub_search==0.0.8" "APScheduler"
 
 COPY --chown=1000 . ${HOME}/app
 RUN git clone https://github.com/ggerganov/llama.cpp
@@ -60,6 +61,5 @@ ENV PYTHONPATH=${HOME}/app \
     LD_LIBRARY_PATH=/usr/local/cuda/lib64:${LD_LIBRARY_PATH} \
     PATH=/usr/local/nvidia/bin:${PATH}
 
-
 ENTRYPOINT /bin/bash start.sh
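
Note on the UID 1000 guard (my reading; the diff itself does not explain it): the ubuntu24.04 base images ship with a default "ubuntu" user that already occupies UID 1000, so the old unconditional useradd -m -u 1000 user would fail on the new base. The id -u 1000 ... || useradd ... form only creates the user when that UID is still free. A quick way to confirm the pre-existing user on the new base:

# Ubuntu 24.04 images already define UID 1000
docker run --rm ubuntu:24.04 id ubuntu   # should report uid=1000(ubuntu) gid=1000(ubuntu) ...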
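
For local verification, something like the following should work (a sketch: the image tag is arbitrary, and the --entrypoint overrides are only needed because the image uses a shell-form ENTRYPOINT):

# Build the updated image from the repo root
docker build -t gguf-my-repo:pr-155 .

# Check that the GPU is visible from the container (needs the NVIDIA Container Toolkit on the host)
docker run --rm --gpus all --entrypoint nvidia-smi gguf-my-repo:pr-155

# Check that pyenv resolved Python 3.11 and that the bumped package installed
docker run --rm --entrypoint python gguf-my-repo:pr-155 --version
docker run --rm --entrypoint pip gguf-my-repo:pr-155 show gradio_huggingfacehub_search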