# Qwen3-Ollama-Small / Dockerfile
# Source: Hugging Face Space by khoipm08 (commit 9f17288, "Update Dockerfile")
# Use official Python image as base
FROM python:3.9

# Install curl (needed below to fetch the Ollama install script) and clean
# the apt cache in the same layer so it does not bloat the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
    curl \
    && rm -rf /var/lib/apt/lists/*

# Fail piped RUN commands if any upstream command fails — without pipefail,
# a failed download would still let `sh` "succeed" on empty input.
SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# Install Ollama
# NOTE(review): `curl | sh` with no pinned version or checksum is not
# reproducible and trusts the remote script — consider pinning an Ollama
# release and verifying its digest.
RUN curl -fsSL https://ollama.ai/install.sh | sh
# Create an unprivileged runtime user (HF Spaces convention: uid 1000, "user")
# and pre-create /app owned by that user — WORKDIR alone would create it
# root-owned, leaving the app unable to write into its own directory.
RUN useradd -m -u 1000 user \
    && mkdir -p /app \
    && chown user:user /app
USER user

# `pip install --user` puts console scripts in ~/.local/bin; make them resolvable.
ENV PATH="/home/user/.local/bin:$PATH"

# Set working directory
WORKDIR /app

# Copy only the dependency manifest first so this (slow) install layer stays
# cached until requirements.txt itself changes; --chown keeps it owned by the
# runtime user instead of root.
COPY --chown=user:user requirements.txt .
RUN pip install --user --no-cache-dir -r requirements.txt
# Copy application files (owned by the runtime user for consistency with
# the non-root USER in effect here; a bare COPY would leave it root-owned).
COPY --chown=user:user app.py .

# Expose port 7860 (required for HF Spaces). EXPOSE is documentation only —
# it does not publish the port; the app itself must bind 7860.
EXPOSE 7860
# Start the Ollama server in the background, wait until its API actually
# answers (a fixed `sleep 5` races slow cold starts and makes `ollama pull`
# fail), pull the model, then `exec` the Flask app so python3 becomes PID 1
# and receives SIGTERM from `docker stop`. Exec-form CMD avoids an extra
# wrapping `sh -c`.
# NOTE(review): qwen3:8b is pulled on every cold start — consider baking the
# model into the image or a persistent volume if startup time matters.
CMD ["bash", "-c", "set -e; \
    echo 'Starting Ollama server...'; \
    OLLAMA_KEEP_ALIVE=-1 ollama serve & \
    until curl -fsS http://127.0.0.1:11434/ >/dev/null 2>&1; do sleep 1; done; \
    ollama pull qwen3:8b; \
    echo 'Start Flask application'; \
    exec python3 app.py"]