# x402-fast-payout-demo / Dockerfile
# Use Python 3.11 slim image for better compatibility with HF Spaces
FROM python:3.11-slim
# Set working directory
WORKDIR /app
# Install system dependencies
RUN apt-get update && apt-get install -y \
git \
curl \
build-essential \
&& rm -rf /var/lib/apt/lists/*
# Copy requirements first for better Docker layer caching
COPY requirements.txt .
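# Note (assumption): requirements.txt is not shown here; for the pre-download
# step below to work it must at least provide transformers and torch, plus
# whatever web framework app.py uses to serve on port 7860.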
# Install Python dependencies
RUN pip install --no-cache-dir --upgrade pip
RUN pip install --no-cache-dir -r requirements.txt
# Copy application files
COPY . .
# Create directories for models and cache
RUN mkdir -p /app/cache /app/models
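# Note (assumption): Docker-based HF Spaces typically run the container as a
# non-root user (uid 1000), so directories created as root at build time may
# not be writable at runtime. If the app hits permission errors when writing
# to the cache, relaxing permissions is a common workaround, e.g.:
#   RUN chmod -R 777 /app/cache /app/models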
# Set environment variables for HF Spaces
ENV PYTHONPATH=/app
ENV PYTHONUNBUFFERED=1
ENV HF_HOME=/app/cache
ENV TRANSFORMERS_CACHE=/app/cache
ENV TORCH_HOME=/app/cache
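# Note: recent transformers releases resolve the cache from HF_HOME (here
# /app/cache, so the hub cache lands in /app/cache/hub) and treat
# TRANSFORMERS_CACHE as deprecated; keeping both set is harmless and also
# covers older transformers versions.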
# Pre-download models to reduce startup time
RUN python -c "\
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModelForSequenceClassification; \
print('📦 Pre-downloading DistilGPT-2...'); \
tokenizer = AutoTokenizer.from_pretrained('distilgpt2'); \
model = AutoModelForCausalLM.from_pretrained('distilgpt2'); \
print('📦 Pre-downloading RoBERTa sentiment model...'); \
sentiment_model = AutoModelForSequenceClassification.from_pretrained('cardiffnlp/twitter-roberta-base-sentiment-latest'); \
sentiment_tokenizer = AutoTokenizer.from_pretrained('cardiffnlp/twitter-roberta-base-sentiment-latest'); \
print('✅ Models downloaded successfully!')"
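# With the weights baked into /app/cache, app.py can load the same models at
# startup without re-downloading. Illustrative sketch, not the actual app code:
#   from transformers import pipeline
#   clf = pipeline('sentiment-analysis', model='cardiffnlp/twitter-roberta-base-sentiment-latest')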
# Expose port 7860 (HF Spaces default)
EXPOSE 7860
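# Note (assumption): for HF Spaces to route traffic to the app, app.py must
# bind to 0.0.0.0:7860; binding to localhost only would make the Space
# unreachable even though the port is exposed.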
# Health check
HEALTHCHECK --interval=30s --timeout=30s --start-period=60s --retries=3 \
CMD curl -f http://localhost:7860/health || exit 1
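# The health check assumes app.py exposes a GET /health route that returns a
# 2xx response once the models are loaded. Minimal FastAPI-style sketch
# (illustrative only, the real handler lives in app.py):
#   @app.get("/health")
#   def health(): return {"status": "ok"}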
# Run the application
CMD ["python", "app.py"]