# syntax=docker/dockerfile:1

# Official slim Python runtime as the parent image.
FROM python:3.9-slim

# Hugging Face cache locations. HF_HOME is added for newer transformers
# versions where TRANSFORMERS_CACHE is deprecated; all point at the same dir
# so the pre-downloaded model is found at runtime.
ENV TRANSFORMERS_CACHE=/app/.cache \
    HF_DATASETS_CACHE=/app/.cache \
    HF_HOME=/app/.cache

# WORKDIR creates /app if missing — no explicit mkdir needed for it.
WORKDIR /app

# System dependencies: libsndfile1 is required by soundfile for audio I/O.
# update + install in one layer (avoids stale-cache bug); skip recommended
# packages and drop apt lists to keep the image small.
RUN apt-get update && apt-get install -y --no-install-recommends \
        libsndfile1 \
    && rm -rf /var/lib/apt/lists/*

# Unprivileged runtime user with a stable numeric UID/GID so orchestrators
# (e.g. Kubernetes runAsNonRoot) can verify it.
RUN groupadd --system --gid 10001 app \
    && useradd --system --uid 10001 --gid app --home /app app

# Copy the dependency manifest alone first: this layer stays cached until
# requirements.txt changes, so source edits don't trigger a full reinstall.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Application source, owned by the runtime user.
# NOTE(review): add a .dockerignore (.git, .env, caches) so this COPY doesn't
# pull in secrets or bust the cache unnecessarily — confirm one exists.
COPY --chown=app:app . .

# Cache directory must be writable by the runtime user: the model download
# below and any runtime lock files land here.
RUN mkdir -p /app/.cache && chown app:app /app/.cache

# Drop root before the model download so every cached file is owned by the
# user the container actually runs as.
USER app

# Pre-download the Whisper model at build time so container startup is fast
# and the image works without network access.
RUN python -c "from transformers import WhisperProcessor, WhisperForConditionalGeneration; \
WhisperProcessor.from_pretrained('openai/whisper-large-v3', cache_dir='/app/.cache'); \
WhisperForConditionalGeneration.from_pretrained('openai/whisper-large-v3', cache_dir='/app/.cache')"

# Gradio's default port. EXPOSE is documentation only — publish with -p/-P.
EXPOSE 7860

# Exec form: app.py runs as PID 1 and receives SIGTERM from `docker stop`.
CMD ["python", "app.py"]