# Hugging Face TGI image
FROM ghcr.io/huggingface/text-generation-inference:3.0.2

# Set working directory
WORKDIR /app

# Create writable cache directories: TGI downloads model weights into /data
# (its default HUGGINGFACE_HUB_CACHE), while /.cache and /.triton hold
# hub/kernel caches when the container runs as a non-root user
RUN mkdir -p /data /.cache /.triton \
    && chmod 777 /data /.cache /.triton
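
# Note: to persist downloaded weights across container restarts, a host
# volume can be mounted over /data at run time, e.g.:
#   docker run -v $PWD/data:/data ...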

# Expose the TGI server port (informational; publish it with `docker run -p 8080:8080`)
EXPOSE 8080

# Pass in the Hugging Face token, needed to download gated models such as meta-llama
ARG HF_TOKEN
ENV HF_TOKEN=${HF_TOKEN}
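
# Note: ENV bakes the token into the image layers (visible via `docker history`);
# for shared images, prefer passing it at run time with `docker run -e HF_TOKEN=...`
# or using a BuildKit build secret instead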

# The base image's ENTRYPOINT is text-generation-launcher, so CMD supplies only its arguments
CMD ["--model-id", "meta-llama/Llama-3.2-1B-Instruct", "--port", "8080"]
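
# Example usage (a sketch; the image tag `tgi-llama` is arbitrary):
#   docker build --build-arg HF_TOKEN=<your token> -t tgi-llama .
#   docker run --gpus all -p 8080:8080 tgi-llama
# Once the server is up, query TGI's /generate endpoint:
#   curl http://localhost:8080/generate -X POST \
#     -H 'Content-Type: application/json' \
#     -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":32}}'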