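# Compose stack for Private-GPT. The backend is selected with Compose
# profiles, e.g. (sketch, assuming Docker Compose v2):
#   docker compose up                          # default: Ollama on CPU
#   docker compose --profile ollama-cuda up    # Ollama on an NVIDIA GPU
#   docker compose --profile ollama-api up     # Ollama already running on the host
#   docker compose --profile llamacpp-cpu up   # local llama.cpp, no Ollama
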
services:

  #-----------------------------------
  #---- Private-GPT services ---------
  #-----------------------------------

  # Private-GPT service for the Ollama CPU, CUDA, and external-API modes
  # This service builds from Dockerfile.ollama and reaches Ollama through the
  # Traefik proxy defined below.
  private-gpt-ollama:
    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.2}-ollama  # x-release-please-version
    build:
      context: .
      dockerfile: Dockerfile.ollama
    volumes:
      - ./local_data/:/home/worker/app/local_data
    ports:
      - "8001:8001"
    environment:
      PORT: 8001
      PGPT_PROFILES: docker
      PGPT_MODE: ollama
      PGPT_EMBED_MODE: ollama
      PGPT_OLLAMA_API_BASE: http://ollama:11434
      HF_TOKEN: ${HF_TOKEN:-}
    profiles:
      - ""
      - ollama-cpu
      - ollama-cuda
      - ollama-api
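    # Once up, the PrivateGPT API and UI are served on host port 8001; a
    # quick liveness check (sketch; /health is PrivateGPT's health endpoint):
    #   curl http://localhost:8001/health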

  # Private-GPT service for the local (llama.cpp) mode
  # This service builds from Dockerfile.llamacpp-cpu; on startup, scripts/setup
  # downloads the model files into ./models before launching the app.
  private-gpt-llamacpp-cpu:
    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.2}-llamacpp-cpu # x-release-please-version
    build:
      context: .
      dockerfile: Dockerfile.llamacpp-cpu
    volumes:
      - ./local_data/:/home/worker/app/local_data
      - ./models/:/home/worker/app/models
    entrypoint: sh -c ".venv/bin/python scripts/setup && .venv/bin/python -m private_gpt"
    ports:
      - "8001:8001"
    environment:
      PORT: 8001
      PGPT_PROFILES: local
      HF_TOKEN: ${HF_TOKEN}
    profiles:
      - llamacpp-cpu
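    # Example launch (sketch; HF_TOKEN must be set so scripts/setup can
    # download the model weights from Hugging Face; hf_xxx is a placeholder):
    #   HF_TOKEN=hf_xxx docker compose --profile llamacpp-cpu up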

  #-----------------------------------
  #---- Ollama services --------------
  #-----------------------------------

  # Traefik reverse proxy in front of the Ollama backends
  # Routes requests from private-gpt-ollama to whichever backend the active
  # profile provides (CPU container, CUDA container, or the host's Ollama API).
  ollama:
    image: traefik:v2.10
    ports:
      - "8081:8080"
    command:
      - "--providers.file.filename=/etc/router.yml"
      - "--log.level=ERROR"
      - "--api.insecure=true"
      - "--providers.docker=true"
      - "--providers.docker.exposedbydefault=false"
      - "--entrypoints.web.address=:11434"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - ./.docker/router.yml:/etc/router.yml:ro
    extra_hosts:
      - "host.docker.internal:host-gateway"
    profiles:
      - ""
      - ollama-cpu
      - ollama-cuda
      - ollama-api
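    # A minimal sketch of what the mounted router.yml might contain: Traefik's
    # file provider load-balancing across the possible Ollama backends (the
    # actual file in .docker/ may differ):
    #   http:
    #     services:
    #       ollama:
    #         loadBalancer:
    #           servers:
    #             - url: http://ollama-cpu:11434
    #             - url: http://ollama-cuda:11434
    #             - url: http://host.docker.internal:11434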

  # Ollama service for the CPU mode
  ollama-cpu:
    image: ollama/ollama:latest
    volumes:
      - ./models:/root/.ollama
    profiles:
      - ""
      - ollama-cpu
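    # Models are persisted in ./models on the host; pull one into Ollama once
    # the service is up (sketch; the model name is illustrative):
    #   docker compose exec ollama-cpu ollama pull llama3.1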

  # Ollama service for the CUDA mode (requires the NVIDIA Container Toolkit)
  ollama-cuda:
    image: ollama/ollama:latest
    volumes:
      - ./models:/root/.ollama
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    profiles:
      - ollama-cuda
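    # To confirm GPU passthrough once the container is up (sketch; assumes
    # the NVIDIA runtime injects nvidia-smi into the container):
    #   docker compose --profile ollama-cuda exec ollama-cuda nvidia-smi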
  
  #-----------------------------------
  #---- OpenAI service ---------------
  #-----------------------------------

  # PrivateGPT variant backed by the OpenAI API instead of a local model.
  openai:
    image: 3x3cut0r/privategpt:latest
    build:
      context: .
      dockerfile: Dockerfile.openai
    container_name: privategpt
    ports:
      - "8080:8080"
    environment:
      OPENAI_API_KEY: ${OPENAI_API_KEY}
      OPENAI_API_BASE: https://api.openai.com/v1
      OPENAI_MODEL: gpt-4o-mini
      OPENAI_TEMPERATURE: 0.5
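
  # Example launch of the OpenAI-backed service (sketch; requires a valid
  # key in the shell environment; sk-... is a placeholder):
  #   OPENAI_API_KEY=sk-... docker compose up openai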