CatPtain committed on
Commit 31431b3 · verified · 1 Parent(s): 5fefff9

Upload 30 files

docker/.env.example ADDED
@@ -0,0 +1,938 @@
1
+ # ------------------------------
2
+ # Environment Variables for API service & worker
3
+ # ------------------------------
4
+
5
+ # ------------------------------
6
+ # Common Variables
7
+ # ------------------------------
8
+
9
+ # The backend URL of the console API,
10
+ # used to concatenate the authorization callback.
11
+ # If empty, it is the same domain.
12
+ # Example: https://api.console.dify.ai
13
+ CONSOLE_API_URL=
14
+
15
+ # The front-end URL of the console web,
16
+ # used to concatenate some front-end addresses and for CORS configuration use.
17
+ # If empty, it is the same domain.
18
+ # Example: https://console.dify.ai
19
+ CONSOLE_WEB_URL=
20
+
21
+ # Service API Url,
22
+ # used to display Service API Base Url to the front-end.
23
+ # If empty, it is the same domain.
24
+ # Example: https://api.dify.ai
25
+ SERVICE_API_URL=
26
+
27
+ # WebApp API backend Url,
28
+ # used to declare the back-end URL for the front-end API.
29
+ # If empty, it is the same domain.
30
+ # Example: https://api.app.dify.ai
31
+ APP_API_URL=
32
+
33
+ # WebApp Url,
34
+ # used to display WebAPP API Base Url to the front-end.
35
+ # If empty, it is the same domain.
36
+ # Example: https://app.dify.ai
37
+ APP_WEB_URL=
38
+
39
+ # File preview or download Url prefix.
40
+ # used to display File preview or download Url to the front-end or as multi-modal inputs;
41
+ # Url is signed and has expiration time.
42
+ FILES_URL=
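These URL variables only need values when the console, API, and web app are served from different domains. A minimal sketch for a hypothetical split-domain deployment (every hostname below is a placeholder, and pointing `FILES_URL` at the API host is an assumption, not a requirement):

```shell
# Hypothetical split-domain values; leave all of these empty for a same-domain deployment.
CONSOLE_API_URL=https://api.console.example.com
CONSOLE_WEB_URL=https://console.example.com
SERVICE_API_URL=https://api.example.com
APP_API_URL=https://api.app.example.com
APP_WEB_URL=https://app.example.com
# Signed file links must be reachable by end users (and by model providers in url mode).
FILES_URL=https://api.example.com
```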
43
+
44
+ # ------------------------------
45
+ # Server Configuration
46
+ # ------------------------------
47
+
48
+ # The log level for the application.
49
+ # Supported values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`
50
+ LOG_LEVEL=INFO
51
+ # Log file path
52
+ LOG_FILE=/app/logs/server.log
53
+ # Log file max size, the unit is MB
54
+ LOG_FILE_MAX_SIZE=20
55
+ # Log file max backup count
56
+ LOG_FILE_BACKUP_COUNT=5
57
+ # Log dateformat
58
+ LOG_DATEFORMAT=%Y-%m-%d %H:%M:%S
59
+ # Log Timezone
60
+ LOG_TZ=UTC
61
+
62
+ # Debug mode, default is false.
63
+ # It is recommended to turn on this configuration for local development
64
+ # to prevent some problems caused by monkey patch.
65
+ DEBUG=false
66
+
67
+ # Flask debug mode, it can output trace information at the interface when turned on,
68
+ # which is convenient for debugging.
69
+ FLASK_DEBUG=false
70
+
71
+ # A secret key that is used for securely signing the session cookie
72
+ # and encrypting sensitive information on the database.
73
+ # You can generate a strong key using `openssl rand -base64 42`.
74
+ SECRET_KEY=sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U
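The placeholder key above should not be reused in a real deployment. A small sketch for generating and writing a fresh key with the `openssl` command mentioned in the comment (the in-place `sed` edit is one possible approach and assumes GNU sed operating on `docker/.env`):

```shell
# Generate a strong random key and replace the SECRET_KEY line in .env (GNU sed assumed).
NEW_KEY="$(openssl rand -base64 42)"
sed -i "s|^SECRET_KEY=.*|SECRET_KEY=${NEW_KEY}|" .env
```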
75
+
76
+ # Password for admin user initialization.
77
+ # If left unset, admin user will not be prompted for a password
78
+ # when creating the initial admin account.
79
+ # The length of the password cannot exceed 30 characters.
80
+ INIT_PASSWORD=
81
+
82
+ # Deployment environment.
83
+ # Supported values are `PRODUCTION`, `TESTING`. Default is `PRODUCTION`.
84
+ # Testing environment. There will be a distinct color label on the front-end page,
85
+ # indicating that this environment is a testing environment.
86
+ DEPLOY_ENV=PRODUCTION
87
+
88
+ # Whether to enable the version check policy.
89
+ # If set to empty, https://updates.dify.ai will be called for version check.
90
+ CHECK_UPDATE_URL=https://updates.dify.ai
91
+
92
+ # Used to change the OpenAI base address, default is https://api.openai.com/v1.
93
+ # When OpenAI cannot be accessed in China, replace it with a domestic mirror address,
94
+ # or when a local model provides an OpenAI-compatible API, it can be replaced.
95
+ OPENAI_API_BASE=https://api.openai.com/v1
96
+
97
+ # When enabled, migrations will be executed prior to application startup
98
+ # and the application will start after the migrations have completed.
99
+ MIGRATION_ENABLED=true
100
+
101
+ # File Access Time specifies a time interval in seconds for the file to be accessed.
102
+ # The default value is 300 seconds.
103
+ FILES_ACCESS_TIMEOUT=300
104
+
105
+ # Access token expiration time in minutes
106
+ ACCESS_TOKEN_EXPIRE_MINUTES=60
107
+
108
+ # Refresh token expiration time in days
109
+ REFRESH_TOKEN_EXPIRE_DAYS=30
110
+
111
+ # The maximum number of active requests for the application, where 0 means unlimited, should be a non-negative integer.
112
+ APP_MAX_ACTIVE_REQUESTS=0
113
+ APP_MAX_EXECUTION_TIME=1200
114
+
115
+ # ------------------------------
116
+ # Container Startup Related Configuration
117
+ # Only effective when starting with docker image or docker-compose.
118
+ # ------------------------------
119
+
120
+ # API service binding address, default: 0.0.0.0, i.e., all addresses can be accessed.
121
+ DIFY_BIND_ADDRESS=0.0.0.0
122
+
123
+ # API service binding port number, default 5001.
124
+ DIFY_PORT=5001
125
+
126
+ # The number of API server workers, i.e., the number of gunicorn workers.
127
+ # Formula: number of cpu cores x 2 + 1 for sync, 1 for Gevent
128
+ # Reference: https://docs.gunicorn.org/en/stable/design.html#how-many-workers
129
+ SERVER_WORKER_AMOUNT=1
130
+
131
+ # Defaults to gevent. If using windows, it can be switched to sync or solo.
132
+ SERVER_WORKER_CLASS=gevent
133
+
134
+ # Default number of worker connections, the default is 10.
135
+ SERVER_WORKER_CONNECTIONS=10
136
+
137
+ # Similar to SERVER_WORKER_CLASS.
138
+ # If using windows, it can be switched to sync or solo.
139
+ CELERY_WORKER_CLASS=
140
+
141
+ # Request handling timeout. The default is 200,
142
+ # it is recommended to set it to 360 to support a longer SSE connection time.
143
+ GUNICORN_TIMEOUT=360
144
+
145
+ # The number of Celery workers. The default is 1, and can be set as needed.
146
+ CELERY_WORKER_AMOUNT=
147
+
148
+ # Flag indicating whether to enable autoscaling of Celery workers.
149
+ #
150
+ # Autoscaling is useful when tasks are CPU intensive and can be dynamically
151
+ # allocated and deallocated based on the workload.
152
+ #
153
+ # When autoscaling is enabled, the maximum and minimum number of workers can
154
+ # be specified. The autoscaling algorithm will dynamically adjust the number
155
+ # of workers within the specified range.
156
+ #
157
+ # Default is false (i.e., autoscaling is disabled).
158
+ #
159
+ # Example:
160
+ # CELERY_AUTO_SCALE=true
161
+ CELERY_AUTO_SCALE=false
162
+
163
+ # The maximum number of Celery workers that can be autoscaled.
164
+ # This is optional and only used when autoscaling is enabled.
165
+ # Default is not set.
166
+ CELERY_MAX_WORKERS=
167
+
168
+ # The minimum number of Celery workers that can be autoscaled.
169
+ # This is optional and only used when autoscaling is enabled.
170
+ # Default is not set.
171
+ CELERY_MIN_WORKERS=
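For example, enabling autoscaling with an illustrative window of 2 to 8 workers (the numbers are placeholders, not recommendations):

```shell
# Let Celery scale between 2 and 8 workers depending on queue load (illustrative values).
CELERY_AUTO_SCALE=true
CELERY_MIN_WORKERS=2
CELERY_MAX_WORKERS=8
```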
172
+
173
+ # API Tool configuration
174
+ API_TOOL_DEFAULT_CONNECT_TIMEOUT=10
175
+ API_TOOL_DEFAULT_READ_TIMEOUT=60
176
+
177
+
178
+ # ------------------------------
179
+ # Database Configuration
180
+ # The database uses PostgreSQL. Please use the public schema.
181
+ # It is consistent with the configuration in the 'db' service below.
182
+ # ------------------------------
183
+
184
+ DB_USERNAME=postgres
185
+ DB_PASSWORD=difyai123456
186
+ DB_HOST=db
187
+ DB_PORT=5432
188
+ DB_DATABASE=dify
189
+ # The size of the database connection pool.
190
+ # The default is 30 connections, which can be appropriately increased.
191
+ SQLALCHEMY_POOL_SIZE=30
192
+ # Database connection pool recycling time, the default is 3600 seconds.
193
+ SQLALCHEMY_POOL_RECYCLE=3600
194
+ # Whether to print SQL, default is false.
195
+ SQLALCHEMY_ECHO=false
196
+
197
+ # Maximum number of connections to the database
198
+ # Default is 100
199
+ #
200
+ # Reference: https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-MAX-CONNECTIONS
201
+ POSTGRES_MAX_CONNECTIONS=100
202
+
203
+ # Sets the amount of shared memory used for postgres's shared buffers.
204
+ # Default is 128MB
205
+ # Recommended value: 25% of available memory
206
+ # Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-SHARED-BUFFERS
207
+ POSTGRES_SHARED_BUFFERS=128MB
208
+
209
+ # Sets the amount of memory used by each database worker for working space.
210
+ # Default is 4MB
211
+ #
212
+ # Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-WORK-MEM
213
+ POSTGRES_WORK_MEM=4MB
214
+
215
+ # Sets the amount of memory reserved for maintenance activities.
216
+ # Default is 64MB
217
+ #
218
+ # Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-MAINTENANCE-WORK-MEM
219
+ POSTGRES_MAINTENANCE_WORK_MEM=64MB
220
+
221
+ # Sets the planner's assumption about the effective cache size.
222
+ # Default is 4096MB
223
+ #
224
+ # Reference: https://www.postgresql.org/docs/current/runtime-config-query.html#GUC-EFFECTIVE-CACHE-SIZE
225
+ POSTGRES_EFFECTIVE_CACHE_SIZE=4096MB
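As a worked example of the sizing hints above, a sketch for a host with roughly 16 GB of RAM dedicated to the database; the 75% figure for effective_cache_size and the work_mem bump are common guidance, not values taken from this file:

```shell
# Rough sizing for a ~16 GB database host; adjust to your actual workload.
POSTGRES_SHARED_BUFFERS=4096MB          # ~25% of RAM, per the recommendation above
POSTGRES_EFFECTIVE_CACHE_SIZE=12288MB   # ~75% of RAM; a planner hint, not an allocation
POSTGRES_WORK_MEM=16MB                  # per sort/hash operation, so keep concurrency in mind
POSTGRES_MAINTENANCE_WORK_MEM=512MB     # used by VACUUM, CREATE INDEX, etc.
```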
226
+
227
+ # ------------------------------
228
+ # Redis Configuration
229
+ # This Redis configuration is used for caching and for pub/sub during conversation.
230
+ # ------------------------------
231
+
232
+ REDIS_HOST=redis
233
+ REDIS_PORT=6379
234
+ REDIS_USERNAME=
235
+ REDIS_PASSWORD=difyai123456
236
+ REDIS_USE_SSL=false
237
+ REDIS_DB=0
238
+
239
+ # Whether to use Redis Sentinel mode.
240
+ # If set to true, the application will automatically discover and connect to the master node through Sentinel.
241
+ REDIS_USE_SENTINEL=false
242
+
243
+ # List of Redis Sentinel nodes. If Sentinel mode is enabled, provide at least one Sentinel IP and port.
244
+ # Format: `<sentinel1_ip>:<sentinel1_port>,<sentinel2_ip>:<sentinel2_port>,<sentinel3_ip>:<sentinel3_port>`
245
+ REDIS_SENTINELS=
246
+ REDIS_SENTINEL_SERVICE_NAME=
247
+ REDIS_SENTINEL_USERNAME=
248
+ REDIS_SENTINEL_PASSWORD=
249
+ REDIS_SENTINEL_SOCKET_TIMEOUT=0.1
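A sketch of a Sentinel-backed configuration, assuming three hypothetical Sentinel nodes watching a master group named `mymaster` (all hosts and names below are placeholders):

```shell
# Hypothetical Redis Sentinel topology; replace hosts, service name, and password with your own.
REDIS_USE_SENTINEL=true
REDIS_SENTINELS=192.168.1.10:26379,192.168.1.11:26379,192.168.1.12:26379
REDIS_SENTINEL_SERVICE_NAME=mymaster
REDIS_SENTINEL_PASSWORD=difyai123456
```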
250
+
251
+ # List of Redis Cluster nodes. If Cluster mode is enabled, provide at least one Cluster IP and port.
252
+ # Format: `<Cluster1_ip>:<Cluster1_port>,<Cluster2_ip>:<Cluster2_port>,<Cluster3_ip>:<Cluster3_port>`
253
+ REDIS_USE_CLUSTERS=false
254
+ REDIS_CLUSTERS=
255
+ REDIS_CLUSTERS_PASSWORD=
256
+
257
+ # ------------------------------
258
+ # Celery Configuration
259
+ # ------------------------------
260
+
261
+ # Use redis as the broker, and redis db 1 for celery broker.
262
+ # Format as follows: `redis://<redis_username>:<redis_password>@<redis_host>:<redis_port>/<redis_database>`
263
+ # Example: redis://:difyai123456@redis:6379/1
264
+ # If using Redis Sentinel, the format is as follows: `sentinel://<sentinel_username>:<sentinel_password>@<sentinel_host>:<sentinel_port>/<redis_database>`
265
+ # Example: sentinel://localhost:26379/1;sentinel://localhost:26380/1;sentinel://localhost:26381/1
266
+ CELERY_BROKER_URL=redis://:difyai123456@redis:6379/1
267
+ BROKER_USE_SSL=false
268
+
269
+ # If you are using Redis Sentinel for high availability, configure the following settings.
270
+ CELERY_USE_SENTINEL=false
271
+ CELERY_SENTINEL_MASTER_NAME=
272
+ CELERY_SENTINEL_SOCKET_TIMEOUT=0.1
273
+
274
+ # ------------------------------
275
+ # CORS Configuration
276
+ # Used to set the front-end cross-domain access policy.
277
+ # ------------------------------
278
+
279
+ # Specifies the allowed origins for cross-origin requests to the Web API,
280
+ # e.g. https://dify.app or * for all origins.
281
+ WEB_API_CORS_ALLOW_ORIGINS=*
282
+
283
+ # Specifies the allowed origins for cross-origin requests to the console API,
284
+ # e.g. https://cloud.dify.ai or * for all origins.
285
+ CONSOLE_CORS_ALLOW_ORIGINS=*
286
+
287
+ # ------------------------------
288
+ # File Storage Configuration
289
+ # ------------------------------
290
+
291
+ # The type of storage to use for storing user files.
292
+ STORAGE_TYPE=opendal
293
+
294
+ # Apache OpenDAL Configuration
295
+ # The configuration for OpenDAL consists of the following format: OPENDAL_<SCHEME_NAME>_<CONFIG_NAME>.
296
+ # You can find all the service configurations (CONFIG_NAME) in the repository at: https://github.com/apache/opendal/tree/main/core/src/services.
297
+ # Dify will scan configurations starting with OPENDAL_<SCHEME_NAME> and automatically apply them.
298
+ # The scheme name for the OpenDAL storage.
299
+ OPENDAL_SCHEME=fs
300
+ # Configurations for OpenDAL Local File System.
301
+ OPENDAL_FS_ROOT=storage
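Following the `OPENDAL_<SCHEME_NAME>_<CONFIG_NAME>` rule described above, an S3-backed setup might look like the sketch below. The `OPENDAL_S3_*` config names are taken from OpenDAL's `s3` service and should be verified against the repository linked above; the bucket, endpoint, and credentials are placeholders.

```shell
# Hypothetical OpenDAL s3 backend; confirm the config names in the OpenDAL services docs.
OPENDAL_SCHEME=s3
OPENDAL_S3_ROOT=/dify
OPENDAL_S3_BUCKET=my-dify-bucket
OPENDAL_S3_REGION=us-east-1
OPENDAL_S3_ENDPOINT=https://s3.us-east-1.amazonaws.com
OPENDAL_S3_ACCESS_KEY_ID=your-access-key
OPENDAL_S3_SECRET_ACCESS_KEY=your-secret-key
```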
302
+
303
+ # S3 Configuration
304
+ #
305
+ S3_ENDPOINT=
306
+ S3_REGION=us-east-1
307
+ S3_BUCKET_NAME=difyai
308
+ S3_ACCESS_KEY=
309
+ S3_SECRET_KEY=
310
+ # Whether to use AWS managed IAM roles for authenticating with the S3 service.
311
+ # If set to false, the access key and secret key must be provided.
312
+ S3_USE_AWS_MANAGED_IAM=false
313
+
314
+ # Azure Blob Configuration
315
+ #
316
+ AZURE_BLOB_ACCOUNT_NAME=difyai
317
+ AZURE_BLOB_ACCOUNT_KEY=difyai
318
+ AZURE_BLOB_CONTAINER_NAME=difyai-container
319
+ AZURE_BLOB_ACCOUNT_URL=https://<your_account_name>.blob.core.windows.net
320
+
321
+ # Google Storage Configuration
322
+ #
323
+ GOOGLE_STORAGE_BUCKET_NAME=your-bucket-name
324
+ GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64=
325
+
326
+ # The Alibaba Cloud OSS configurations,
327
+ #
328
+ ALIYUN_OSS_BUCKET_NAME=your-bucket-name
329
+ ALIYUN_OSS_ACCESS_KEY=your-access-key
330
+ ALIYUN_OSS_SECRET_KEY=your-secret-key
331
+ ALIYUN_OSS_ENDPOINT=https://oss-ap-southeast-1-internal.aliyuncs.com
332
+ ALIYUN_OSS_REGION=ap-southeast-1
333
+ ALIYUN_OSS_AUTH_VERSION=v4
334
+ # Don't start with '/'. OSS doesn't support leading slash in object names.
335
+ ALIYUN_OSS_PATH=your-path
336
+
337
+ # Tencent COS Configuration
338
+ #
339
+ TENCENT_COS_BUCKET_NAME=your-bucket-name
340
+ TENCENT_COS_SECRET_KEY=your-secret-key
341
+ TENCENT_COS_SECRET_ID=your-secret-id
342
+ TENCENT_COS_REGION=your-region
343
+ TENCENT_COS_SCHEME=your-scheme
344
+
345
+ # Oracle Storage Configuration
346
+ #
347
+ OCI_ENDPOINT=https://objectstorage.us-ashburn-1.oraclecloud.com
348
+ OCI_BUCKET_NAME=your-bucket-name
349
+ OCI_ACCESS_KEY=your-access-key
350
+ OCI_SECRET_KEY=your-secret-key
351
+ OCI_REGION=us-ashburn-1
352
+
353
+ # Huawei OBS Configuration
354
+ #
355
+ HUAWEI_OBS_BUCKET_NAME=your-bucket-name
356
+ HUAWEI_OBS_SECRET_KEY=your-secret-key
357
+ HUAWEI_OBS_ACCESS_KEY=your-access-key
358
+ HUAWEI_OBS_SERVER=your-server-url
359
+
360
+ # Volcengine TOS Configuration
361
+ #
362
+ VOLCENGINE_TOS_BUCKET_NAME=your-bucket-name
363
+ VOLCENGINE_TOS_SECRET_KEY=your-secret-key
364
+ VOLCENGINE_TOS_ACCESS_KEY=your-access-key
365
+ VOLCENGINE_TOS_ENDPOINT=your-server-url
366
+ VOLCENGINE_TOS_REGION=your-region
367
+
368
+ # Baidu OBS Storage Configuration
369
+ #
370
+ BAIDU_OBS_BUCKET_NAME=your-bucket-name
371
+ BAIDU_OBS_SECRET_KEY=your-secret-key
372
+ BAIDU_OBS_ACCESS_KEY=your-access-key
373
+ BAIDU_OBS_ENDPOINT=your-server-url
374
+
375
+ # Supabase Storage Configuration
376
+ #
377
+ SUPABASE_BUCKET_NAME=your-bucket-name
378
+ SUPABASE_API_KEY=your-access-key
379
+ SUPABASE_URL=your-server-url
380
+
381
+ # ------------------------------
382
+ # Vector Database Configuration
383
+ # ------------------------------
384
+
385
+ # The type of vector store to use.
386
+ # Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `tidb_vector`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `oceanbase`.
387
+ VECTOR_STORE=weaviate
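For example, switching to Qdrant means changing `VECTOR_STORE` and filling in the matching `QDRANT_*` block below (the values shown are this file's own defaults); remember that `COMPOSE_PROFILES` near the end of this file decides which vector-store container is actually started.

```shell
# Example: use Qdrant instead of Weaviate, reusing the defaults already defined in this file.
VECTOR_STORE=qdrant
QDRANT_URL=http://qdrant:6333
QDRANT_API_KEY=difyai123456
```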
388
+
389
+ # The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`.
390
+ WEAVIATE_ENDPOINT=http://weaviate:8080
391
+ WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
392
+
393
+ # The Qdrant endpoint URL. Only available when VECTOR_STORE is `qdrant`.
394
+ QDRANT_URL=http://qdrant:6333
395
+ QDRANT_API_KEY=difyai123456
396
+ QDRANT_CLIENT_TIMEOUT=20
397
+ QDRANT_GRPC_ENABLED=false
398
+ QDRANT_GRPC_PORT=6334
399
+
400
+ # Milvus configuration Only available when VECTOR_STORE is `milvus`.
401
+ # The milvus uri.
402
+ MILVUS_URI=http://127.0.0.1:19530
403
+ MILVUS_TOKEN=
404
+ MILVUS_USER=root
405
+ MILVUS_PASSWORD=Milvus
406
+ MILVUS_ENABLE_HYBRID_SEARCH=False
407
+
408
+ # MyScale configuration, only available when VECTOR_STORE is `myscale`
409
+ # For multi-language support, please set MYSCALE_FTS_PARAMS, referring to:
410
+ # https://myscale.com/docs/en/text-search/#understanding-fts-index-parameters
411
+ MYSCALE_HOST=myscale
412
+ MYSCALE_PORT=8123
413
+ MYSCALE_USER=default
414
+ MYSCALE_PASSWORD=
415
+ MYSCALE_DATABASE=dify
416
+ MYSCALE_FTS_PARAMS=
417
+
418
+ # Couchbase configurations, only available when VECTOR_STORE is `couchbase`
419
+ # The connection string must include hostname defined in the docker-compose file (couchbase-server in this case)
420
+ COUCHBASE_CONNECTION_STRING=couchbase://couchbase-server
421
+ COUCHBASE_USER=Administrator
422
+ COUCHBASE_PASSWORD=password
423
+ COUCHBASE_BUCKET_NAME=Embeddings
424
+ COUCHBASE_SCOPE_NAME=_default
425
+
426
+ # pgvector configurations, only available when VECTOR_STORE is `pgvector`
427
+ PGVECTOR_HOST=pgvector
428
+ PGVECTOR_PORT=5432
429
+ PGVECTOR_USER=postgres
430
+ PGVECTOR_PASSWORD=difyai123456
431
+ PGVECTOR_DATABASE=dify
432
+ PGVECTOR_MIN_CONNECTION=1
433
+ PGVECTOR_MAX_CONNECTION=5
434
+
435
+ # pgvecto-rs configurations, only available when VECTOR_STORE is `pgvecto-rs`
436
+ PGVECTO_RS_HOST=pgvecto-rs
437
+ PGVECTO_RS_PORT=5432
438
+ PGVECTO_RS_USER=postgres
439
+ PGVECTO_RS_PASSWORD=difyai123456
440
+ PGVECTO_RS_DATABASE=dify
441
+
442
+ # analyticdb configurations, only available when VECTOR_STORE is `analyticdb`
443
+ ANALYTICDB_KEY_ID=your-ak
444
+ ANALYTICDB_KEY_SECRET=your-sk
445
+ ANALYTICDB_REGION_ID=cn-hangzhou
446
+ ANALYTICDB_INSTANCE_ID=gp-ab123456
447
+ ANALYTICDB_ACCOUNT=testaccount
448
+ ANALYTICDB_PASSWORD=testpassword
449
+ ANALYTICDB_NAMESPACE=dify
450
+ ANALYTICDB_NAMESPACE_PASSWORD=difypassword
451
+ ANALYTICDB_HOST=gp-test.aliyuncs.com
452
+ ANALYTICDB_PORT=5432
453
+ ANALYTICDB_MIN_CONNECTION=1
454
+ ANALYTICDB_MAX_CONNECTION=5
455
+
456
+ # TiDB vector configurations, only available when VECTOR_STORE is `tidb`
457
+ TIDB_VECTOR_HOST=tidb
458
+ TIDB_VECTOR_PORT=4000
459
+ TIDB_VECTOR_USER=
460
+ TIDB_VECTOR_PASSWORD=
461
+ TIDB_VECTOR_DATABASE=dify
462
+
463
+ # Tidb on qdrant configuration, only available when VECTOR_STORE is `tidb_on_qdrant`
464
+ TIDB_ON_QDRANT_URL=http://127.0.0.1
465
+ TIDB_ON_QDRANT_API_KEY=dify
466
+ TIDB_ON_QDRANT_CLIENT_TIMEOUT=20
467
+ TIDB_ON_QDRANT_GRPC_ENABLED=false
468
+ TIDB_ON_QDRANT_GRPC_PORT=6334
469
+ TIDB_PUBLIC_KEY=dify
470
+ TIDB_PRIVATE_KEY=dify
471
+ TIDB_API_URL=http://127.0.0.1
472
+ TIDB_IAM_API_URL=http://127.0.0.1
473
+ TIDB_REGION=regions/aws-us-east-1
474
+ TIDB_PROJECT_ID=dify
475
+ TIDB_SPEND_LIMIT=100
476
+
477
+ # Chroma configuration, only available when VECTOR_STORE is `chroma`
478
+ CHROMA_HOST=127.0.0.1
479
+ CHROMA_PORT=8000
480
+ CHROMA_TENANT=default_tenant
481
+ CHROMA_DATABASE=default_database
482
+ CHROMA_AUTH_PROVIDER=chromadb.auth.token_authn.TokenAuthClientProvider
483
+ CHROMA_AUTH_CREDENTIALS=
484
+
485
+ # Oracle configuration, only available when VECTOR_STORE is `oracle`
486
+ ORACLE_HOST=oracle
487
+ ORACLE_PORT=1521
488
+ ORACLE_USER=dify
489
+ ORACLE_PASSWORD=dify
490
+ ORACLE_DATABASE=FREEPDB1
491
+
492
+ # relyt configurations, only available when VECTOR_STORE is `relyt`
493
+ RELYT_HOST=db
494
+ RELYT_PORT=5432
495
+ RELYT_USER=postgres
496
+ RELYT_PASSWORD=difyai123456
497
+ RELYT_DATABASE=postgres
498
+
499
+ # open search configuration, only available when VECTOR_STORE is `opensearch`
500
+ OPENSEARCH_HOST=opensearch
501
+ OPENSEARCH_PORT=9200
502
+ OPENSEARCH_USER=admin
503
+ OPENSEARCH_PASSWORD=admin
504
+ OPENSEARCH_SECURE=true
505
+
506
+ # tencent vector configurations, only available when VECTOR_STORE is `tencent`
507
+ TENCENT_VECTOR_DB_URL=http://127.0.0.1
508
+ TENCENT_VECTOR_DB_API_KEY=dify
509
+ TENCENT_VECTOR_DB_TIMEOUT=30
510
+ TENCENT_VECTOR_DB_USERNAME=dify
511
+ TENCENT_VECTOR_DB_DATABASE=dify
512
+ TENCENT_VECTOR_DB_SHARD=1
513
+ TENCENT_VECTOR_DB_REPLICAS=2
514
+
515
+ # ElasticSearch configuration, only available when VECTOR_STORE is `elasticsearch`
516
+ ELASTICSEARCH_HOST=0.0.0.0
517
+ ELASTICSEARCH_PORT=9200
518
+ ELASTICSEARCH_USERNAME=elastic
519
+ ELASTICSEARCH_PASSWORD=elastic
520
+ KIBANA_PORT=5601
521
+
522
+ # baidu vector configurations, only available when VECTOR_STORE is `baidu`
523
+ BAIDU_VECTOR_DB_ENDPOINT=http://127.0.0.1:5287
524
+ BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS=30000
525
+ BAIDU_VECTOR_DB_ACCOUNT=root
526
+ BAIDU_VECTOR_DB_API_KEY=dify
527
+ BAIDU_VECTOR_DB_DATABASE=dify
528
+ BAIDU_VECTOR_DB_SHARD=1
529
+ BAIDU_VECTOR_DB_REPLICAS=3
530
+
531
+ # VikingDB configurations, only available when VECTOR_STORE is `vikingdb`
532
+ VIKINGDB_ACCESS_KEY=your-ak
533
+ VIKINGDB_SECRET_KEY=your-sk
534
+ VIKINGDB_REGION=cn-shanghai
535
+ VIKINGDB_HOST=api-vikingdb.xxx.volces.com
536
+ VIKINGDB_SCHEMA=http
537
+ VIKINGDB_CONNECTION_TIMEOUT=30
538
+ VIKINGDB_SOCKET_TIMEOUT=30
539
+
540
+ # Lindorm configuration, only available when VECTOR_STORE is `lindorm`
541
+ LINDORM_URL=http://lindorm:30070
542
+ LINDORM_USERNAME=lindorm
543
+ LINDORM_PASSWORD=lindorm
544
+
545
+ # OceanBase Vector configuration, only available when VECTOR_STORE is `oceanbase`
546
+ OCEANBASE_VECTOR_HOST=oceanbase
547
+ OCEANBASE_VECTOR_PORT=2881
548
+ OCEANBASE_VECTOR_USER=root@test
549
+ OCEANBASE_VECTOR_PASSWORD=difyai123456
550
+ OCEANBASE_VECTOR_DATABASE=test
551
+ OCEANBASE_CLUSTER_NAME=difyai
552
+ OCEANBASE_MEMORY_LIMIT=6G
553
+
554
+ # Upstash Vector configuration, only available when VECTOR_STORE is `upstash`
555
+ UPSTASH_VECTOR_URL=https://xxx-vector.upstash.io
556
+ UPSTASH_VECTOR_TOKEN=dify
557
+
558
+ # ------------------------------
559
+ # Knowledge Configuration
560
+ # ------------------------------
561
+
562
+ # Upload file size limit, default 15M.
563
+ UPLOAD_FILE_SIZE_LIMIT=15
564
+
565
+ # The maximum number of files that can be uploaded at a time, default 5.
566
+ UPLOAD_FILE_BATCH_LIMIT=5
567
+
568
+ # ETL type, support: `dify`, `Unstructured`
569
+ # `dify` Dify's proprietary file extraction scheme
570
+ # `Unstructured` Unstructured.io file extraction scheme
571
+ ETL_TYPE=dify
572
+
573
+ # Unstructured API path and API key, needs to be configured when ETL_TYPE is Unstructured
574
+ # Or using Unstructured for document extractor node for pptx.
575
+ # For example: http://unstructured:8000/general/v0/general
576
+ UNSTRUCTURED_API_URL=
577
+ UNSTRUCTURED_API_KEY=
578
+ SCARF_NO_ANALYTICS=true
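A sketch of switching the Unstructured settings above to the bundled `unstructured` container (the URL is the one given in the comment; leaving the API key empty assumes a self-hosted endpoint rather than the hosted Unstructured service):

```shell
# Hypothetical Unstructured-based ETL using the in-cluster service.
ETL_TYPE=Unstructured
UNSTRUCTURED_API_URL=http://unstructured:8000/general/v0/general
UNSTRUCTURED_API_KEY=
```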
579
+
580
+ # ------------------------------
581
+ # Model Configuration
582
+ # ------------------------------
583
+
584
+ # The maximum number of tokens allowed for prompt generation.
585
+ # This setting controls the upper limit of tokens that can be used by the LLM
586
+ # when generating a prompt in the prompt generation tool.
587
+ # Default: 512 tokens.
588
+ PROMPT_GENERATION_MAX_TOKENS=512
589
+
590
+ # The maximum number of tokens allowed for code generation.
591
+ # This setting controls the upper limit of tokens that can be used by the LLM
592
+ # when generating code in the code generation tool.
593
+ # Default: 1024 tokens.
594
+ CODE_GENERATION_MAX_TOKENS=1024
595
+
596
+ # ------------------------------
597
+ # Multi-modal Configuration
598
+ # ------------------------------
599
+
600
+ # The format of the image/video/audio/document sent when the multi-modal model is input,
601
+ # the default is base64, optional url.
602
+ # The delay of the call in url mode will be lower than that in base64 mode.
603
+ # It is generally recommended to use the more compatible base64 mode.
604
+ # If configured as url, you need to configure FILES_URL as an externally accessible address so that the multi-modal model can access the image/video/audio/document.
605
+ MULTIMODAL_SEND_FORMAT=base64
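If you do switch to `url` mode, the note above means `FILES_URL` must be reachable from wherever the model provider runs; a minimal sketch with a placeholder public domain:

```shell
# url mode only works when signed file links are publicly resolvable by the model provider.
MULTIMODAL_SEND_FORMAT=url
FILES_URL=https://dify.example.com
```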
606
+ # Upload image file size limit, default 10M.
607
+ UPLOAD_IMAGE_FILE_SIZE_LIMIT=10
608
+ # Upload video file size limit, default 100M.
609
+ UPLOAD_VIDEO_FILE_SIZE_LIMIT=100
610
+ # Upload audio file size limit, default 50M.
611
+ UPLOAD_AUDIO_FILE_SIZE_LIMIT=50
612
+
613
+ # ------------------------------
614
+ # Sentry Configuration
615
+ # Used for application monitoring and error log tracking.
616
+ # ------------------------------
617
+ SENTRY_DSN=
618
+
619
+ # API Service Sentry DSN address, default is empty, when empty,
620
+ # all monitoring information is not reported to Sentry.
621
+ # If not set, Sentry error reporting will be disabled.
622
+ API_SENTRY_DSN=
623
+ # The reporting ratio of Sentry events for the API service; 0.01 means 1%.
624
+ API_SENTRY_TRACES_SAMPLE_RATE=1.0
625
+ # The reporting ratio of Sentry profiles for the API service; 0.01 means 1%.
626
+ API_SENTRY_PROFILES_SAMPLE_RATE=1.0
627
+
628
+ # Web Service Sentry DSN address, default is empty, when empty,
629
+ # all monitoring information is not reported to Sentry.
630
+ # If not set, Sentry error reporting will be disabled.
631
+ WEB_SENTRY_DSN=
632
+
633
+ # ------------------------------
634
+ # Notion Integration Configuration
635
+ # Variables can be obtained by applying for Notion integration: https://www.notion.so/my-integrations
636
+ # ------------------------------
637
+
638
+ # Configure as "public" or "internal".
639
+ # Since Notion's OAuth redirect URL only supports HTTPS,
640
+ # if deploying locally, please use Notion's internal integration.
641
+ NOTION_INTEGRATION_TYPE=public
642
+ # Notion OAuth client secret (used for public integration type)
643
+ NOTION_CLIENT_SECRET=
644
+ # Notion OAuth client id (used for public integration type)
645
+ NOTION_CLIENT_ID=
646
+ # Notion internal integration secret.
647
+ # If the value of NOTION_INTEGRATION_TYPE is "internal",
648
+ # you need to configure this variable.
649
+ NOTION_INTERNAL_SECRET=
650
+
651
+ # ------------------------------
652
+ # Mail related configuration
653
+ # ------------------------------
654
+
655
+ # Mail type, support: resend, smtp
656
+ MAIL_TYPE=resend
657
+
658
+ # Default sender (From) email address, used if not otherwise specified
659
+ MAIL_DEFAULT_SEND_FROM=
660
+
661
+ # API-Key for the Resend email provider, used when MAIL_TYPE is `resend`.
662
+ RESEND_API_URL=https://api.resend.com
663
+ RESEND_API_KEY=your-resend-api-key
664
+
665
+
666
+ # SMTP server configuration, used when MAIL_TYPE is `smtp`
667
+ SMTP_SERVER=
668
+ SMTP_PORT=465
669
+ SMTP_USERNAME=
670
+ SMTP_PASSWORD=
671
+ SMTP_USE_TLS=true
672
+ SMTP_OPPORTUNISTIC_TLS=false
673
+
674
+ # ------------------------------
675
+ # Others Configuration
676
+ # ------------------------------
677
+
678
+ # Maximum length of segmentation tokens for indexing
679
+ INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=4000
680
+
681
+ # Member invitation link valid time (hours),
682
+ # Default: 72.
683
+ INVITE_EXPIRY_HOURS=72
684
+
685
+ # Reset password token valid time (minutes),
686
+ RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5
687
+
688
+ # The sandbox service endpoint.
689
+ CODE_EXECUTION_ENDPOINT=http://sandbox:8194
690
+ CODE_EXECUTION_API_KEY=dify-sandbox
691
+ CODE_MAX_NUMBER=9223372036854775807
692
+ CODE_MIN_NUMBER=-9223372036854775808
693
+ CODE_MAX_DEPTH=5
694
+ CODE_MAX_PRECISION=20
695
+ CODE_MAX_STRING_LENGTH=80000
696
+ CODE_MAX_STRING_ARRAY_LENGTH=30
697
+ CODE_MAX_OBJECT_ARRAY_LENGTH=30
698
+ CODE_MAX_NUMBER_ARRAY_LENGTH=1000
699
+ CODE_EXECUTION_CONNECT_TIMEOUT=10
700
+ CODE_EXECUTION_READ_TIMEOUT=60
701
+ CODE_EXECUTION_WRITE_TIMEOUT=10
702
+ TEMPLATE_TRANSFORM_MAX_LENGTH=80000
703
+
704
+ # Workflow runtime configuration
705
+ WORKFLOW_MAX_EXECUTION_STEPS=500
706
+ WORKFLOW_MAX_EXECUTION_TIME=1200
707
+ WORKFLOW_CALL_MAX_DEPTH=5
708
+ MAX_VARIABLE_SIZE=204800
709
+ WORKFLOW_PARALLEL_DEPTH_LIMIT=3
710
+ WORKFLOW_FILE_UPLOAD_LIMIT=10
711
+
712
+ # HTTP request node in workflow configuration
713
+ HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760
714
+ HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576
715
+
716
+ # SSRF Proxy server HTTP URL
717
+ SSRF_PROXY_HTTP_URL=http://ssrf_proxy:3128
718
+ # SSRF Proxy server HTTPS URL
719
+ SSRF_PROXY_HTTPS_URL=http://ssrf_proxy:3128
720
+
721
+ # ------------------------------
722
+ # Environment Variables for web Service
723
+ # ------------------------------
724
+
725
+ # The timeout for the text generation in milliseconds
726
+ TEXT_GENERATION_TIMEOUT_MS=60000
727
+
728
+ # ------------------------------
729
+ # Environment Variables for db Service
730
+ # ------------------------------
731
+
732
+ PGUSER=${DB_USERNAME}
733
+ # The password for the default postgres user.
734
+ POSTGRES_PASSWORD=${DB_PASSWORD}
735
+ # The name of the default postgres database.
736
+ POSTGRES_DB=${DB_DATABASE}
737
+ # postgres data directory
738
+ PGDATA=/var/lib/postgresql/data/pgdata
739
+
740
+ # ------------------------------
741
+ # Environment Variables for sandbox Service
742
+ # ------------------------------
743
+
744
+ # The API key for the sandbox service
745
+ SANDBOX_API_KEY=dify-sandbox
746
+ # The mode in which the Gin framework runs
747
+ SANDBOX_GIN_MODE=release
748
+ # The timeout for the worker in seconds
749
+ SANDBOX_WORKER_TIMEOUT=15
750
+ # Enable network for the sandbox service
751
+ SANDBOX_ENABLE_NETWORK=true
752
+ # HTTP proxy URL for SSRF protection
753
+ SANDBOX_HTTP_PROXY=http://ssrf_proxy:3128
754
+ # HTTPS proxy URL for SSRF protection
755
+ SANDBOX_HTTPS_PROXY=http://ssrf_proxy:3128
756
+ # The port on which the sandbox service runs
757
+ SANDBOX_PORT=8194
758
+
759
+ # ------------------------------
760
+ # Environment Variables for weaviate Service
761
+ # (only used when VECTOR_STORE is weaviate)
762
+ # ------------------------------
763
+ WEAVIATE_PERSISTENCE_DATA_PATH=/var/lib/weaviate
764
+ WEAVIATE_QUERY_DEFAULTS_LIMIT=25
765
+ WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true
766
+ WEAVIATE_DEFAULT_VECTORIZER_MODULE=none
767
+ WEAVIATE_CLUSTER_HOSTNAME=node1
768
+ WEAVIATE_AUTHENTICATION_APIKEY_ENABLED=true
769
+ WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
770
771
+ WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED=true
772
773
+
774
+ # ------------------------------
775
+ # Environment Variables for Chroma
776
+ # (only used when VECTOR_STORE is chroma)
777
+ # ------------------------------
778
+
779
+ # Authentication credentials for Chroma server
780
+ CHROMA_SERVER_AUTHN_CREDENTIALS=difyai123456
781
+ # Authentication provider for Chroma server
782
+ CHROMA_SERVER_AUTHN_PROVIDER=chromadb.auth.token_authn.TokenAuthenticationServerProvider
783
+ # Persistence setting for Chroma server
784
+ CHROMA_IS_PERSISTENT=TRUE
785
+
786
+ # ------------------------------
787
+ # Environment Variables for Oracle Service
788
+ # (only used when VECTOR_STORE is Oracle)
789
+ # ------------------------------
790
+ ORACLE_PWD=Dify123456
791
+ ORACLE_CHARACTERSET=AL32UTF8
792
+
793
+ # ------------------------------
794
+ # Environment Variables for milvus Service
795
+ # (only used when VECTOR_STORE is milvus)
796
+ # ------------------------------
797
+ # ETCD configuration for auto compaction mode
798
+ ETCD_AUTO_COMPACTION_MODE=revision
799
+ # ETCD configuration for auto compaction retention in terms of number of revisions
800
+ ETCD_AUTO_COMPACTION_RETENTION=1000
801
+ # ETCD configuration for backend quota in bytes
802
+ ETCD_QUOTA_BACKEND_BYTES=4294967296
803
+ # ETCD configuration for the number of changes before triggering a snapshot
804
+ ETCD_SNAPSHOT_COUNT=50000
805
+ # MinIO access key for authentication
806
+ MINIO_ACCESS_KEY=minioadmin
807
+ # MinIO secret key for authentication
808
+ MINIO_SECRET_KEY=minioadmin
809
+ # ETCD service endpoints
810
+ ETCD_ENDPOINTS=etcd:2379
811
+ # MinIO service address
812
+ MINIO_ADDRESS=minio:9000
813
+ # Enable or disable security authorization
814
+ MILVUS_AUTHORIZATION_ENABLED=true
815
+
816
+ # ------------------------------
817
+ # Environment Variables for pgvector / pgvector-rs Service
818
+ # (only used when VECTOR_STORE is pgvector / pgvector-rs)
819
+ # ------------------------------
820
+ PGVECTOR_PGUSER=postgres
821
+ # The password for the default postgres user.
822
+ PGVECTOR_POSTGRES_PASSWORD=difyai123456
823
+ # The name of the default postgres database.
824
+ PGVECTOR_POSTGRES_DB=dify
825
+ # postgres data directory
826
+ PGVECTOR_PGDATA=/var/lib/postgresql/data/pgdata
827
+
828
+ # ------------------------------
829
+ # Environment Variables for opensearch
830
+ # (only used when VECTOR_STORE is opensearch)
831
+ # ------------------------------
832
+ OPENSEARCH_DISCOVERY_TYPE=single-node
833
+ OPENSEARCH_BOOTSTRAP_MEMORY_LOCK=true
834
+ OPENSEARCH_JAVA_OPTS_MIN=512m
835
+ OPENSEARCH_JAVA_OPTS_MAX=1024m
836
+ OPENSEARCH_INITIAL_ADMIN_PASSWORD=Qazwsxedc!@#123
837
+ OPENSEARCH_MEMLOCK_SOFT=-1
838
+ OPENSEARCH_MEMLOCK_HARD=-1
839
+ OPENSEARCH_NOFILE_SOFT=65536
840
+ OPENSEARCH_NOFILE_HARD=65536
841
+
842
+ # ------------------------------
843
+ # Environment Variables for Nginx reverse proxy
844
+ # ------------------------------
845
+ NGINX_SERVER_NAME=_
846
+ NGINX_HTTPS_ENABLED=false
847
+ # HTTP port
848
+ NGINX_PORT=80
849
+ # SSL settings are only applied when HTTPS_ENABLED is true
850
+ NGINX_SSL_PORT=443
851
+ # if HTTPS_ENABLED is true, you're required to add your own SSL certificates/keys to the `./nginx/ssl` directory
852
+ # and modify the env vars below accordingly.
853
+ NGINX_SSL_CERT_FILENAME=dify.crt
854
+ NGINX_SSL_CERT_KEY_FILENAME=dify.key
855
+ NGINX_SSL_PROTOCOLS=TLSv1.1 TLSv1.2 TLSv1.3
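Putting the settings above together, a sketch for terminating TLS in the bundled nginx with certificate files you place in `./nginx/ssl` yourself (the domain is a placeholder; the filenames are this file's defaults):

```shell
# Hypothetical HTTPS setup with self-managed certificates in ./nginx/ssl.
NGINX_SERVER_NAME=dify.example.com
NGINX_HTTPS_ENABLED=true
NGINX_SSL_CERT_FILENAME=dify.crt
NGINX_SSL_CERT_KEY_FILENAME=dify.key
EXPOSE_NGINX_SSL_PORT=443
```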
856
+
857
+ # Nginx performance tuning
858
+ NGINX_WORKER_PROCESSES=auto
859
+ NGINX_CLIENT_MAX_BODY_SIZE=15M
860
+ NGINX_KEEPALIVE_TIMEOUT=65
861
+
862
+ # Proxy settings
863
+ NGINX_PROXY_READ_TIMEOUT=3600s
864
+ NGINX_PROXY_SEND_TIMEOUT=3600s
865
+
866
+ # Set true to accept requests for /.well-known/acme-challenge/
867
+ NGINX_ENABLE_CERTBOT_CHALLENGE=false
868
+
869
+ # ------------------------------
870
+ # Certbot Configuration
871
+ # ------------------------------
872
+
873
+ # Email address (required to get certificates from Let's Encrypt)
874
875
+
876
+ # Domain name
877
+ CERTBOT_DOMAIN=your_domain.com
878
+
879
+ # certbot command options
880
+ # i.e: --force-renewal --dry-run --test-cert --debug
881
+ CERTBOT_OPTIONS=
882
+
883
+ # ------------------------------
884
+ # Environment Variables for SSRF Proxy
885
+ # ------------------------------
886
+ SSRF_HTTP_PORT=3128
887
+ SSRF_COREDUMP_DIR=/var/spool/squid
888
+ SSRF_REVERSE_PROXY_PORT=8194
889
+ SSRF_SANDBOX_HOST=sandbox
890
+ SSRF_DEFAULT_TIME_OUT=5
891
+ SSRF_DEFAULT_CONNECT_TIME_OUT=5
892
+ SSRF_DEFAULT_READ_TIME_OUT=5
893
+ SSRF_DEFAULT_WRITE_TIME_OUT=5
894
+
895
+ # ------------------------------
896
+ # docker env var for specifying vector db type at startup
897
+ # (based on the vector db type, the corresponding docker
898
+ # compose profile will be used)
899
+ # if you want to use unstructured, add ',unstructured' to the end
900
+ # ------------------------------
901
+ COMPOSE_PROFILES=${VECTOR_STORE:-weaviate}
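For example, to run with Milvus and also start the Unstructured container, as the comment above describes (a sketch; after editing, re-run `docker compose up -d` from the `docker` directory so the matching containers are created):

```shell
# Start the milvus profile plus the optional unstructured service.
COMPOSE_PROFILES=milvus,unstructured
```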
902
+
903
+ # ------------------------------
904
+ # Docker Compose Service Expose Host Port Configurations
905
+ # ------------------------------
906
+ EXPOSE_NGINX_PORT=80
907
+ EXPOSE_NGINX_SSL_PORT=443
908
+
909
+ # ----------------------------------------------------------------------------
910
+ # ModelProvider & Tool Position Configuration
911
+ # Used to specify the model providers and tools that can be used in the app.
912
+ # ----------------------------------------------------------------------------
913
+
914
+ # Pin, include, and exclude tools
915
+ # Use comma-separated values with no spaces between items.
916
+ # Example: POSITION_TOOL_PINS=bing,google
917
+ POSITION_TOOL_PINS=
918
+ POSITION_TOOL_INCLUDES=
919
+ POSITION_TOOL_EXCLUDES=
920
+
921
+ # Pin, include, and exclude model providers
922
+ # Use comma-separated values with no spaces between items.
923
+ # Example: POSITION_PROVIDER_PINS=openai,openllm
924
+ POSITION_PROVIDER_PINS=
925
+ POSITION_PROVIDER_INCLUDES=
926
+ POSITION_PROVIDER_EXCLUDES=
927
+
928
+ # CSP https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP
929
+ CSP_WHITELIST=
930
+
931
+ # Enable or disable create tidb service job
932
+ CREATE_TIDB_SERVICE_JOB_ENABLED=false
933
+
934
+ # Maximum number of submitted thread count in a ThreadPool for parallel node execution
935
+ MAX_SUBMIT_COUNT=100
936
+
937
+ # The maximum number of top-k value for RAG.
938
+ TOP_K_MAX_VALUE=10
docker/README.md ADDED
@@ -0,0 +1,99 @@
1
+ ## README for docker Deployment
2
+
3
+ Welcome to the new `docker` directory for deploying Dify using Docker Compose. This README outlines the updates, deployment instructions, and migration details for existing users.
4
+
5
+ ### What's Updated
6
+
7
+ - **Certbot Container**: `docker-compose.yaml` now contains `certbot` for managing SSL certificates. This container automatically renews certificates and ensures secure HTTPS connections.
8
+ For more information, refer to `docker/certbot/README.md`.
9
+
10
+ - **Persistent Environment Variables**: Environment variables are now managed through a `.env` file, ensuring that your configurations persist across deployments.
11
+
12
+ > What is `.env`? <br/><br/>
13
+ > The `.env` file is a crucial component in Docker and Docker Compose environments, serving as a centralized configuration file where you can define environment variables that are accessible to the containers at runtime. This file simplifies the management of environment settings across different stages of development, testing, and production, providing consistency and ease of configuration to deployments.
14
+
15
+ - **Unified Vector Database Services**: All vector database services are now managed from a single Docker Compose file `docker-compose.yaml`. You can switch between different vector databases by setting the `VECTOR_STORE` environment variable in your `.env` file.
16
+ - **Mandatory .env File**: A `.env` file is now required to run `docker compose up`. This file is crucial for configuring your deployment and for any custom settings to persist through upgrades.
17
+ - **Legacy Support**: Previous deployment files are now located in the `docker-legacy` directory and will no longer be maintained.
18
+
19
+ ### How to Deploy Dify with `docker-compose.yaml`
20
+
21
+ 1. **Prerequisites**: Ensure Docker and Docker Compose are installed on your system.
22
+ 2. **Environment Setup**:
23
+ - Navigate to the `docker` directory.
24
+ - Copy the `.env.example` file to a new file named `.env` by running `cp .env.example .env`.
25
+ - Customize the `.env` file as needed. Refer to the `.env.example` file for detailed configuration options.
26
+ 3. **Running the Services**:
27
+ - Execute `docker compose up` from the `docker` directory to start the services (a consolidated command sketch follows this list).
28
+ - To specify a vector database, set the `VECTOR_STORE` variable in your `.env` file to your desired vector database service, such as `milvus`, `weaviate`, or `opensearch`.
29
+ 4. **SSL Certificate Setup**:
30
+ - Refer to `docker/certbot/README.md` to set up SSL certificates using Certbot.
31
+
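Putting the steps above together, a minimal end-to-end sketch, assuming a fresh checkout and that the defaults in `.env.example` are acceptable for a first run:

```shell
cd docker
cp .env.example .env        # the mandatory .env file
# optionally edit .env here (e.g. VECTOR_STORE, SECRET_KEY, exposed ports)
docker compose up -d        # start all services in the background
docker compose ps           # check that the containers are up and healthy
```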
32
+ ### How to Deploy Middleware for Developing Dify
33
+
34
+ 1. **Middleware Setup**:
35
+ - Use the `docker-compose.middleware.yaml` for setting up essential middleware services like databases and caches.
36
+ - Navigate to the `docker` directory.
37
+ - Ensure the `middleware.env` file is created by running `cp middleware.env.example middleware.env` (refer to the `middleware.env.example` file).
38
+ 2. **Running Middleware Services**:
39
+ - Execute `docker compose -f docker-compose.middleware.yaml --env-file middleware.env up -d` to start the middleware services.
40
+
41
+ ### Migration for Existing Users
42
+
43
+ For users migrating from the `docker-legacy` setup:
44
+
45
+ 1. **Review Changes**: Familiarize yourself with the new `.env` configuration and Docker Compose setup.
46
+ 2. **Transfer Customizations**:
47
+ - If you have customized configurations such as `docker-compose.yaml`, `ssrf_proxy/squid.conf`, or `nginx/conf.d/default.conf`, you will need to reflect these changes in the `.env` file you create.
48
+ 3. **Data Migration**:
49
+ - Ensure that data from services like databases and caches is backed up and migrated appropriately to the new structure if necessary.
50
+
51
+ ### Overview of `.env`
52
+
53
+ #### Key Modules and Customization
54
+
55
+ - **Vector Database Services**: Depending on the type of vector database used (`VECTOR_STORE`), users can set specific endpoints, ports, and authentication details.
56
+ - **Storage Services**: Depending on the storage type (`STORAGE_TYPE`), users can configure specific settings for S3, Azure Blob, Google Storage, etc.
57
+ - **API and Web Services**: Users can define URLs and other settings that affect how the API and web frontends operate.
58
+
59
+ #### Other notable variables
60
+
61
+ The `.env.example` file provided in the Docker setup is extensive and covers a wide range of configuration options. It is structured into several sections, each pertaining to different aspects of the application and its services. Here are some of the key sections and variables:
62
+
63
+ 1. **Common Variables**:
64
+ - `CONSOLE_API_URL`, `SERVICE_API_URL`: URLs for different API services.
65
+ - `APP_WEB_URL`: Frontend application URL.
66
+ - `FILES_URL`: Base URL for file downloads and previews.
67
+
68
+ 2. **Server Configuration**:
69
+ - `LOG_LEVEL`, `DEBUG`, `FLASK_DEBUG`: Logging and debug settings.
70
+ - `SECRET_KEY`: A key for encrypting session cookies and other sensitive data.
71
+
72
+ 3. **Database Configuration**:
73
+ - `DB_USERNAME`, `DB_PASSWORD`, `DB_HOST`, `DB_PORT`, `DB_DATABASE`: PostgreSQL database credentials and connection details.
74
+
75
+ 4. **Redis Configuration**:
76
+ - `REDIS_HOST`, `REDIS_PORT`, `REDIS_PASSWORD`: Redis server connection settings.
77
+
78
+ 5. **Celery Configuration**:
79
+ - `CELERY_BROKER_URL`: Configuration for Celery message broker.
80
+
81
+ 6. **Storage Configuration**:
82
+ - `STORAGE_TYPE`, `S3_BUCKET_NAME`, `AZURE_BLOB_ACCOUNT_NAME`: Settings for file storage options like local, S3, Azure Blob, etc.
83
+
84
+ 7. **Vector Database Configuration**:
85
+ - `VECTOR_STORE`: Type of vector database (e.g., `weaviate`, `milvus`).
86
+ - Specific settings for each vector store like `WEAVIATE_ENDPOINT`, `MILVUS_URI`.
87
+
88
+ 8. **CORS Configuration**:
89
+ - `WEB_API_CORS_ALLOW_ORIGINS`, `CONSOLE_CORS_ALLOW_ORIGINS`: Settings for cross-origin resource sharing.
90
+
91
+ 9. **Other Service-Specific Environment Variables**:
92
+ - Each service, such as `nginx`, `redis`, `db`, and the vector databases, has specific environment variables that are directly referenced in the `docker-compose.yaml`.
93
+
94
+ ### Additional Information
95
+
96
+ - **Continuous Improvement Phase**: We are actively seeking feedback from the community to refine and enhance the deployment process. As more users adopt this new method, we will continue to make improvements based on your experiences and suggestions.
97
+ - **Support**: For detailed configuration options and environment variable settings, refer to the `.env.example` file and the Docker Compose configuration files in the `docker` directory.
98
+
99
+ This README aims to guide you through the deployment process using the new Docker Compose setup. For any issues or further assistance, please refer to the official documentation or contact support.
docker/certbot/README.md ADDED
@@ -0,0 +1,76 @@
1
+ # Launching new servers with SSL certificates
2
+
3
+ ## Short description
4
+
5
+ Docker Compose certbot configuration with backward compatibility (works without the certbot container).
6
+ Use `docker compose --profile certbot up` to use this feature.
7
+
8
+ ## The simplest way for launching new servers with SSL certificates
9
+
10
+ 1. Get Let's Encrypt certs
11
+ set `.env` values
12
+ ```properties
13
+ NGINX_SSL_CERT_FILENAME=fullchain.pem
14
+ NGINX_SSL_CERT_KEY_FILENAME=privkey.pem
15
+ NGINX_ENABLE_CERTBOT_CHALLENGE=true
16
+ CERTBOT_DOMAIN=your_domain.com
17
+ CERTBOT_EMAIL=example@your_domain.com
18
+ ```
19
+ execute command:
20
+ ```shell
21
+ docker network prune
22
+ docker compose --profile certbot up --force-recreate -d
23
+ ```
24
+ then, after the containers have launched:
25
+ ```shell
26
+ docker compose exec -it certbot /bin/sh /update-cert.sh
27
+ ```
28
+ 2. Edit the `.env` file and run `docker compose --profile certbot up` again.
29
+ set `.env` value additionally
30
+ ```properties
31
+ NGINX_HTTPS_ENABLED=true
32
+ ```
33
+ execute command:
34
+ ```shell
35
+ docker compose --profile certbot up -d --no-deps --force-recreate nginx
36
+ ```
37
+ Then you can access your server over HTTPS.
38
+ [https://your_domain.com](https://your_domain.com)
39
+
40
+ ## SSL certificates renewal
41
+
42
+ For SSL certificates renewal, execute commands below:
43
+
44
+ ```shell
45
+ docker compose exec -it certbot /bin/sh /update-cert.sh
46
+ docker compose exec nginx nginx -s reload
47
+ ```
48
+
49
+ ## Options for certbot
50
+
51
+ The `CERTBOT_OPTIONS` key might be helpful for testing, e.g.,
52
+
53
+ ```properties
54
+ CERTBOT_OPTIONS=--dry-run
55
+ ```
56
+
57
+ To apply changes to `CERTBOT_OPTIONS`, regenerate the certbot container before updating the certificates.
58
+
59
+ ```shell
60
+ docker compose --profile certbot up -d --no-deps --force-recreate certbot
61
+ docker compose exec -it certbot /bin/sh /update-cert.sh
62
+ ```
63
+
64
+ Then, reload the nginx container if necessary.
65
+
66
+ ```shell
67
+ docker compose exec nginx nginx -s reload
68
+ ```
69
+
70
+ ## For legacy servers
71
+
72
+ To use the cert files in the `nginx/ssl` directory as before, simply launch the containers WITHOUT the `--profile certbot` option.
73
+
74
+ ```shell
75
+ docker compose up -d
76
+ ```
docker/certbot/docker-entrypoint.sh ADDED
@@ -0,0 +1,30 @@
1
+ #!/bin/sh
2
+ set -e
3
+
4
+ printf '%s\n' "Docker entrypoint script is running"
5
+
6
+ printf '\n%s\n' "Checking specific environment variables:"
7
+ printf '%s\n' "CERTBOT_EMAIL: ${CERTBOT_EMAIL:-Not set}"
8
+ printf '%s\n' "CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-Not set}"
9
+ printf '%s\n' "CERTBOT_OPTIONS: ${CERTBOT_OPTIONS:-Not set}"
10
+
11
+ printf '\n%s\n' "Checking mounted directories:"
12
+ for dir in "/etc/letsencrypt" "/var/www/html" "/var/log/letsencrypt"; do
13
+ if [ -d "$dir" ]; then
14
+ printf '%s\n' "$dir exists. Contents:"
15
+ ls -la "$dir"
16
+ else
17
+ printf '%s\n' "$dir does not exist."
18
+ fi
19
+ done
20
+
21
+ printf '\n%s\n' "Generating update-cert.sh from template"
22
+ sed -e "s|\${CERTBOT_EMAIL}|$CERTBOT_EMAIL|g" \
23
+ -e "s|\${CERTBOT_DOMAIN}|$CERTBOT_DOMAIN|g" \
24
+ -e "s|\${CERTBOT_OPTIONS}|$CERTBOT_OPTIONS|g" \
25
+ /update-cert.template.txt > /update-cert.sh
26
+
27
+ chmod +x /update-cert.sh
28
+
29
+ printf '%s\n' "\nExecuting command:" "$@"
30
+ exec "$@"
docker/certbot/update-cert.template.txt ADDED
@@ -0,0 +1,19 @@
1
+ #!/bin/bash
2
+ set -e
3
+
4
+ DOMAIN="${CERTBOT_DOMAIN}"
5
+ EMAIL="${CERTBOT_EMAIL}"
6
+ OPTIONS="${CERTBOT_OPTIONS}"
7
+ CERT_NAME="${DOMAIN}" # use the same name for the certificate as the domain
8
+
9
+ # Check if the certificate already exists
10
+ if [ -f "/etc/letsencrypt/renewal/${CERT_NAME}.conf" ]; then
11
+ echo "Certificate exists. Attempting to renew..."
12
+ certbot renew --noninteractive --cert-name ${CERT_NAME} --webroot --webroot-path=/var/www/html --email ${EMAIL} --agree-tos --no-eff-email ${OPTIONS}
13
+ else
14
+ echo "Certificate does not exist. Obtaining a new certificate..."
15
+ certbot certonly --noninteractive --webroot --webroot-path=/var/www/html --email ${EMAIL} --agree-tos --no-eff-email -d ${DOMAIN} ${OPTIONS}
16
+ fi
17
+ echo "Certificate operation successful"
18
+ # Note: Nginx reload should be handled outside this container
19
+ echo "Please ensure to reload Nginx to apply any certificate changes."
docker/couchbase-server/Dockerfile ADDED
@@ -0,0 +1,4 @@
1
+ FROM couchbase/server:latest AS stage_base
2
+ # FROM couchbase:latest AS stage_base
3
+ COPY init-cbserver.sh /opt/couchbase/init/
4
+ RUN chmod +x /opt/couchbase/init/init-cbserver.sh
docker/couchbase-server/init-cbserver.sh ADDED
@@ -0,0 +1,44 @@
1
+ #!/bin/bash
2
+ # used to start couchbase server - can't get around this as docker compose only allows you to start one command - so we have to start couchbase like the standard couchbase Dockerfile would
3
+ # https://github.com/couchbase/docker/blob/master/enterprise/couchbase-server/7.2.0/Dockerfile#L88
4
+
5
+ /entrypoint.sh couchbase-server &
6
+
7
+ # track if setup is complete so we don't try to setup again
8
+ FILE=/opt/couchbase/init/setupComplete.txt
9
+
10
+ if ! [ -f "$FILE" ]; then
11
+ # used to automatically create the cluster based on environment variables
12
+ # https://docs.couchbase.com/server/current/cli/cbcli/couchbase-cli-cluster-init.html
13
+
14
+ echo $COUCHBASE_ADMINISTRATOR_USERNAME ":" $COUCHBASE_ADMINISTRATOR_PASSWORD
15
+
16
+ sleep 20s
17
+ /opt/couchbase/bin/couchbase-cli cluster-init -c 127.0.0.1 \
18
+ --cluster-username $COUCHBASE_ADMINISTRATOR_USERNAME \
19
+ --cluster-password $COUCHBASE_ADMINISTRATOR_PASSWORD \
20
+ --services data,index,query,fts \
21
+ --cluster-ramsize $COUCHBASE_RAM_SIZE \
22
+ --cluster-index-ramsize $COUCHBASE_INDEX_RAM_SIZE \
23
+ --cluster-eventing-ramsize $COUCHBASE_EVENTING_RAM_SIZE \
24
+ --cluster-fts-ramsize $COUCHBASE_FTS_RAM_SIZE \
25
+ --index-storage-setting default
26
+
27
+ sleep 2s
28
+
29
+ # used to auto create the bucket based on environment variables
30
+ # https://docs.couchbase.com/server/current/cli/cbcli/couchbase-cli-bucket-create.html
31
+
32
+ /opt/couchbase/bin/couchbase-cli bucket-create -c localhost:8091 \
33
+ --username $COUCHBASE_ADMINISTRATOR_USERNAME \
34
+ --password $COUCHBASE_ADMINISTRATOR_PASSWORD \
35
+ --bucket $COUCHBASE_BUCKET \
36
+ --bucket-ramsize $COUCHBASE_BUCKET_RAMSIZE \
37
+ --bucket-type couchbase
38
+
39
+ # create file so we know that the cluster is setup and don't run the setup again
40
+ touch $FILE
41
+ fi
42
+ # docker compose will stop the container from running unless we do this
43
+ # known issue and workaround
44
+ tail -f /dev/null
docker/docker-compose-template.yaml ADDED
@@ -0,0 +1,576 @@
1
+ x-shared-env: &shared-api-worker-env
2
+ services:
3
+ # API service
4
+ api:
5
+ image: langgenius/dify-api:0.15.3
6
+ restart: always
7
+ environment:
8
+ # Use the shared environment variables.
9
+ <<: *shared-api-worker-env
10
+ # Startup mode, 'api' starts the API server.
11
+ MODE: api
12
+ SENTRY_DSN: ${API_SENTRY_DSN:-}
13
+ SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
14
+ SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
15
+ depends_on:
16
+ - db
17
+ - redis
18
+ volumes:
19
+ # Mount the storage directory to the container, for storing user files.
20
+ - ./volumes/app/storage:/app/api/storage
21
+ networks:
22
+ - ssrf_proxy_network
23
+ - default
24
+
25
+ # worker service
26
+ # The Celery worker for processing the queue.
27
+ worker:
28
+ image: langgenius/dify-api:0.15.3
29
+ restart: always
30
+ environment:
31
+ # Use the shared environment variables.
32
+ <<: *shared-api-worker-env
33
+ # Startup mode, 'worker' starts the Celery worker for processing the queue.
34
+ MODE: worker
35
+ SENTRY_DSN: ${API_SENTRY_DSN:-}
36
+ SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
37
+ SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
38
+ depends_on:
39
+ - db
40
+ - redis
41
+ volumes:
42
+ # Mount the storage directory to the container, for storing user files.
43
+ - ./volumes/app/storage:/app/api/storage
44
+ networks:
45
+ - ssrf_proxy_network
46
+ - default
47
+
48
+ # Frontend web application.
49
+ web:
50
+ image: langgenius/dify-web:0.15.3
51
+ restart: always
52
+ environment:
53
+ CONSOLE_API_URL: ${CONSOLE_API_URL:-}
54
+ APP_API_URL: ${APP_API_URL:-}
55
+ SENTRY_DSN: ${WEB_SENTRY_DSN:-}
56
+ NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
57
+ TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
58
+ CSP_WHITELIST: ${CSP_WHITELIST:-}
59
+ TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-}
60
+ INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-}
61
+
62
+ # The postgres database.
63
+ db:
64
+ image: postgres:15-alpine
65
+ restart: always
66
+ environment:
67
+ PGUSER: ${PGUSER:-postgres}
68
+ POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
69
+ POSTGRES_DB: ${POSTGRES_DB:-dify}
70
+ PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
71
+ command: >
72
+ postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
73
+ -c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}'
74
+ -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
75
+ -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
76
+ -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
77
+ volumes:
78
+ - ./volumes/db/data:/var/lib/postgresql/data
79
+ healthcheck:
80
+ test: [ 'CMD', 'pg_isready' ]
81
+ interval: 1s
82
+ timeout: 3s
83
+ retries: 30
84
+
85
+ # The redis cache.
86
+ redis:
87
+ image: redis:6-alpine
88
+ restart: always
89
+ environment:
90
+ REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456}
91
+ volumes:
92
+ # Mount the redis data directory to the container.
93
+ - ./volumes/redis/data:/data
94
+ # Set the redis password when starting the redis server.
95
+ command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
96
+ healthcheck:
97
+ test: [ 'CMD', 'redis-cli', 'ping' ]
98
+
99
+ # The DifySandbox
100
+ sandbox:
101
+ image: langgenius/dify-sandbox:0.2.10
102
+ restart: always
103
+ environment:
104
+ # The DifySandbox configurations
105
+ # Make sure to change this key to a strong value for your deployment.
106
+ # You can generate a strong key using `openssl rand -base64 42`.
107
+ API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
108
+ GIN_MODE: ${SANDBOX_GIN_MODE:-release}
109
+ WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
110
+ ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
111
+ HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
112
+ HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
113
+ SANDBOX_PORT: ${SANDBOX_PORT:-8194}
114
+ volumes:
115
+ - ./volumes/sandbox/dependencies:/dependencies
116
+ healthcheck:
117
+ test: [ 'CMD', 'curl', '-f', 'http://localhost:8194/health' ]
118
+ networks:
119
+ - ssrf_proxy_network
120
+
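Following the key comment inside the sandbox service, a sketch of rotating that key: generate a value with openssl and pass it through the `SANDBOX_API_KEY` variable the service already reads. The generated compose file below also carries `CODE_EXECUTION_API_KEY` with the same `dify-sandbox` default, which presumably has to match, so both are shown:

  openssl rand -base64 42            # prints a random key
  # .env
  SANDBOX_API_KEY=<generated-key>
  CODE_EXECUTION_API_KEY=<generated-key>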
121
+ # ssrf_proxy server
122
+ # for more information, please refer to
123
+ # https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed
124
+ ssrf_proxy:
125
+ image: ubuntu/squid:latest
126
+ restart: always
127
+ volumes:
128
+ - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
129
+ - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
130
+ entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
131
+ environment:
132
+ # Please adjust the squid env vars below to fit your network environment.
133
+ HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
134
+ COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
135
+ REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
136
+ SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
137
+ SANDBOX_PORT: ${SANDBOX_PORT:-8194}
138
+ networks:
139
+ - ssrf_proxy_network
140
+ - default
141
+
142
+ # Certbot service
143
+ # use `docker-compose --profile certbot up` to start the certbot service.
144
+ certbot:
145
+ image: certbot/certbot
146
+ profiles:
147
+ - certbot
148
+ volumes:
149
+ - ./volumes/certbot/conf:/etc/letsencrypt
150
+ - ./volumes/certbot/www:/var/www/html
151
+ - ./volumes/certbot/logs:/var/log/letsencrypt
152
+ - ./volumes/certbot/conf/live:/etc/letsencrypt/live
153
+ - ./certbot/update-cert.template.txt:/update-cert.template.txt
154
+ - ./certbot/docker-entrypoint.sh:/docker-entrypoint.sh
155
+ environment:
156
+ - CERTBOT_EMAIL=${CERTBOT_EMAIL}
157
+ - CERTBOT_DOMAIN=${CERTBOT_DOMAIN}
158
+ - CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-}
159
+ entrypoint: [ '/docker-entrypoint.sh' ]
160
+ command: [ 'tail', '-f', '/dev/null' ]
161
+
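As the comment above says, certbot sits behind a Compose profile and only runs when explicitly requested. A wiring sketch, assuming the domain and email are set in `.env` (both variables also appear with placeholder defaults in the generated shared env below) and that nginx is told to serve the HTTP-01 challenge:

  # .env
  CERTBOT_DOMAIN=your_domain.com
  [email protected]
  NGINX_ENABLE_CERTBOT_CHALLENGE=true

  docker-compose --profile certbot up -d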
162
+ # The nginx reverse proxy.
163
+ # used for reverse proxying the API service and Web service.
164
+ nginx:
165
+ image: nginx:latest
166
+ restart: always
167
+ volumes:
168
+ - ./nginx/nginx.conf.template:/etc/nginx/nginx.conf.template
169
+ - ./nginx/proxy.conf.template:/etc/nginx/proxy.conf.template
170
+ - ./nginx/https.conf.template:/etc/nginx/https.conf.template
171
+ - ./nginx/conf.d:/etc/nginx/conf.d
172
+ - ./nginx/docker-entrypoint.sh:/docker-entrypoint-mount.sh
173
+ - ./nginx/ssl:/etc/ssl # cert dir (legacy)
174
+ - ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container)
175
+ - ./volumes/certbot/conf:/etc/letsencrypt
176
+ - ./volumes/certbot/www:/var/www/html
177
+ entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
178
+ environment:
179
+ NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
180
+ NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
181
+ NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
182
+ NGINX_PORT: ${NGINX_PORT:-80}
183
+ # You're required to add your own SSL certificates/keys to the `./nginx/ssl` directory
184
+ # and modify the env vars below in .env if NGINX_HTTPS_ENABLED is true.
185
+ NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
186
+ NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
187
+ NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
188
+ NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
189
+ NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M}
190
+ NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
191
+ NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
192
+ NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
193
+ NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
194
+ CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-}
195
+ depends_on:
196
+ - api
197
+ - web
198
+ ports:
199
+ - '${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}'
200
+ - '${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}'
201
+
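Pulling the SSL comment above together, a sketch of the bring-your-own-certificate path: place the certificate and key under `./nginx/ssl` and flip the variables in `.env` (the file names are the defaults this service already references):

  # files placed manually: ./nginx/ssl/dify.crt and ./nginx/ssl/dify.key
  # .env
  NGINX_HTTPS_ENABLED=true
  NGINX_SSL_CERT_FILENAME=dify.crt
  NGINX_SSL_CERT_KEY_FILENAME=dify.key
  EXPOSE_NGINX_SSL_PORT=443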
202
+ # The TiDB vector store.
203
+ # For production use, please refer to https://github.com/pingcap/tidb-docker-compose
204
+ tidb:
205
+ image: pingcap/tidb:v8.4.0
206
+ profiles:
207
+ - tidb
208
+ command:
209
+ - --store=unistore
210
+ restart: always
211
+
212
+ # The Weaviate vector store.
213
+ weaviate:
214
+ image: semitechnologies/weaviate:1.19.0
215
+ profiles:
216
+ - ''
217
+ - weaviate
218
+ restart: always
219
+ volumes:
220
+ # Mount the Weaviate data directory to the container.
221
+ - ./volumes/weaviate:/var/lib/weaviate
222
+ environment:
223
+ # The Weaviate configurations
224
+ # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
225
+ PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
226
+ QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
227
+ AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
228
+ DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
229
+ CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
230
+ AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
231
+ AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
232
+ AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:[email protected]}
233
+ AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
234
+ AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:[email protected]}
235
+
236
+ # Qdrant vector store.
237
+ # (if used, you need to set VECTOR_STORE to qdrant in the api & worker service.)
238
+ qdrant:
239
+ image: langgenius/qdrant:v1.7.3
240
+ profiles:
241
+ - qdrant
242
+ restart: always
243
+ volumes:
244
+ - ./volumes/qdrant:/qdrant/storage
245
+ environment:
246
+ QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}
247
+
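As the Qdrant comment notes, two changes are needed to use it: start the service (it is gated behind the `qdrant` profile) and point the api/worker at it via `VECTOR_STORE`. A sketch, assuming both settings live in `.env`:

  # .env
  VECTOR_STORE=qdrant
  QDRANT_API_KEY=difyai123456      # default shown above; change it for real deployments

  docker-compose --profile qdrant up -d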
248
+ # The Couchbase vector store.
249
+ couchbase-server:
250
+ build: ./couchbase-server
251
+ profiles:
252
+ - couchbase
253
+ restart: always
254
+ environment:
255
+ - CLUSTER_NAME=dify_search
256
+ - COUCHBASE_ADMINISTRATOR_USERNAME=${COUCHBASE_USER:-Administrator}
257
+ - COUCHBASE_ADMINISTRATOR_PASSWORD=${COUCHBASE_PASSWORD:-password}
258
+ - COUCHBASE_BUCKET=${COUCHBASE_BUCKET_NAME:-Embeddings}
259
+ - COUCHBASE_BUCKET_RAMSIZE=512
260
+ - COUCHBASE_RAM_SIZE=2048
261
+ - COUCHBASE_EVENTING_RAM_SIZE=512
262
+ - COUCHBASE_INDEX_RAM_SIZE=512
263
+ - COUCHBASE_FTS_RAM_SIZE=1024
264
+ hostname: couchbase-server
265
+ container_name: couchbase-server
266
+ working_dir: /opt/couchbase
267
+ stdin_open: true
268
+ tty: true
269
+ entrypoint: [ "" ]
270
+ command: sh -c "/opt/couchbase/init/init-cbserver.sh"
271
+ volumes:
272
+ - ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data
273
+ healthcheck:
274
+ # Ensure the bucket is created before proceeding.
275
+ test: [ "CMD-SHELL", "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1" ]
276
+ interval: 10s
277
+ retries: 10
278
+ start_period: 30s
279
+ timeout: 10s
280
+
281
+ # The pgvector vector database.
282
+ pgvector:
283
+ image: pgvector/pgvector:pg16
284
+ profiles:
285
+ - pgvector
286
+ restart: always
287
+ environment:
288
+ PGUSER: ${PGVECTOR_PGUSER:-postgres}
289
+ # The password for the default postgres user.
290
+ POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
291
+ # The name of the default postgres database.
292
+ POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
293
+ # postgres data directory
294
+ PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
295
+ volumes:
296
+ - ./volumes/pgvector/data:/var/lib/postgresql/data
297
+ healthcheck:
298
+ test: [ 'CMD', 'pg_isready' ]
299
+ interval: 1s
300
+ timeout: 3s
301
+ retries: 30
302
+
303
+ # pgvecto-rs vector store
304
+ pgvecto-rs:
305
+ image: tensorchord/pgvecto-rs:pg16-v0.3.0
306
+ profiles:
307
+ - pgvecto-rs
308
+ restart: always
309
+ environment:
310
+ PGUSER: ${PGVECTOR_PGUSER:-postgres}
311
+ # The password for the default postgres user.
312
+ POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
313
+ # The name of the default postgres database.
314
+ POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
315
+ # postgres data directory
316
+ PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
317
+ volumes:
318
+ - ./volumes/pgvecto_rs/data:/var/lib/postgresql/data
319
+ healthcheck:
320
+ test: [ 'CMD', 'pg_isready' ]
321
+ interval: 1s
322
+ timeout: 3s
323
+ retries: 30
324
+
325
+ # Chroma vector database
326
+ chroma:
327
+ image: ghcr.io/chroma-core/chroma:0.5.20
328
+ profiles:
329
+ - chroma
330
+ restart: always
331
+ volumes:
332
+ - ./volumes/chroma:/chroma/chroma
333
+ environment:
334
+ CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
335
+ CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
336
+ IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}
337
+
338
+ # OceanBase vector database
339
+ oceanbase:
340
+ image: quay.io/oceanbase/oceanbase-ce:4.3.3.0-100000142024101215
341
+ profiles:
342
+ - oceanbase
343
+ restart: always
344
+ volumes:
345
+ - ./volumes/oceanbase/data:/root/ob
346
+ - ./volumes/oceanbase/conf:/root/.obd/cluster
347
+ - ./volumes/oceanbase/init.d:/root/boot/init.d
348
+ environment:
349
+ OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
350
+ OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
351
+ OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
352
+ OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
353
+ OB_SERVER_IP: '127.0.0.1'
354
+
355
+ # Oracle vector database
356
+ oracle:
357
+ image: container-registry.oracle.com/database/free:latest
358
+ profiles:
359
+ - oracle
360
+ restart: always
361
+ volumes:
362
+ - source: oradata
363
+ type: volume
364
+ target: /opt/oracle/oradata
365
+ - ./startupscripts:/opt/oracle/scripts/startup
366
+ environment:
367
+ ORACLE_PWD: ${ORACLE_PWD:-Dify123456}
368
+ ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8}
369
+
370
+ # Milvus vector database services
371
+ etcd:
372
+ container_name: milvus-etcd
373
+ image: quay.io/coreos/etcd:v3.5.5
374
+ profiles:
375
+ - milvus
376
+ environment:
377
+ ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision}
378
+ ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000}
379
+ ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296}
380
+ ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000}
381
+ volumes:
382
+ - ./volumes/milvus/etcd:/etcd
383
+ command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
384
+ healthcheck:
385
+ test: [ 'CMD', 'etcdctl', 'endpoint', 'health' ]
386
+ interval: 30s
387
+ timeout: 20s
388
+ retries: 3
389
+ networks:
390
+ - milvus
391
+
392
+ minio:
393
+ container_name: milvus-minio
394
+ image: minio/minio:RELEASE.2023-03-20T20-16-18Z
395
+ profiles:
396
+ - milvus
397
+ environment:
398
+ MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
399
+ MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
400
+ volumes:
401
+ - ./volumes/milvus/minio:/minio_data
402
+ command: minio server /minio_data --console-address ":9001"
403
+ healthcheck:
404
+ test: [ 'CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live' ]
405
+ interval: 30s
406
+ timeout: 20s
407
+ retries: 3
408
+ networks:
409
+ - milvus
410
+
411
+ milvus-standalone:
412
+ container_name: milvus-standalone
413
+ image: milvusdb/milvus:v2.5.0-beta
414
+ profiles:
415
+ - milvus
416
+ command: [ 'milvus', 'run', 'standalone' ]
417
+ environment:
418
+ ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
419
+ MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
420
+ common.security.authorizationEnabled: ${MILVUS_AUTHORIZATION_ENABLED:-true}
421
+ volumes:
422
+ - ./volumes/milvus/milvus:/var/lib/milvus
423
+ healthcheck:
424
+ test: [ 'CMD', 'curl', '-f', 'http://localhost:9091/healthz' ]
425
+ interval: 30s
426
+ start_period: 90s
427
+ timeout: 20s
428
+ retries: 3
429
+ depends_on:
430
+ - etcd
431
+ - minio
432
+ ports:
433
+ - 19530:19530
434
+ - 9091:9091
435
+ networks:
436
+ - milvus
437
+
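The three services above (etcd, minio, milvus-standalone) share the `milvus` profile and the dedicated `milvus` network, so they come up as a group. A start-up sketch; note that the shared env's default `MILVUS_URI` points at 127.0.0.1:19530, and the value below is only an assumption that may need adjusting to your network layout (the standalone container does publish port 19530 on the host):

  # .env
  VECTOR_STORE=milvus
  MILVUS_URI=http://host.docker.internal:19530   # assumption; adjust as needed

  docker-compose --profile milvus up -d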
438
+ # Opensearch vector database
439
+ opensearch:
440
+ container_name: opensearch
441
+ image: opensearchproject/opensearch:latest
442
+ profiles:
443
+ - opensearch
444
+ environment:
445
+ discovery.type: ${OPENSEARCH_DISCOVERY_TYPE:-single-node}
446
+ bootstrap.memory_lock: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true}
447
+ OPENSEARCH_JAVA_OPTS: -Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m}
448
+ OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123}
449
+ ulimits:
450
+ memlock:
451
+ soft: ${OPENSEARCH_MEMLOCK_SOFT:--1}
452
+ hard: ${OPENSEARCH_MEMLOCK_HARD:--1}
453
+ nofile:
454
+ soft: ${OPENSEARCH_NOFILE_SOFT:-65536}
455
+ hard: ${OPENSEARCH_NOFILE_HARD:-65536}
456
+ volumes:
457
+ - ./volumes/opensearch/data:/usr/share/opensearch/data
458
+ networks:
459
+ - opensearch-net
460
+
461
+ opensearch-dashboards:
462
+ container_name: opensearch-dashboards
463
+ image: opensearchproject/opensearch-dashboards:latest
464
+ profiles:
465
+ - opensearch
466
+ environment:
467
+ OPENSEARCH_HOSTS: '["https://opensearch:9200"]'
468
+ volumes:
469
+ - ./volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml
470
+ networks:
471
+ - opensearch-net
472
+ depends_on:
473
+ - opensearch
474
+
475
+ # MyScale vector database
476
+ myscale:
477
+ container_name: myscale
478
+ image: myscale/myscaledb:1.6.4
479
+ profiles:
480
+ - myscale
481
+ restart: always
482
+ tty: true
483
+ volumes:
484
+ - ./volumes/myscale/data:/var/lib/clickhouse
485
+ - ./volumes/myscale/log:/var/log/clickhouse-server
486
+ - ./volumes/myscale/config/users.d/custom_users_config.xml:/etc/clickhouse-server/users.d/custom_users_config.xml
487
+ ports:
488
+ - ${MYSCALE_PORT:-8123}:${MYSCALE_PORT:-8123}
489
+
490
+ # https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html
491
+ # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites
492
+ elasticsearch:
493
+ image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3
494
+ container_name: elasticsearch
495
+ profiles:
496
+ - elasticsearch
497
+ - elasticsearch-ja
498
+ restart: always
499
+ volumes:
500
+ - ./elasticsearch/docker-entrypoint.sh:/docker-entrypoint-mount.sh
501
+ - dify_es01_data:/usr/share/elasticsearch/data
502
+ environment:
503
+ ELASTIC_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
504
+ VECTOR_STORE: ${VECTOR_STORE:-}
505
+ cluster.name: dify-es-cluster
506
+ node.name: dify-es0
507
+ discovery.type: single-node
508
+ xpack.license.self_generated.type: basic
509
+ xpack.security.enabled: 'true'
510
+ xpack.security.enrollment.enabled: 'false'
511
+ xpack.security.http.ssl.enabled: 'false'
512
+ ports:
513
+ - ${ELASTICSEARCH_PORT:-9200}:9200
514
+ deploy:
515
+ resources:
516
+ limits:
517
+ memory: 2g
518
+ entrypoint: [ 'sh', '-c', "sh /docker-entrypoint-mount.sh" ]
519
+ healthcheck:
520
+ test: [ 'CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty' ]
521
+ interval: 30s
522
+ timeout: 10s
523
+ retries: 50
524
+
525
+ # https://www.elastic.co/guide/en/kibana/current/docker.html
526
+ # https://www.elastic.co/guide/en/kibana/current/settings.html
527
+ kibana:
528
+ image: docker.elastic.co/kibana/kibana:8.14.3
529
+ container_name: kibana
530
+ profiles:
531
+ - elasticsearch
532
+ depends_on:
533
+ - elasticsearch
534
+ restart: always
535
+ environment:
536
+ XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa
537
+ NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana
538
+ XPACK_SECURITY_ENABLED: 'true'
539
+ XPACK_SECURITY_ENROLLMENT_ENABLED: 'false'
540
+ XPACK_SECURITY_HTTP_SSL_ENABLED: 'false'
541
+ XPACK_FLEET_ISAIRGAPPED: 'true'
542
+ I18N_LOCALE: zh-CN
543
+ SERVER_PORT: '5601'
544
+ ELASTICSEARCH_HOSTS: http://elasticsearch:9200
545
+ ports:
546
+ - ${KIBANA_PORT:-5601}:5601
547
+ healthcheck:
548
+ test: [ 'CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || exit 1' ]
549
+ interval: 30s
550
+ timeout: 10s
551
+ retries: 3
552
+
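Elasticsearch and Kibana above are grouped under the `elasticsearch` profile (the Elasticsearch service additionally answers to the `elasticsearch-ja` profile for a Japanese-analysis variant), and the service already forwards `VECTOR_STORE` from the environment. A start-up sketch, assuming `elasticsearch` is the intended vector-store value:

  # .env
  VECTOR_STORE=elasticsearch
  ELASTICSEARCH_PASSWORD=elastic     # default shown above; change it for real deployments

  docker-compose --profile elasticsearch up -d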
553
+ # The Unstructured ETL API service.
554
+ # (if used, you need to set ETL_TYPE to Unstructured in the api & worker service.)
555
+ unstructured:
556
+ image: downloads.unstructured.io/unstructured-io/unstructured-api:latest
557
+ profiles:
558
+ - unstructured
559
+ restart: always
560
+ volumes:
561
+ - ./volumes/unstructured:/app/data
562
+
563
+ networks:
564
+ # Create an internal network between sandbox, api and ssrf_proxy; it cannot reach the outside.
565
+ ssrf_proxy_network:
566
+ driver: bridge
567
+ internal: true
568
+ milvus:
569
+ driver: bridge
570
+ opensearch-net:
571
+ driver: bridge
572
+ internal: true
573
+
574
+ volumes:
575
+ oradata:
576
+ dify_es01_data:
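Taken together, the default path needs no profile flags at all: Weaviate is the only vector store attached to the empty ('') profile, so it starts alongside the core services, and the generated compose file below defaults `VECTOR_STORE` to weaviate. A bring-up sketch, with opt-in backends added via profiles:

  docker-compose up -d                      # api, worker, web, db, redis, sandbox, ssrf_proxy, nginx, weaviate
  docker-compose --profile qdrant up -d     # example of additionally starting an opt-in backend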
docker/docker-compose.middleware.yaml ADDED
@@ -0,0 +1,123 @@
1
+ services:
2
+ # The postgres database.
3
+ db:
4
+ image: postgres:15-alpine
5
+ restart: always
6
+ env_file:
7
+ - ./middleware.env
8
+ environment:
9
+ POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
10
+ POSTGRES_DB: ${POSTGRES_DB:-dify}
11
+ PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
12
+ command: >
13
+ postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
14
+ -c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}'
15
+ -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
16
+ -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
17
+ -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
18
+ volumes:
19
+ - ${PGDATA_HOST_VOLUME:-./volumes/db/data}:/var/lib/postgresql/data
20
+ ports:
21
+ - "${EXPOSE_POSTGRES_PORT:-5432}:5432"
22
+ healthcheck:
23
+ test: [ "CMD", "pg_isready" ]
24
+ interval: 1s
25
+ timeout: 3s
26
+ retries: 30
27
+
28
+ # The redis cache.
29
+ redis:
30
+ image: redis:6-alpine
31
+ restart: always
32
+ environment:
33
+ REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456}
34
+ volumes:
35
+ # Mount the redis data directory to the container.
36
+ - ${REDIS_HOST_VOLUME:-./volumes/redis/data}:/data
37
+ # Set the redis password when starting the redis server.
38
+ command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
39
+ ports:
40
+ - "${EXPOSE_REDIS_PORT:-6379}:6379"
41
+ healthcheck:
42
+ test: [ "CMD", "redis-cli", "ping" ]
43
+
44
+ # The DifySandbox
45
+ sandbox:
46
+ image: langgenius/dify-sandbox:0.2.10
47
+ restart: always
48
+ environment:
49
+ # The DifySandbox configurations
50
+ # Make sure to change this key to a strong value for your deployment.
51
+ # You can generate a strong key using `openssl rand -base64 42`.
52
+ API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
53
+ GIN_MODE: ${SANDBOX_GIN_MODE:-release}
54
+ WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
55
+ ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
56
+ HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
57
+ HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
58
+ SANDBOX_PORT: ${SANDBOX_PORT:-8194}
59
+ volumes:
60
+ - ./volumes/sandbox/dependencies:/dependencies
61
+ - ./volumes/sandbox/conf:/conf
62
+ healthcheck:
63
+ test: [ "CMD", "curl", "-f", "http://localhost:8194/health" ]
64
+ networks:
65
+ - ssrf_proxy_network
66
+
67
+ # ssrf_proxy server
68
+ # for more information, please refer to
69
+ # https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed
70
+ ssrf_proxy:
71
+ image: ubuntu/squid:latest
72
+ restart: always
73
+ volumes:
74
+ - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
75
+ - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
76
+ entrypoint: [ "sh", "-c", "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
77
+ environment:
78
+ # Please adjust the squid env vars below to fit your network environment.
79
+ HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
80
+ COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
81
+ REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
82
+ SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
83
+ SANDBOX_PORT: ${SANDBOX_PORT:-8194}
84
+ ports:
85
+ - "${EXPOSE_SSRF_PROXY_PORT:-3128}:${SSRF_HTTP_PORT:-3128}"
86
+ - "${EXPOSE_SANDBOX_PORT:-8194}:${SANDBOX_PORT:-8194}"
87
+ networks:
88
+ - ssrf_proxy_network
89
+ - default
90
+
91
+ # The Weaviate vector store.
92
+ weaviate:
93
+ image: semitechnologies/weaviate:1.19.0
94
+ profiles:
95
+ - ""
96
+ - weaviate
97
+ restart: always
98
+ volumes:
99
+ # Mount the Weaviate data directory to the container.
100
+ - ${WEAVIATE_HOST_VOLUME:-./volumes/weaviate}:/var/lib/weaviate
101
+ env_file:
102
+ - ./middleware.env
103
+ environment:
104
+ # The Weaviate configurations
105
+ # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
106
+ PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
107
+ QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
108
+ AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
109
+ DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
110
+ CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
111
+ AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
112
+ AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
113
+ AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:[email protected]}
114
+ AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
115
+ AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:[email protected]}
116
+ ports:
117
+ - "${EXPOSE_WEAVIATE_PORT:-8080}:8080"
118
+
119
+ networks:
120
+ # Create an internal network between sandbox, api and ssrf_proxy; it cannot reach the outside.
121
+ ssrf_proxy_network:
122
+ driver: bridge
123
+ internal: true
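This middleware-only compose file runs just the backing services (db, redis, sandbox, ssrf_proxy, weaviate) for setups where the Dify api, worker and web run outside Docker, for example from source. A usage sketch, assuming the `./middleware.env` file referenced by `env_file` above has been created first (the example file name below is an assumption about what ships alongside):

  cd docker
  cp middleware.env.example middleware.env
  docker-compose -f docker-compose.middleware.yaml up -d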
docker/docker-compose.png ADDED
docker/docker-compose.yaml ADDED
@@ -0,0 +1,971 @@
1
+ # ==================================================================
2
+ # WARNING: This file is auto-generated by generate_docker_compose
3
+ # Do not modify this file directly. Instead, update the .env.example
4
+ # or docker-compose-template.yaml and regenerate this file.
5
+ # ==================================================================
6
+
7
+ x-shared-env: &shared-api-worker-env
8
+ CONSOLE_API_URL: ${CONSOLE_API_URL:-}
9
+ CONSOLE_WEB_URL: ${CONSOLE_WEB_URL:-}
10
+ SERVICE_API_URL: ${SERVICE_API_URL:-}
11
+ APP_API_URL: ${APP_API_URL:-}
12
+ APP_WEB_URL: ${APP_WEB_URL:-}
13
+ FILES_URL: ${FILES_URL:-}
14
+ LOG_LEVEL: ${LOG_LEVEL:-INFO}
15
+ LOG_FILE: ${LOG_FILE:-/app/logs/server.log}
16
+ LOG_FILE_MAX_SIZE: ${LOG_FILE_MAX_SIZE:-20}
17
+ LOG_FILE_BACKUP_COUNT: ${LOG_FILE_BACKUP_COUNT:-5}
18
+ LOG_DATEFORMAT: ${LOG_DATEFORMAT:-%Y-%m-%d %H:%M:%S}
19
+ LOG_TZ: ${LOG_TZ:-UTC}
20
+ DEBUG: ${DEBUG:-false}
21
+ FLASK_DEBUG: ${FLASK_DEBUG:-false}
22
+ SECRET_KEY: ${SECRET_KEY:-sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U}
23
+ INIT_PASSWORD: ${INIT_PASSWORD:-}
24
+ DEPLOY_ENV: ${DEPLOY_ENV:-PRODUCTION}
25
+ CHECK_UPDATE_URL: ${CHECK_UPDATE_URL:-https://updates.dify.ai}
26
+ OPENAI_API_BASE: ${OPENAI_API_BASE:-https://api.openai.com/v1}
27
+ MIGRATION_ENABLED: ${MIGRATION_ENABLED:-true}
28
+ FILES_ACCESS_TIMEOUT: ${FILES_ACCESS_TIMEOUT:-300}
29
+ ACCESS_TOKEN_EXPIRE_MINUTES: ${ACCESS_TOKEN_EXPIRE_MINUTES:-60}
30
+ REFRESH_TOKEN_EXPIRE_DAYS: ${REFRESH_TOKEN_EXPIRE_DAYS:-30}
31
+ APP_MAX_ACTIVE_REQUESTS: ${APP_MAX_ACTIVE_REQUESTS:-0}
32
+ APP_MAX_EXECUTION_TIME: ${APP_MAX_EXECUTION_TIME:-1200}
33
+ DIFY_BIND_ADDRESS: ${DIFY_BIND_ADDRESS:-0.0.0.0}
34
+ DIFY_PORT: ${DIFY_PORT:-5001}
35
+ SERVER_WORKER_AMOUNT: ${SERVER_WORKER_AMOUNT:-1}
36
+ SERVER_WORKER_CLASS: ${SERVER_WORKER_CLASS:-gevent}
37
+ SERVER_WORKER_CONNECTIONS: ${SERVER_WORKER_CONNECTIONS:-10}
38
+ CELERY_WORKER_CLASS: ${CELERY_WORKER_CLASS:-}
39
+ GUNICORN_TIMEOUT: ${GUNICORN_TIMEOUT:-360}
40
+ CELERY_WORKER_AMOUNT: ${CELERY_WORKER_AMOUNT:-}
41
+ CELERY_AUTO_SCALE: ${CELERY_AUTO_SCALE:-false}
42
+ CELERY_MAX_WORKERS: ${CELERY_MAX_WORKERS:-}
43
+ CELERY_MIN_WORKERS: ${CELERY_MIN_WORKERS:-}
44
+ API_TOOL_DEFAULT_CONNECT_TIMEOUT: ${API_TOOL_DEFAULT_CONNECT_TIMEOUT:-10}
45
+ API_TOOL_DEFAULT_READ_TIMEOUT: ${API_TOOL_DEFAULT_READ_TIMEOUT:-60}
46
+ DB_USERNAME: ${DB_USERNAME:-postgres}
47
+ DB_PASSWORD: ${DB_PASSWORD:-difyai123456}
48
+ DB_HOST: ${DB_HOST:-db}
49
+ DB_PORT: ${DB_PORT:-5432}
50
+ DB_DATABASE: ${DB_DATABASE:-dify}
51
+ SQLALCHEMY_POOL_SIZE: ${SQLALCHEMY_POOL_SIZE:-30}
52
+ SQLALCHEMY_POOL_RECYCLE: ${SQLALCHEMY_POOL_RECYCLE:-3600}
53
+ SQLALCHEMY_ECHO: ${SQLALCHEMY_ECHO:-false}
54
+ POSTGRES_MAX_CONNECTIONS: ${POSTGRES_MAX_CONNECTIONS:-100}
55
+ POSTGRES_SHARED_BUFFERS: ${POSTGRES_SHARED_BUFFERS:-128MB}
56
+ POSTGRES_WORK_MEM: ${POSTGRES_WORK_MEM:-4MB}
57
+ POSTGRES_MAINTENANCE_WORK_MEM: ${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}
58
+ POSTGRES_EFFECTIVE_CACHE_SIZE: ${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}
59
+ REDIS_HOST: ${REDIS_HOST:-redis}
60
+ REDIS_PORT: ${REDIS_PORT:-6379}
61
+ REDIS_USERNAME: ${REDIS_USERNAME:-}
62
+ REDIS_PASSWORD: ${REDIS_PASSWORD:-difyai123456}
63
+ REDIS_USE_SSL: ${REDIS_USE_SSL:-false}
64
+ REDIS_DB: ${REDIS_DB:-0}
65
+ REDIS_USE_SENTINEL: ${REDIS_USE_SENTINEL:-false}
66
+ REDIS_SENTINELS: ${REDIS_SENTINELS:-}
67
+ REDIS_SENTINEL_SERVICE_NAME: ${REDIS_SENTINEL_SERVICE_NAME:-}
68
+ REDIS_SENTINEL_USERNAME: ${REDIS_SENTINEL_USERNAME:-}
69
+ REDIS_SENTINEL_PASSWORD: ${REDIS_SENTINEL_PASSWORD:-}
70
+ REDIS_SENTINEL_SOCKET_TIMEOUT: ${REDIS_SENTINEL_SOCKET_TIMEOUT:-0.1}
71
+ REDIS_USE_CLUSTERS: ${REDIS_USE_CLUSTERS:-false}
72
+ REDIS_CLUSTERS: ${REDIS_CLUSTERS:-}
73
+ REDIS_CLUSTERS_PASSWORD: ${REDIS_CLUSTERS_PASSWORD:-}
74
+ CELERY_BROKER_URL: ${CELERY_BROKER_URL:-redis://:difyai123456@redis:6379/1}
75
+ BROKER_USE_SSL: ${BROKER_USE_SSL:-false}
76
+ CELERY_USE_SENTINEL: ${CELERY_USE_SENTINEL:-false}
77
+ CELERY_SENTINEL_MASTER_NAME: ${CELERY_SENTINEL_MASTER_NAME:-}
78
+ CELERY_SENTINEL_SOCKET_TIMEOUT: ${CELERY_SENTINEL_SOCKET_TIMEOUT:-0.1}
79
+ WEB_API_CORS_ALLOW_ORIGINS: ${WEB_API_CORS_ALLOW_ORIGINS:-*}
80
+ CONSOLE_CORS_ALLOW_ORIGINS: ${CONSOLE_CORS_ALLOW_ORIGINS:-*}
81
+ STORAGE_TYPE: ${STORAGE_TYPE:-opendal}
82
+ OPENDAL_SCHEME: ${OPENDAL_SCHEME:-fs}
83
+ OPENDAL_FS_ROOT: ${OPENDAL_FS_ROOT:-storage}
84
+ S3_ENDPOINT: ${S3_ENDPOINT:-}
85
+ S3_REGION: ${S3_REGION:-us-east-1}
86
+ S3_BUCKET_NAME: ${S3_BUCKET_NAME:-difyai}
87
+ S3_ACCESS_KEY: ${S3_ACCESS_KEY:-}
88
+ S3_SECRET_KEY: ${S3_SECRET_KEY:-}
89
+ S3_USE_AWS_MANAGED_IAM: ${S3_USE_AWS_MANAGED_IAM:-false}
90
+ AZURE_BLOB_ACCOUNT_NAME: ${AZURE_BLOB_ACCOUNT_NAME:-difyai}
91
+ AZURE_BLOB_ACCOUNT_KEY: ${AZURE_BLOB_ACCOUNT_KEY:-difyai}
92
+ AZURE_BLOB_CONTAINER_NAME: ${AZURE_BLOB_CONTAINER_NAME:-difyai-container}
93
+ AZURE_BLOB_ACCOUNT_URL: ${AZURE_BLOB_ACCOUNT_URL:-https://<your_account_name>.blob.core.windows.net}
94
+ GOOGLE_STORAGE_BUCKET_NAME: ${GOOGLE_STORAGE_BUCKET_NAME:-your-bucket-name}
95
+ GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: ${GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64:-}
96
+ ALIYUN_OSS_BUCKET_NAME: ${ALIYUN_OSS_BUCKET_NAME:-your-bucket-name}
97
+ ALIYUN_OSS_ACCESS_KEY: ${ALIYUN_OSS_ACCESS_KEY:-your-access-key}
98
+ ALIYUN_OSS_SECRET_KEY: ${ALIYUN_OSS_SECRET_KEY:-your-secret-key}
99
+ ALIYUN_OSS_ENDPOINT: ${ALIYUN_OSS_ENDPOINT:-https://oss-ap-southeast-1-internal.aliyuncs.com}
100
+ ALIYUN_OSS_REGION: ${ALIYUN_OSS_REGION:-ap-southeast-1}
101
+ ALIYUN_OSS_AUTH_VERSION: ${ALIYUN_OSS_AUTH_VERSION:-v4}
102
+ ALIYUN_OSS_PATH: ${ALIYUN_OSS_PATH:-your-path}
103
+ TENCENT_COS_BUCKET_NAME: ${TENCENT_COS_BUCKET_NAME:-your-bucket-name}
104
+ TENCENT_COS_SECRET_KEY: ${TENCENT_COS_SECRET_KEY:-your-secret-key}
105
+ TENCENT_COS_SECRET_ID: ${TENCENT_COS_SECRET_ID:-your-secret-id}
106
+ TENCENT_COS_REGION: ${TENCENT_COS_REGION:-your-region}
107
+ TENCENT_COS_SCHEME: ${TENCENT_COS_SCHEME:-your-scheme}
108
+ OCI_ENDPOINT: ${OCI_ENDPOINT:-https://objectstorage.us-ashburn-1.oraclecloud.com}
109
+ OCI_BUCKET_NAME: ${OCI_BUCKET_NAME:-your-bucket-name}
110
+ OCI_ACCESS_KEY: ${OCI_ACCESS_KEY:-your-access-key}
111
+ OCI_SECRET_KEY: ${OCI_SECRET_KEY:-your-secret-key}
112
+ OCI_REGION: ${OCI_REGION:-us-ashburn-1}
113
+ HUAWEI_OBS_BUCKET_NAME: ${HUAWEI_OBS_BUCKET_NAME:-your-bucket-name}
114
+ HUAWEI_OBS_SECRET_KEY: ${HUAWEI_OBS_SECRET_KEY:-your-secret-key}
115
+ HUAWEI_OBS_ACCESS_KEY: ${HUAWEI_OBS_ACCESS_KEY:-your-access-key}
116
+ HUAWEI_OBS_SERVER: ${HUAWEI_OBS_SERVER:-your-server-url}
117
+ VOLCENGINE_TOS_BUCKET_NAME: ${VOLCENGINE_TOS_BUCKET_NAME:-your-bucket-name}
118
+ VOLCENGINE_TOS_SECRET_KEY: ${VOLCENGINE_TOS_SECRET_KEY:-your-secret-key}
119
+ VOLCENGINE_TOS_ACCESS_KEY: ${VOLCENGINE_TOS_ACCESS_KEY:-your-access-key}
120
+ VOLCENGINE_TOS_ENDPOINT: ${VOLCENGINE_TOS_ENDPOINT:-your-server-url}
121
+ VOLCENGINE_TOS_REGION: ${VOLCENGINE_TOS_REGION:-your-region}
122
+ BAIDU_OBS_BUCKET_NAME: ${BAIDU_OBS_BUCKET_NAME:-your-bucket-name}
123
+ BAIDU_OBS_SECRET_KEY: ${BAIDU_OBS_SECRET_KEY:-your-secret-key}
124
+ BAIDU_OBS_ACCESS_KEY: ${BAIDU_OBS_ACCESS_KEY:-your-access-key}
125
+ BAIDU_OBS_ENDPOINT: ${BAIDU_OBS_ENDPOINT:-your-server-url}
126
+ SUPABASE_BUCKET_NAME: ${SUPABASE_BUCKET_NAME:-your-bucket-name}
127
+ SUPABASE_API_KEY: ${SUPABASE_API_KEY:-your-access-key}
128
+ SUPABASE_URL: ${SUPABASE_URL:-your-server-url}
129
+ VECTOR_STORE: ${VECTOR_STORE:-weaviate}
130
+ WEAVIATE_ENDPOINT: ${WEAVIATE_ENDPOINT:-http://weaviate:8080}
131
+ WEAVIATE_API_KEY: ${WEAVIATE_API_KEY:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
132
+ QDRANT_URL: ${QDRANT_URL:-http://qdrant:6333}
133
+ QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}
134
+ QDRANT_CLIENT_TIMEOUT: ${QDRANT_CLIENT_TIMEOUT:-20}
135
+ QDRANT_GRPC_ENABLED: ${QDRANT_GRPC_ENABLED:-false}
136
+ QDRANT_GRPC_PORT: ${QDRANT_GRPC_PORT:-6334}
137
+ MILVUS_URI: ${MILVUS_URI:-http://127.0.0.1:19530}
138
+ MILVUS_TOKEN: ${MILVUS_TOKEN:-}
139
+ MILVUS_USER: ${MILVUS_USER:-root}
140
+ MILVUS_PASSWORD: ${MILVUS_PASSWORD:-Milvus}
141
+ MILVUS_ENABLE_HYBRID_SEARCH: ${MILVUS_ENABLE_HYBRID_SEARCH:-False}
142
+ MYSCALE_HOST: ${MYSCALE_HOST:-myscale}
143
+ MYSCALE_PORT: ${MYSCALE_PORT:-8123}
144
+ MYSCALE_USER: ${MYSCALE_USER:-default}
145
+ MYSCALE_PASSWORD: ${MYSCALE_PASSWORD:-}
146
+ MYSCALE_DATABASE: ${MYSCALE_DATABASE:-dify}
147
+ MYSCALE_FTS_PARAMS: ${MYSCALE_FTS_PARAMS:-}
148
+ COUCHBASE_CONNECTION_STRING: ${COUCHBASE_CONNECTION_STRING:-couchbase://couchbase-server}
149
+ COUCHBASE_USER: ${COUCHBASE_USER:-Administrator}
150
+ COUCHBASE_PASSWORD: ${COUCHBASE_PASSWORD:-password}
151
+ COUCHBASE_BUCKET_NAME: ${COUCHBASE_BUCKET_NAME:-Embeddings}
152
+ COUCHBASE_SCOPE_NAME: ${COUCHBASE_SCOPE_NAME:-_default}
153
+ PGVECTOR_HOST: ${PGVECTOR_HOST:-pgvector}
154
+ PGVECTOR_PORT: ${PGVECTOR_PORT:-5432}
155
+ PGVECTOR_USER: ${PGVECTOR_USER:-postgres}
156
+ PGVECTOR_PASSWORD: ${PGVECTOR_PASSWORD:-difyai123456}
157
+ PGVECTOR_DATABASE: ${PGVECTOR_DATABASE:-dify}
158
+ PGVECTOR_MIN_CONNECTION: ${PGVECTOR_MIN_CONNECTION:-1}
159
+ PGVECTOR_MAX_CONNECTION: ${PGVECTOR_MAX_CONNECTION:-5}
160
+ PGVECTO_RS_HOST: ${PGVECTO_RS_HOST:-pgvecto-rs}
161
+ PGVECTO_RS_PORT: ${PGVECTO_RS_PORT:-5432}
162
+ PGVECTO_RS_USER: ${PGVECTO_RS_USER:-postgres}
163
+ PGVECTO_RS_PASSWORD: ${PGVECTO_RS_PASSWORD:-difyai123456}
164
+ PGVECTO_RS_DATABASE: ${PGVECTO_RS_DATABASE:-dify}
165
+ ANALYTICDB_KEY_ID: ${ANALYTICDB_KEY_ID:-your-ak}
166
+ ANALYTICDB_KEY_SECRET: ${ANALYTICDB_KEY_SECRET:-your-sk}
167
+ ANALYTICDB_REGION_ID: ${ANALYTICDB_REGION_ID:-cn-hangzhou}
168
+ ANALYTICDB_INSTANCE_ID: ${ANALYTICDB_INSTANCE_ID:-gp-ab123456}
169
+ ANALYTICDB_ACCOUNT: ${ANALYTICDB_ACCOUNT:-testaccount}
170
+ ANALYTICDB_PASSWORD: ${ANALYTICDB_PASSWORD:-testpassword}
171
+ ANALYTICDB_NAMESPACE: ${ANALYTICDB_NAMESPACE:-dify}
172
+ ANALYTICDB_NAMESPACE_PASSWORD: ${ANALYTICDB_NAMESPACE_PASSWORD:-difypassword}
173
+ ANALYTICDB_HOST: ${ANALYTICDB_HOST:-gp-test.aliyuncs.com}
174
+ ANALYTICDB_PORT: ${ANALYTICDB_PORT:-5432}
175
+ ANALYTICDB_MIN_CONNECTION: ${ANALYTICDB_MIN_CONNECTION:-1}
176
+ ANALYTICDB_MAX_CONNECTION: ${ANALYTICDB_MAX_CONNECTION:-5}
177
+ TIDB_VECTOR_HOST: ${TIDB_VECTOR_HOST:-tidb}
178
+ TIDB_VECTOR_PORT: ${TIDB_VECTOR_PORT:-4000}
179
+ TIDB_VECTOR_USER: ${TIDB_VECTOR_USER:-}
180
+ TIDB_VECTOR_PASSWORD: ${TIDB_VECTOR_PASSWORD:-}
181
+ TIDB_VECTOR_DATABASE: ${TIDB_VECTOR_DATABASE:-dify}
182
+ TIDB_ON_QDRANT_URL: ${TIDB_ON_QDRANT_URL:-http://127.0.0.1}
183
+ TIDB_ON_QDRANT_API_KEY: ${TIDB_ON_QDRANT_API_KEY:-dify}
184
+ TIDB_ON_QDRANT_CLIENT_TIMEOUT: ${TIDB_ON_QDRANT_CLIENT_TIMEOUT:-20}
185
+ TIDB_ON_QDRANT_GRPC_ENABLED: ${TIDB_ON_QDRANT_GRPC_ENABLED:-false}
186
+ TIDB_ON_QDRANT_GRPC_PORT: ${TIDB_ON_QDRANT_GRPC_PORT:-6334}
187
+ TIDB_PUBLIC_KEY: ${TIDB_PUBLIC_KEY:-dify}
188
+ TIDB_PRIVATE_KEY: ${TIDB_PRIVATE_KEY:-dify}
189
+ TIDB_API_URL: ${TIDB_API_URL:-http://127.0.0.1}
190
+ TIDB_IAM_API_URL: ${TIDB_IAM_API_URL:-http://127.0.0.1}
191
+ TIDB_REGION: ${TIDB_REGION:-regions/aws-us-east-1}
192
+ TIDB_PROJECT_ID: ${TIDB_PROJECT_ID:-dify}
193
+ TIDB_SPEND_LIMIT: ${TIDB_SPEND_LIMIT:-100}
194
+ CHROMA_HOST: ${CHROMA_HOST:-127.0.0.1}
195
+ CHROMA_PORT: ${CHROMA_PORT:-8000}
196
+ CHROMA_TENANT: ${CHROMA_TENANT:-default_tenant}
197
+ CHROMA_DATABASE: ${CHROMA_DATABASE:-default_database}
198
+ CHROMA_AUTH_PROVIDER: ${CHROMA_AUTH_PROVIDER:-chromadb.auth.token_authn.TokenAuthClientProvider}
199
+ CHROMA_AUTH_CREDENTIALS: ${CHROMA_AUTH_CREDENTIALS:-}
200
+ ORACLE_HOST: ${ORACLE_HOST:-oracle}
201
+ ORACLE_PORT: ${ORACLE_PORT:-1521}
202
+ ORACLE_USER: ${ORACLE_USER:-dify}
203
+ ORACLE_PASSWORD: ${ORACLE_PASSWORD:-dify}
204
+ ORACLE_DATABASE: ${ORACLE_DATABASE:-FREEPDB1}
205
+ RELYT_HOST: ${RELYT_HOST:-db}
206
+ RELYT_PORT: ${RELYT_PORT:-5432}
207
+ RELYT_USER: ${RELYT_USER:-postgres}
208
+ RELYT_PASSWORD: ${RELYT_PASSWORD:-difyai123456}
209
+ RELYT_DATABASE: ${RELYT_DATABASE:-postgres}
210
+ OPENSEARCH_HOST: ${OPENSEARCH_HOST:-opensearch}
211
+ OPENSEARCH_PORT: ${OPENSEARCH_PORT:-9200}
212
+ OPENSEARCH_USER: ${OPENSEARCH_USER:-admin}
213
+ OPENSEARCH_PASSWORD: ${OPENSEARCH_PASSWORD:-admin}
214
+ OPENSEARCH_SECURE: ${OPENSEARCH_SECURE:-true}
215
+ TENCENT_VECTOR_DB_URL: ${TENCENT_VECTOR_DB_URL:-http://127.0.0.1}
216
+ TENCENT_VECTOR_DB_API_KEY: ${TENCENT_VECTOR_DB_API_KEY:-dify}
217
+ TENCENT_VECTOR_DB_TIMEOUT: ${TENCENT_VECTOR_DB_TIMEOUT:-30}
218
+ TENCENT_VECTOR_DB_USERNAME: ${TENCENT_VECTOR_DB_USERNAME:-dify}
219
+ TENCENT_VECTOR_DB_DATABASE: ${TENCENT_VECTOR_DB_DATABASE:-dify}
220
+ TENCENT_VECTOR_DB_SHARD: ${TENCENT_VECTOR_DB_SHARD:-1}
221
+ TENCENT_VECTOR_DB_REPLICAS: ${TENCENT_VECTOR_DB_REPLICAS:-2}
222
+ ELASTICSEARCH_HOST: ${ELASTICSEARCH_HOST:-0.0.0.0}
223
+ ELASTICSEARCH_PORT: ${ELASTICSEARCH_PORT:-9200}
224
+ ELASTICSEARCH_USERNAME: ${ELASTICSEARCH_USERNAME:-elastic}
225
+ ELASTICSEARCH_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
226
+ KIBANA_PORT: ${KIBANA_PORT:-5601}
227
+ BAIDU_VECTOR_DB_ENDPOINT: ${BAIDU_VECTOR_DB_ENDPOINT:-http://127.0.0.1:5287}
228
+ BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS: ${BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS:-30000}
229
+ BAIDU_VECTOR_DB_ACCOUNT: ${BAIDU_VECTOR_DB_ACCOUNT:-root}
230
+ BAIDU_VECTOR_DB_API_KEY: ${BAIDU_VECTOR_DB_API_KEY:-dify}
231
+ BAIDU_VECTOR_DB_DATABASE: ${BAIDU_VECTOR_DB_DATABASE:-dify}
232
+ BAIDU_VECTOR_DB_SHARD: ${BAIDU_VECTOR_DB_SHARD:-1}
233
+ BAIDU_VECTOR_DB_REPLICAS: ${BAIDU_VECTOR_DB_REPLICAS:-3}
234
+ VIKINGDB_ACCESS_KEY: ${VIKINGDB_ACCESS_KEY:-your-ak}
235
+ VIKINGDB_SECRET_KEY: ${VIKINGDB_SECRET_KEY:-your-sk}
236
+ VIKINGDB_REGION: ${VIKINGDB_REGION:-cn-shanghai}
237
+ VIKINGDB_HOST: ${VIKINGDB_HOST:-api-vikingdb.xxx.volces.com}
238
+ VIKINGDB_SCHEMA: ${VIKINGDB_SCHEMA:-http}
239
+ VIKINGDB_CONNECTION_TIMEOUT: ${VIKINGDB_CONNECTION_TIMEOUT:-30}
240
+ VIKINGDB_SOCKET_TIMEOUT: ${VIKINGDB_SOCKET_TIMEOUT:-30}
241
+ LINDORM_URL: ${LINDORM_URL:-http://lindorm:30070}
242
+ LINDORM_USERNAME: ${LINDORM_USERNAME:-lindorm}
243
+ LINDORM_PASSWORD: ${LINDORM_PASSWORD:-lindorm}
244
+ OCEANBASE_VECTOR_HOST: ${OCEANBASE_VECTOR_HOST:-oceanbase}
245
+ OCEANBASE_VECTOR_PORT: ${OCEANBASE_VECTOR_PORT:-2881}
246
+ OCEANBASE_VECTOR_USER: ${OCEANBASE_VECTOR_USER:-root@test}
247
+ OCEANBASE_VECTOR_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
248
+ OCEANBASE_VECTOR_DATABASE: ${OCEANBASE_VECTOR_DATABASE:-test}
249
+ OCEANBASE_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
250
+ OCEANBASE_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
251
+ UPSTASH_VECTOR_URL: ${UPSTASH_VECTOR_URL:-https://xxx-vector.upstash.io}
252
+ UPSTASH_VECTOR_TOKEN: ${UPSTASH_VECTOR_TOKEN:-dify}
253
+ UPLOAD_FILE_SIZE_LIMIT: ${UPLOAD_FILE_SIZE_LIMIT:-15}
254
+ UPLOAD_FILE_BATCH_LIMIT: ${UPLOAD_FILE_BATCH_LIMIT:-5}
255
+ ETL_TYPE: ${ETL_TYPE:-dify}
256
+ UNSTRUCTURED_API_URL: ${UNSTRUCTURED_API_URL:-}
257
+ UNSTRUCTURED_API_KEY: ${UNSTRUCTURED_API_KEY:-}
258
+ SCARF_NO_ANALYTICS: ${SCARF_NO_ANALYTICS:-true}
259
+ PROMPT_GENERATION_MAX_TOKENS: ${PROMPT_GENERATION_MAX_TOKENS:-512}
260
+ CODE_GENERATION_MAX_TOKENS: ${CODE_GENERATION_MAX_TOKENS:-1024}
261
+ MULTIMODAL_SEND_FORMAT: ${MULTIMODAL_SEND_FORMAT:-base64}
262
+ UPLOAD_IMAGE_FILE_SIZE_LIMIT: ${UPLOAD_IMAGE_FILE_SIZE_LIMIT:-10}
263
+ UPLOAD_VIDEO_FILE_SIZE_LIMIT: ${UPLOAD_VIDEO_FILE_SIZE_LIMIT:-100}
264
+ UPLOAD_AUDIO_FILE_SIZE_LIMIT: ${UPLOAD_AUDIO_FILE_SIZE_LIMIT:-50}
265
+ SENTRY_DSN: ${SENTRY_DSN:-}
266
+ API_SENTRY_DSN: ${API_SENTRY_DSN:-}
267
+ API_SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
268
+ API_SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
269
+ WEB_SENTRY_DSN: ${WEB_SENTRY_DSN:-}
270
+ NOTION_INTEGRATION_TYPE: ${NOTION_INTEGRATION_TYPE:-public}
271
+ NOTION_CLIENT_SECRET: ${NOTION_CLIENT_SECRET:-}
272
+ NOTION_CLIENT_ID: ${NOTION_CLIENT_ID:-}
273
+ NOTION_INTERNAL_SECRET: ${NOTION_INTERNAL_SECRET:-}
274
+ MAIL_TYPE: ${MAIL_TYPE:-resend}
275
+ MAIL_DEFAULT_SEND_FROM: ${MAIL_DEFAULT_SEND_FROM:-}
276
+ RESEND_API_URL: ${RESEND_API_URL:-https://api.resend.com}
277
+ RESEND_API_KEY: ${RESEND_API_KEY:-your-resend-api-key}
278
+ SMTP_SERVER: ${SMTP_SERVER:-}
279
+ SMTP_PORT: ${SMTP_PORT:-465}
280
+ SMTP_USERNAME: ${SMTP_USERNAME:-}
281
+ SMTP_PASSWORD: ${SMTP_PASSWORD:-}
282
+ SMTP_USE_TLS: ${SMTP_USE_TLS:-true}
283
+ SMTP_OPPORTUNISTIC_TLS: ${SMTP_OPPORTUNISTIC_TLS:-false}
284
+ INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-4000}
285
+ INVITE_EXPIRY_HOURS: ${INVITE_EXPIRY_HOURS:-72}
286
+ RESET_PASSWORD_TOKEN_EXPIRY_MINUTES: ${RESET_PASSWORD_TOKEN_EXPIRY_MINUTES:-5}
287
+ CODE_EXECUTION_ENDPOINT: ${CODE_EXECUTION_ENDPOINT:-http://sandbox:8194}
288
+ CODE_EXECUTION_API_KEY: ${CODE_EXECUTION_API_KEY:-dify-sandbox}
289
+ CODE_MAX_NUMBER: ${CODE_MAX_NUMBER:-9223372036854775807}
290
+ CODE_MIN_NUMBER: ${CODE_MIN_NUMBER:--9223372036854775808}
291
+ CODE_MAX_DEPTH: ${CODE_MAX_DEPTH:-5}
292
+ CODE_MAX_PRECISION: ${CODE_MAX_PRECISION:-20}
293
+ CODE_MAX_STRING_LENGTH: ${CODE_MAX_STRING_LENGTH:-80000}
294
+ CODE_MAX_STRING_ARRAY_LENGTH: ${CODE_MAX_STRING_ARRAY_LENGTH:-30}
295
+ CODE_MAX_OBJECT_ARRAY_LENGTH: ${CODE_MAX_OBJECT_ARRAY_LENGTH:-30}
296
+ CODE_MAX_NUMBER_ARRAY_LENGTH: ${CODE_MAX_NUMBER_ARRAY_LENGTH:-1000}
297
+ CODE_EXECUTION_CONNECT_TIMEOUT: ${CODE_EXECUTION_CONNECT_TIMEOUT:-10}
298
+ CODE_EXECUTION_READ_TIMEOUT: ${CODE_EXECUTION_READ_TIMEOUT:-60}
299
+ CODE_EXECUTION_WRITE_TIMEOUT: ${CODE_EXECUTION_WRITE_TIMEOUT:-10}
300
+ TEMPLATE_TRANSFORM_MAX_LENGTH: ${TEMPLATE_TRANSFORM_MAX_LENGTH:-80000}
301
+ WORKFLOW_MAX_EXECUTION_STEPS: ${WORKFLOW_MAX_EXECUTION_STEPS:-500}
302
+ WORKFLOW_MAX_EXECUTION_TIME: ${WORKFLOW_MAX_EXECUTION_TIME:-1200}
303
+ WORKFLOW_CALL_MAX_DEPTH: ${WORKFLOW_CALL_MAX_DEPTH:-5}
304
+ MAX_VARIABLE_SIZE: ${MAX_VARIABLE_SIZE:-204800}
305
+ WORKFLOW_PARALLEL_DEPTH_LIMIT: ${WORKFLOW_PARALLEL_DEPTH_LIMIT:-3}
306
+ WORKFLOW_FILE_UPLOAD_LIMIT: ${WORKFLOW_FILE_UPLOAD_LIMIT:-10}
307
+ HTTP_REQUEST_NODE_MAX_BINARY_SIZE: ${HTTP_REQUEST_NODE_MAX_BINARY_SIZE:-10485760}
308
+ HTTP_REQUEST_NODE_MAX_TEXT_SIZE: ${HTTP_REQUEST_NODE_MAX_TEXT_SIZE:-1048576}
309
+ SSRF_PROXY_HTTP_URL: ${SSRF_PROXY_HTTP_URL:-http://ssrf_proxy:3128}
310
+ SSRF_PROXY_HTTPS_URL: ${SSRF_PROXY_HTTPS_URL:-http://ssrf_proxy:3128}
311
+ TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
312
+ PGUSER: ${PGUSER:-${DB_USERNAME}}
313
+ POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-${DB_PASSWORD}}
314
+ POSTGRES_DB: ${POSTGRES_DB:-${DB_DATABASE}}
315
+ PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
316
+ SANDBOX_API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
317
+ SANDBOX_GIN_MODE: ${SANDBOX_GIN_MODE:-release}
318
+ SANDBOX_WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
319
+ SANDBOX_ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
320
+ SANDBOX_HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
321
+ SANDBOX_HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
322
+ SANDBOX_PORT: ${SANDBOX_PORT:-8194}
323
+ WEAVIATE_PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
324
+ WEAVIATE_QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
325
+ WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-true}
326
+ WEAVIATE_DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
327
+ WEAVIATE_CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
328
+ WEAVIATE_AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
329
+ WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
330
+ WEAVIATE_AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:[email protected]}
331
+ WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
332
+ WEAVIATE_AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:[email protected]}
333
+ CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
334
+ CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
335
+ CHROMA_IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}
336
+ ORACLE_PWD: ${ORACLE_PWD:-Dify123456}
337
+ ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8}
338
+ ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision}
339
+ ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000}
340
+ ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296}
341
+ ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000}
342
+ MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
343
+ MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
344
+ ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
345
+ MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
346
+ MILVUS_AUTHORIZATION_ENABLED: ${MILVUS_AUTHORIZATION_ENABLED:-true}
347
+ PGVECTOR_PGUSER: ${PGVECTOR_PGUSER:-postgres}
348
+ PGVECTOR_POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
349
+ PGVECTOR_POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
350
+ PGVECTOR_PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
351
+ OPENSEARCH_DISCOVERY_TYPE: ${OPENSEARCH_DISCOVERY_TYPE:-single-node}
352
+ OPENSEARCH_BOOTSTRAP_MEMORY_LOCK: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true}
353
+ OPENSEARCH_JAVA_OPTS_MIN: ${OPENSEARCH_JAVA_OPTS_MIN:-512m}
354
+ OPENSEARCH_JAVA_OPTS_MAX: ${OPENSEARCH_JAVA_OPTS_MAX:-1024m}
355
+ OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123}
356
+ OPENSEARCH_MEMLOCK_SOFT: ${OPENSEARCH_MEMLOCK_SOFT:--1}
357
+ OPENSEARCH_MEMLOCK_HARD: ${OPENSEARCH_MEMLOCK_HARD:--1}
358
+ OPENSEARCH_NOFILE_SOFT: ${OPENSEARCH_NOFILE_SOFT:-65536}
359
+ OPENSEARCH_NOFILE_HARD: ${OPENSEARCH_NOFILE_HARD:-65536}
360
+ NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
361
+ NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
362
+ NGINX_PORT: ${NGINX_PORT:-80}
363
+ NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
364
+ NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
365
+ NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
366
+ NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
367
+ NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
368
+ NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M}
369
+ NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
370
+ NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
371
+ NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
372
+ NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
373
+ CERTBOT_EMAIL: ${CERTBOT_EMAIL:[email protected]}
374
+ CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-your_domain.com}
375
+ CERTBOT_OPTIONS: ${CERTBOT_OPTIONS:-}
376
+ SSRF_HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
377
+ SSRF_COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
378
+ SSRF_REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
379
+ SSRF_SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
380
+ SSRF_DEFAULT_TIME_OUT: ${SSRF_DEFAULT_TIME_OUT:-5}
381
+ SSRF_DEFAULT_CONNECT_TIME_OUT: ${SSRF_DEFAULT_CONNECT_TIME_OUT:-5}
382
+ SSRF_DEFAULT_READ_TIME_OUT: ${SSRF_DEFAULT_READ_TIME_OUT:-5}
383
+ SSRF_DEFAULT_WRITE_TIME_OUT: ${SSRF_DEFAULT_WRITE_TIME_OUT:-5}
384
+ EXPOSE_NGINX_PORT: ${EXPOSE_NGINX_PORT:-80}
385
+ EXPOSE_NGINX_SSL_PORT: ${EXPOSE_NGINX_SSL_PORT:-443}
386
+ POSITION_TOOL_PINS: ${POSITION_TOOL_PINS:-}
387
+ POSITION_TOOL_INCLUDES: ${POSITION_TOOL_INCLUDES:-}
388
+ POSITION_TOOL_EXCLUDES: ${POSITION_TOOL_EXCLUDES:-}
389
+ POSITION_PROVIDER_PINS: ${POSITION_PROVIDER_PINS:-}
390
+ POSITION_PROVIDER_INCLUDES: ${POSITION_PROVIDER_INCLUDES:-}
391
+ POSITION_PROVIDER_EXCLUDES: ${POSITION_PROVIDER_EXCLUDES:-}
392
+ CSP_WHITELIST: ${CSP_WHITELIST:-}
393
+ CREATE_TIDB_SERVICE_JOB_ENABLED: ${CREATE_TIDB_SERVICE_JOB_ENABLED:-false}
394
+ MAX_SUBMIT_COUNT: ${MAX_SUBMIT_COUNT:-100}
395
+ TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-10}
396
+
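Every entry in the block above follows the `${VAR:-default}` pattern, so any value can be overridden from `.env` without editing this generated file, which the header explicitly warns against modifying. As a sketch, replacing the placeholder `SECRET_KEY`; the sandbox comments elsewhere in this file suggest `openssl rand -base64 42` as a key generator, and the same approach is assumed to be acceptable here:

  openssl rand -base64 42
  # .env
  SECRET_KEY=<generated-value>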
397
+ services:
398
+ # API service
399
+ api:
400
+ image: langgenius/dify-api:0.15.3
401
+ restart: always
402
+ environment:
403
+ # Use the shared environment variables.
404
+ <<: *shared-api-worker-env
405
+ # Startup mode, 'api' starts the API server.
406
+ MODE: api
407
+ SENTRY_DSN: ${API_SENTRY_DSN:-}
408
+ SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
409
+ SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
410
+ depends_on:
411
+ - db
412
+ - redis
413
+ volumes:
414
+ # Mount the storage directory to the container, for storing user files.
415
+ - ./volumes/app/storage:/app/api/storage
416
+ networks:
417
+ - ssrf_proxy_network
418
+ - default
419
+
420
+ # worker service
421
+ # The Celery worker for processing the queue.
422
+ worker:
423
+ image: langgenius/dify-api:0.15.3
424
+ restart: always
425
+ environment:
426
+ # Use the shared environment variables.
427
+ <<: *shared-api-worker-env
428
+ # Startup mode, 'worker' starts the Celery worker for processing the queue.
429
+ MODE: worker
430
+ SENTRY_DSN: ${API_SENTRY_DSN:-}
431
+ SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
432
+ SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
433
+ depends_on:
434
+ - db
435
+ - redis
436
+ volumes:
437
+ # Mount the storage directory to the container, for storing user files.
438
+ - ./volumes/app/storage:/app/api/storage
439
+ networks:
440
+ - ssrf_proxy_network
441
+ - default
442
+
443
+ # Frontend web application.
444
+ web:
445
+ image: langgenius/dify-web:0.15.3
446
+ restart: always
447
+ environment:
448
+ CONSOLE_API_URL: ${CONSOLE_API_URL:-}
449
+ APP_API_URL: ${APP_API_URL:-}
450
+ SENTRY_DSN: ${WEB_SENTRY_DSN:-}
451
+ NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
452
+ TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
453
+ CSP_WHITELIST: ${CSP_WHITELIST:-}
454
+ TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-}
455
+ INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-}
456
+
457
+ # The postgres database.
458
+ db:
459
+ image: postgres:15-alpine
460
+ restart: always
461
+ environment:
462
+ PGUSER: ${PGUSER:-postgres}
463
+ POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
464
+ POSTGRES_DB: ${POSTGRES_DB:-dify}
465
+ PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
466
+ command: >
467
+ postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
468
+ -c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}'
469
+ -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
470
+ -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
471
+ -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
472
+ volumes:
473
+ - ./volumes/db/data:/var/lib/postgresql/data
474
+ healthcheck:
475
+ test: [ 'CMD', 'pg_isready' ]
476
+ interval: 1s
477
+ timeout: 3s
478
+ retries: 30
479
+
480
+ # The redis cache.
481
+ redis:
482
+ image: redis:6-alpine
483
+ restart: always
484
+ environment:
485
+ REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456}
486
+ volumes:
487
+ # Mount the redis data directory to the container.
488
+ - ./volumes/redis/data:/data
489
+ # Set the redis password when starting the redis server.
490
+ command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
491
+ healthcheck:
492
+ test: [ 'CMD', 'redis-cli', 'ping' ]
493
+
494
+ # The DifySandbox
495
+ sandbox:
496
+ image: langgenius/dify-sandbox:0.2.10
497
+ restart: always
498
+ environment:
499
+ # The DifySandbox configurations
500
+ # Make sure to change this key to a strong value for your deployment.
501
+ # You can generate a strong key using `openssl rand -base64 42`.
502
+ API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
503
+ GIN_MODE: ${SANDBOX_GIN_MODE:-release}
504
+ WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
505
+ ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
506
+ HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
507
+ HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
508
+ SANDBOX_PORT: ${SANDBOX_PORT:-8194}
509
+ volumes:
510
+ - ./volumes/sandbox/dependencies:/dependencies
511
+ healthcheck:
512
+ test: [ 'CMD', 'curl', '-f', 'http://localhost:8194/health' ]
513
+ networks:
514
+ - ssrf_proxy_network
515
+
516
+ # ssrf_proxy server
517
+ # for more information, please refer to
518
+ # https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed
519
+ ssrf_proxy:
520
+ image: ubuntu/squid:latest
521
+ restart: always
522
+ volumes:
523
+ - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
524
+ - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
525
+ entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
526
+ environment:
527
+ # Please adjust the squid env vars below to fit your network environment.
528
+ HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
529
+ COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
530
+ REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
531
+ SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
532
+ SANDBOX_PORT: ${SANDBOX_PORT:-8194}
533
+ networks:
534
+ - ssrf_proxy_network
535
+ - default
536
+
537
+ # Certbot service
538
+ # use `docker-compose --profile certbot up` to start the certbot service.
539
+ certbot:
540
+ image: certbot/certbot
541
+ profiles:
542
+ - certbot
543
+ volumes:
544
+ - ./volumes/certbot/conf:/etc/letsencrypt
545
+ - ./volumes/certbot/www:/var/www/html
546
+ - ./volumes/certbot/logs:/var/log/letsencrypt
547
+ - ./volumes/certbot/conf/live:/etc/letsencrypt/live
548
+ - ./certbot/update-cert.template.txt:/update-cert.template.txt
549
+ - ./certbot/docker-entrypoint.sh:/docker-entrypoint.sh
550
+ environment:
551
+ - CERTBOT_EMAIL=${CERTBOT_EMAIL}
552
+ - CERTBOT_DOMAIN=${CERTBOT_DOMAIN}
553
+ - CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-}
554
+ entrypoint: [ '/docker-entrypoint.sh' ]
555
+ command: [ 'tail', '-f', '/dev/null' ]
556
+
557
+ # The nginx reverse proxy.
558
+ # used for reverse proxying the API service and Web service.
559
+ nginx:
560
+ image: nginx:latest
561
+ restart: always
562
+ volumes:
563
+ - ./nginx/nginx.conf.template:/etc/nginx/nginx.conf.template
564
+ - ./nginx/proxy.conf.template:/etc/nginx/proxy.conf.template
565
+ - ./nginx/https.conf.template:/etc/nginx/https.conf.template
566
+ - ./nginx/conf.d:/etc/nginx/conf.d
567
+ - ./nginx/docker-entrypoint.sh:/docker-entrypoint-mount.sh
568
+ - ./nginx/ssl:/etc/ssl # cert dir (legacy)
569
+ - ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container)
570
+ - ./volumes/certbot/conf:/etc/letsencrypt
571
+ - ./volumes/certbot/www:/var/www/html
572
+ entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
573
+ environment:
574
+ NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
575
+ NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
576
+ NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
577
+ NGINX_PORT: ${NGINX_PORT:-80}
578
+ # You're required to add your own SSL certificates/keys to the `./nginx/ssl` directory
579
+ # and modify the env vars below in .env if NGINX_HTTPS_ENABLED is true.
580
+ NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
581
+ NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
582
+ NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
583
+ NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
584
+ NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M}
585
+ NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
586
+ NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
587
+ NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
588
+ NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
589
+ CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-}
590
+ depends_on:
591
+ - api
592
+ - web
593
+ ports:
594
+ - '${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}'
595
+ - '${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}'
596
+
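If you terminate TLS here without certbot, the defaults above expect a certificate pair named dify.crt / dify.key in ./nginx/ssl. A minimal self-signed sketch (replace the CN with your own host name):

    mkdir -p nginx/ssl
    openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
      -keyout nginx/ssl/dify.key -out nginx/ssl/dify.crt -subj "/CN=your.dify.example"
    # then set NGINX_HTTPS_ENABLED=true in .env and recreate the nginx container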
597
+ # The TiDB vector store.
598
+ # For production use, please refer to https://github.com/pingcap/tidb-docker-compose
599
+ tidb:
600
+ image: pingcap/tidb:v8.4.0
601
+ profiles:
602
+ - tidb
603
+ command:
604
+ - --store=unistore
605
+ restart: always
606
+
607
+ # The Weaviate vector store.
608
+ weaviate:
609
+ image: semitechnologies/weaviate:1.19.0
610
+ profiles:
611
+ - ''
612
+ - weaviate
613
+ restart: always
614
+ volumes:
615
+ # Mount the Weaviate data directory to the container.
616
+ - ./volumes/weaviate:/var/lib/weaviate
617
+ environment:
618
+ # The Weaviate configurations
619
+ # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
620
+ PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
621
+ QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
622
+ AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
623
+ DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
624
+ CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
625
+ AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
626
+ AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
627
+ AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:[email protected]}
628
+ AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
629
+ AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:[email protected]}
630
+
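Weaviate is the default vector store (note the empty '' profile), so it starts with the core stack. A hedged readiness check run from inside the compose network, since no host port is published here (assumes curl is available in the api container; the key is the default from this file):

    docker compose exec api curl -s \
      -H "Authorization: Bearer WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih" \
      -o /dev/null -w '%{http_code}\n' http://weaviate:8080/v1/.well-known/ready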
631
+ # Qdrant vector store.
632
+ # (if used, you need to set VECTOR_STORE to qdrant in the api & worker service.)
633
+ qdrant:
634
+ image: langgenius/qdrant:v1.7.3
635
+ profiles:
636
+ - qdrant
637
+ restart: always
638
+ volumes:
639
+ - ./volumes/qdrant:/qdrant/storage
640
+ environment:
641
+ QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}
642
+
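Switching to any of the alternative stores below follows the same two-step pattern; qdrant is the shortest example (a sketch; variable names as used in this file and in .env.example):

    # .env
    VECTOR_STORE=qdrant
    QDRANT_API_KEY=difyai123456
    # start the stack together with the qdrant profile
    docker compose --profile qdrant up -d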
643
+ # The Couchbase vector store.
644
+ couchbase-server:
645
+ build: ./couchbase-server
646
+ profiles:
647
+ - couchbase
648
+ restart: always
649
+ environment:
650
+ - CLUSTER_NAME=dify_search
651
+ - COUCHBASE_ADMINISTRATOR_USERNAME=${COUCHBASE_USER:-Administrator}
652
+ - COUCHBASE_ADMINISTRATOR_PASSWORD=${COUCHBASE_PASSWORD:-password}
653
+ - COUCHBASE_BUCKET=${COUCHBASE_BUCKET_NAME:-Embeddings}
654
+ - COUCHBASE_BUCKET_RAMSIZE=512
655
+ - COUCHBASE_RAM_SIZE=2048
656
+ - COUCHBASE_EVENTING_RAM_SIZE=512
657
+ - COUCHBASE_INDEX_RAM_SIZE=512
658
+ - COUCHBASE_FTS_RAM_SIZE=1024
659
+ hostname: couchbase-server
660
+ container_name: couchbase-server
661
+ working_dir: /opt/couchbase
662
+ stdin_open: true
663
+ tty: true
664
+ entrypoint: [ "" ]
665
+ command: sh -c "/opt/couchbase/init/init-cbserver.sh"
666
+ volumes:
667
+ - ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data
668
+ healthcheck:
669
+ # ensure bucket was created before proceeding
670
+ test: [ "CMD-SHELL", "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1" ]
671
+ interval: 10s
672
+ retries: 10
673
+ start_period: 30s
674
+ timeout: 10s
675
+
676
+ # The pgvector vector database.
677
+ pgvector:
678
+ image: pgvector/pgvector:pg16
679
+ profiles:
680
+ - pgvector
681
+ restart: always
682
+ environment:
683
+ PGUSER: ${PGVECTOR_PGUSER:-postgres}
684
+ # The password for the default postgres user.
685
+ POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
686
+ # The name of the default postgres database.
687
+ POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
688
+ # postgres data directory
689
+ PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
690
+ volumes:
691
+ - ./volumes/pgvector/data:/var/lib/postgresql/data
692
+ healthcheck:
693
+ test: [ 'CMD', 'pg_isready' ]
694
+ interval: 1s
695
+ timeout: 3s
696
+ retries: 30
697
+
698
+ # pgvecto-rs vector store
699
+ pgvecto-rs:
700
+ image: tensorchord/pgvecto-rs:pg16-v0.3.0
701
+ profiles:
702
+ - pgvecto-rs
703
+ restart: always
704
+ environment:
705
+ PGUSER: ${PGVECTOR_PGUSER:-postgres}
706
+ # The password for the default postgres user.
707
+ POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
708
+ # The name of the default postgres database.
709
+ POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
710
+ # postgres data directory
711
+ PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
712
+ volumes:
713
+ - ./volumes/pgvecto_rs/data:/var/lib/postgresql/data
714
+ healthcheck:
715
+ test: [ 'CMD', 'pg_isready' ]
716
+ interval: 1s
717
+ timeout: 3s
718
+ retries: 30
719
+
720
+ # Chroma vector database
721
+ chroma:
722
+ image: ghcr.io/chroma-core/chroma:0.5.20
723
+ profiles:
724
+ - chroma
725
+ restart: always
726
+ volumes:
727
+ - ./volumes/chroma:/chroma/chroma
728
+ environment:
729
+ CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
730
+ CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
731
+ IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}
732
+
733
+ # OceanBase vector database
734
+ oceanbase:
735
+ image: quay.io/oceanbase/oceanbase-ce:4.3.3.0-100000142024101215
736
+ profiles:
737
+ - oceanbase
738
+ restart: always
739
+ volumes:
740
+ - ./volumes/oceanbase/data:/root/ob
741
+ - ./volumes/oceanbase/conf:/root/.obd/cluster
742
+ - ./volumes/oceanbase/init.d:/root/boot/init.d
743
+ environment:
744
+ OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
745
+ OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
746
+ OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
747
+ OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
748
+ OB_SERVER_IP: '127.0.0.1'
749
+
750
+ # Oracle vector database
751
+ oracle:
752
+ image: container-registry.oracle.com/database/free:latest
753
+ profiles:
754
+ - oracle
755
+ restart: always
756
+ volumes:
757
+ - source: oradata
758
+ type: volume
759
+ target: /opt/oracle/oradata
760
+ - ./startupscripts:/opt/oracle/scripts/startup
761
+ environment:
762
+ ORACLE_PWD: ${ORACLE_PWD:-Dify123456}
763
+ ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8}
764
+
765
+ # Milvus vector database services
766
+ etcd:
767
+ container_name: milvus-etcd
768
+ image: quay.io/coreos/etcd:v3.5.5
769
+ profiles:
770
+ - milvus
771
+ environment:
772
+ ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision}
773
+ ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000}
774
+ ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296}
775
+ ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000}
776
+ volumes:
777
+ - ./volumes/milvus/etcd:/etcd
778
+ command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
779
+ healthcheck:
780
+ test: [ 'CMD', 'etcdctl', 'endpoint', 'health' ]
781
+ interval: 30s
782
+ timeout: 20s
783
+ retries: 3
784
+ networks:
785
+ - milvus
786
+
787
+ minio:
788
+ container_name: milvus-minio
789
+ image: minio/minio:RELEASE.2023-03-20T20-16-18Z
790
+ profiles:
791
+ - milvus
792
+ environment:
793
+ MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
794
+ MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
795
+ volumes:
796
+ - ./volumes/milvus/minio:/minio_data
797
+ command: minio server /minio_data --console-address ":9001"
798
+ healthcheck:
799
+ test: [ 'CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live' ]
800
+ interval: 30s
801
+ timeout: 20s
802
+ retries: 3
803
+ networks:
804
+ - milvus
805
+
806
+ milvus-standalone:
807
+ container_name: milvus-standalone
808
+ image: milvusdb/milvus:v2.5.0-beta
809
+ profiles:
810
+ - milvus
811
+ command: [ 'milvus', 'run', 'standalone' ]
812
+ environment:
813
+ ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
814
+ MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
815
+ common.security.authorizationEnabled: ${MILVUS_AUTHORIZATION_ENABLED:-true}
816
+ volumes:
817
+ - ./volumes/milvus/milvus:/var/lib/milvus
818
+ healthcheck:
819
+ test: [ 'CMD', 'curl', '-f', 'http://localhost:9091/healthz' ]
820
+ interval: 30s
821
+ start_period: 90s
822
+ timeout: 20s
823
+ retries: 3
824
+ depends_on:
825
+ - etcd
826
+ - minio
827
+ ports:
828
+ - 19530:19530
829
+ - 9091:9091
830
+ networks:
831
+ - milvus
832
+
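Milvus pulls in etcd and MinIO through the same profile, and port 9091 is published, so the endpoint used by the healthcheck above is also reachable from the host (a sketch):

    docker compose --profile milvus up -d
    curl -f http://localhost:9091/healthz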
833
+ # Opensearch vector database
834
+ opensearch:
835
+ container_name: opensearch
836
+ image: opensearchproject/opensearch:latest
837
+ profiles:
838
+ - opensearch
839
+ environment:
840
+ discovery.type: ${OPENSEARCH_DISCOVERY_TYPE:-single-node}
841
+ bootstrap.memory_lock: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true}
842
+ OPENSEARCH_JAVA_OPTS: -Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m}
843
+ OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123}
844
+ ulimits:
845
+ memlock:
846
+ soft: ${OPENSEARCH_MEMLOCK_SOFT:--1}
847
+ hard: ${OPENSEARCH_MEMLOCK_HARD:--1}
848
+ nofile:
849
+ soft: ${OPENSEARCH_NOFILE_SOFT:-65536}
850
+ hard: ${OPENSEARCH_NOFILE_HARD:-65536}
851
+ volumes:
852
+ - ./volumes/opensearch/data:/usr/share/opensearch/data
853
+ networks:
854
+ - opensearch-net
855
+
856
+ opensearch-dashboards:
857
+ container_name: opensearch-dashboards
858
+ image: opensearchproject/opensearch-dashboards:latest
859
+ profiles:
860
+ - opensearch
861
+ environment:
862
+ OPENSEARCH_HOSTS: '["https://opensearch:9200"]'
863
+ volumes:
864
+ - ./volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml
865
+ networks:
866
+ - opensearch-net
867
+ depends_on:
868
+ - opensearch
869
+
870
+ # MyScale vector database
871
+ myscale:
872
+ container_name: myscale
873
+ image: myscale/myscaledb:1.6.4
874
+ profiles:
875
+ - myscale
876
+ restart: always
877
+ tty: true
878
+ volumes:
879
+ - ./volumes/myscale/data:/var/lib/clickhouse
880
+ - ./volumes/myscale/log:/var/log/clickhouse-server
881
+ - ./volumes/myscale/config/users.d/custom_users_config.xml:/etc/clickhouse-server/users.d/custom_users_config.xml
882
+ ports:
883
+ - ${MYSCALE_PORT:-8123}:${MYSCALE_PORT:-8123}
884
+
885
+ # https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html
886
+ # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites
887
+ elasticsearch:
888
+ image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3
889
+ container_name: elasticsearch
890
+ profiles:
891
+ - elasticsearch
892
+ - elasticsearch-ja
893
+ restart: always
894
+ volumes:
895
+ - ./elasticsearch/docker-entrypoint.sh:/docker-entrypoint-mount.sh
896
+ - dify_es01_data:/usr/share/elasticsearch/data
897
+ environment:
898
+ ELASTIC_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
899
+ VECTOR_STORE: ${VECTOR_STORE:-}
900
+ cluster.name: dify-es-cluster
901
+ node.name: dify-es0
902
+ discovery.type: single-node
903
+ xpack.license.self_generated.type: basic
904
+ xpack.security.enabled: 'true'
905
+ xpack.security.enrollment.enabled: 'false'
906
+ xpack.security.http.ssl.enabled: 'false'
907
+ ports:
908
+ - ${ELASTICSEARCH_PORT:-9200}:9200
909
+ deploy:
910
+ resources:
911
+ limits:
912
+ memory: 2g
913
+ entrypoint: [ 'sh', '-c', "sh /docker-entrypoint-mount.sh" ]
914
+ healthcheck:
915
+ test: [ 'CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty' ]
916
+ interval: 30s
917
+ timeout: 10s
918
+ retries: 50
919
+
920
+ # https://www.elastic.co/guide/en/kibana/current/docker.html
921
+ # https://www.elastic.co/guide/en/kibana/current/settings.html
922
+ kibana:
923
+ image: docker.elastic.co/kibana/kibana:8.14.3
924
+ container_name: kibana
925
+ profiles:
926
+ - elasticsearch
927
+ depends_on:
928
+ - elasticsearch
929
+ restart: always
930
+ environment:
931
+ XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa
932
+ NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana
933
+ XPACK_SECURITY_ENABLED: 'true'
934
+ XPACK_SECURITY_ENROLLMENT_ENABLED: 'false'
935
+ XPACK_SECURITY_HTTP_SSL_ENABLED: 'false'
936
+ XPACK_FLEET_ISAIRGAPPED: 'true'
937
+ I18N_LOCALE: zh-CN
938
+ SERVER_PORT: '5601'
939
+ ELASTICSEARCH_HOSTS: http://elasticsearch:9200
940
+ ports:
941
+ - ${KIBANA_PORT:-5601}:5601
942
+ healthcheck:
943
+ test: [ 'CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || exit 1' ]
944
+ interval: 30s
945
+ timeout: 10s
946
+ retries: 3
947
+
948
+ # The Unstructured ETL service.
949
+ # (if used, you need to set ETL_TYPE to Unstructured in the api & worker service.)
950
+ unstructured:
951
+ image: downloads.unstructured.io/unstructured-io/unstructured-api:latest
952
+ profiles:
953
+ - unstructured
954
+ restart: always
955
+ volumes:
956
+ - ./volumes/unstructured:/app/data
957
+
958
+ networks:
959
+ # create a network between sandbox, api and ssrf_proxy, with no access to the outside.
960
+ ssrf_proxy_network:
961
+ driver: bridge
962
+ internal: true
963
+ milvus:
964
+ driver: bridge
965
+ opensearch-net:
966
+ driver: bridge
967
+ internal: true
968
+
969
+ volumes:
970
+ oradata:
971
+ dify_es01_data:
docker/elasticsearch/docker-entrypoint.sh ADDED
@@ -0,0 +1,25 @@
1
+ #!/bin/bash
2
+
3
+ set -e
4
+
5
+ if [ "${VECTOR_STORE}" = "elasticsearch-ja" ]; then
6
+ # Check if the ICU tokenizer plugin is installed
7
+ if ! /usr/share/elasticsearch/bin/elasticsearch-plugin list | grep -q analysis-icu; then
8
+ printf '%s\n' "Installing the ICU tokenizer plugin"
9
+ if ! /usr/share/elasticsearch/bin/elasticsearch-plugin install analysis-icu; then
10
+ printf '%s\n' "Failed to install the ICU tokenizer plugin"
11
+ exit 1
12
+ fi
13
+ fi
14
+ # Check if the Japanese language analyzer plugin is installed
15
+ if ! /usr/share/elasticsearch/bin/elasticsearch-plugin list | grep -q analysis-kuromoji; then
16
+ printf '%s\n' "Installing the Japanese language analyzer plugin"
17
+ if ! /usr/share/elasticsearch/bin/elasticsearch-plugin install analysis-kuromoji; then
18
+ printf '%s\n' "Failed to install the Japanese language analyzer plugin"
19
+ exit 1
20
+ fi
21
+ fi
22
+ fi
23
+
24
+ # Run the original entrypoint script
25
+ exec /bin/tini -- /usr/local/bin/docker-entrypoint.sh
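This entrypoint only installs the ICU and kuromoji plugins when VECTOR_STORE is elasticsearch-ja. A hedged way to exercise that path and confirm the plugins landed:

    # .env: VECTOR_STORE=elasticsearch-ja
    docker compose --profile elasticsearch-ja up -d elasticsearch
    docker compose exec elasticsearch /usr/share/elasticsearch/bin/elasticsearch-plugin list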
docker/generate_docker_compose ADDED
@@ -0,0 +1,112 @@
1
+ #!/usr/bin/env python3
2
+ import os
3
+ import re
4
+ import sys
5
+
6
+
7
+ def parse_env_example(file_path):
8
+ """
9
+ Parses the .env.example file and returns a dictionary with variable names as keys and default values as values.
10
+ """
11
+ env_vars = {}
12
+ with open(file_path, "r") as f:
13
+ for line_number, line in enumerate(f, 1):
14
+ line = line.strip()
15
+ # Ignore empty lines and comments
16
+ if not line or line.startswith("#"):
17
+ continue
18
+ # Use regex to parse KEY=VALUE
19
+ match = re.match(r"^([^=]+)=(.*)$", line)
20
+ if match:
21
+ key = match.group(1).strip()
22
+ value = match.group(2).strip()
23
+ # Remove possible quotes around the value
24
+ if (value.startswith('"') and value.endswith('"')) or (
25
+ value.startswith("'") and value.endswith("'")
26
+ ):
27
+ value = value[1:-1]
28
+ env_vars[key] = value
29
+ else:
30
+ print(f"Warning: Unable to parse line {line_number}: {line}")
31
+ return env_vars
32
+
33
+
34
+ def generate_shared_env_block(env_vars, anchor_name="shared-api-worker-env"):
35
+ """
36
+ Generates a shared environment variables block as a YAML string.
37
+ """
38
+ lines = [f"x-shared-env: &{anchor_name}"]
39
+ for key, default in env_vars.items():
40
+ if key == "COMPOSE_PROFILES":
41
+ continue
42
+ # If default value is empty, use ${KEY:-}
43
+ if default == "":
44
+ lines.append(f" {key}: ${{{key}:-}}")
45
+ else:
46
+ # If default value contains special characters, wrap it in quotes
47
+ if re.search(r"[:\s]", default):
48
+ default = f"{default}"
49
+ lines.append(f" {key}: ${{{key}:-{default}}}")
50
+ return "\n".join(lines)
51
+
52
+
53
+ def insert_shared_env(template_path, output_path, shared_env_block, header_comments):
54
+ """
55
+ Inserts the shared environment variables block and header comments into the template file,
56
+ removing any existing x-shared-env anchors, and generates the final docker-compose.yaml file.
57
+ """
58
+ with open(template_path, "r") as f:
59
+ template_content = f.read()
60
+
61
+ # Remove existing x-shared-env: &shared-api-worker-env lines
62
+ template_content = re.sub(
63
+ r"^x-shared-env: &shared-api-worker-env\s*\n?",
64
+ "",
65
+ template_content,
66
+ flags=re.MULTILINE,
67
+ )
68
+
69
+ # Prepare the final content with header comments and shared env block
70
+ final_content = f"{header_comments}\n{shared_env_block}\n\n{template_content}"
71
+
72
+ with open(output_path, "w") as f:
73
+ f.write(final_content)
74
+ print(f"Generated {output_path}")
75
+
76
+
77
+ def main():
78
+ env_example_path = ".env.example"
79
+ template_path = "docker-compose-template.yaml"
80
+ output_path = "docker-compose.yaml"
81
+ anchor_name = "shared-api-worker-env" # Can be modified as needed
82
+
83
+ # Define header comments to be added at the top of docker-compose.yaml
84
+ header_comments = (
85
+ "# ==================================================================\n"
86
+ "# WARNING: This file is auto-generated by generate_docker_compose\n"
87
+ "# Do not modify this file directly. Instead, update the .env.example\n"
88
+ "# or docker-compose-template.yaml and regenerate this file.\n"
89
+ "# ==================================================================\n"
90
+ )
91
+
92
+ # Check if required files exist
93
+ for path in [env_example_path, template_path]:
94
+ if not os.path.isfile(path):
95
+ print(f"Error: File {path} does not exist.")
96
+ sys.exit(1)
97
+
98
+ # Parse .env.example file
99
+ env_vars = parse_env_example(env_example_path)
100
+
101
+ if not env_vars:
102
+ print("Warning: No environment variables found in .env.example.")
103
+
104
+ # Generate shared environment variables block
105
+ shared_env_block = generate_shared_env_block(env_vars, anchor_name)
106
+
107
+ # Insert shared environment variables block and header comments into the template
108
+ insert_shared_env(template_path, output_path, shared_env_block, header_comments)
109
+
110
+
111
+ if __name__ == "__main__":
112
+ main()
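The generator is meant to be run from the docker/ directory, next to .env.example and docker-compose-template.yaml (a usage sketch):

    cd docker
    ./generate_docker_compose          # or: python3 generate_docker_compose
    git diff docker-compose.yaml       # review the regenerated output before committing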
docker/middleware.env.example ADDED
@@ -0,0 +1,89 @@
1
+ # ------------------------------
2
+ # Environment Variables for db Service
3
+ # ------------------------------
4
+ PGUSER=postgres
5
+ # The password for the default postgres user.
6
+ POSTGRES_PASSWORD=difyai123456
7
+ # The name of the default postgres database.
8
+ POSTGRES_DB=dify
9
+ # postgres data directory
10
+ PGDATA=/var/lib/postgresql/data/pgdata
11
+ PGDATA_HOST_VOLUME=./volumes/db/data
12
+
13
+ # Maximum number of connections to the database
14
+ # Default is 100
15
+ #
16
+ # Reference: https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-MAX-CONNECTIONS
17
+ POSTGRES_MAX_CONNECTIONS=100
18
+
19
+ # Sets the amount of shared memory used for postgres's shared buffers.
20
+ # Default is 128MB
21
+ # Recommended value: 25% of available memory
22
+ # Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-SHARED-BUFFERS
23
+ POSTGRES_SHARED_BUFFERS=128MB
24
+
25
+ # Sets the amount of memory used by each database worker for working space.
26
+ # Default is 4MB
27
+ #
28
+ # Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-WORK-MEM
29
+ POSTGRES_WORK_MEM=4MB
30
+
31
+ # Sets the amount of memory reserved for maintenance activities.
32
+ # Default is 64MB
33
+ #
34
+ # Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-MAINTENANCE-WORK-MEM
35
+ POSTGRES_MAINTENANCE_WORK_MEM=64MB
36
+
37
+ # Sets the planner's assumption about the effective cache size.
38
+ # Default is 4096MB
39
+ #
40
+ # Reference: https://www.postgresql.org/docs/current/runtime-config-query.html#GUC-EFFECTIVE-CACHE-SIZE
41
+ POSTGRES_EFFECTIVE_CACHE_SIZE=4096MB
42
+
43
+ # -----------------------------
44
+ # Environment Variables for redis Service
45
+ # -----------------------------
46
+ REDIS_HOST_VOLUME=./volumes/redis/data
47
+ REDIS_PASSWORD=difyai123456
48
+
49
+ # ------------------------------
50
+ # Environment Variables for sandbox Service
51
+ # ------------------------------
52
+ SANDBOX_API_KEY=dify-sandbox
53
+ SANDBOX_GIN_MODE=release
54
+ SANDBOX_WORKER_TIMEOUT=15
55
+ SANDBOX_ENABLE_NETWORK=true
56
+ SANDBOX_HTTP_PROXY=http://ssrf_proxy:3128
57
+ SANDBOX_HTTPS_PROXY=http://ssrf_proxy:3128
58
+ SANDBOX_PORT=8194
59
+
60
+ # ------------------------------
61
+ # Environment Variables for ssrf_proxy Service
62
+ # ------------------------------
63
+ SSRF_HTTP_PORT=3128
64
+ SSRF_COREDUMP_DIR=/var/spool/squid
65
+ SSRF_REVERSE_PROXY_PORT=8194
66
+ SSRF_SANDBOX_HOST=sandbox
67
+
68
+ # ------------------------------
69
+ # Environment Variables for weaviate Service
70
+ # ------------------------------
71
+ WEAVIATE_QUERY_DEFAULTS_LIMIT=25
72
+ WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true
73
+ WEAVIATE_DEFAULT_VECTORIZER_MODULE=none
74
+ WEAVIATE_CLUSTER_HOSTNAME=node1
75
+ WEAVIATE_AUTHENTICATION_APIKEY_ENABLED=true
76
+ WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
77
78
+ WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED=true
79
80
+ WEAVIATE_HOST_VOLUME=./volumes/weaviate
81
+
82
+ # ------------------------------
83
+ # Docker Compose Service Expose Host Port Configurations
84
+ # ------------------------------
85
+ EXPOSE_POSTGRES_PORT=5432
86
+ EXPOSE_REDIS_PORT=6379
87
+ EXPOSE_SANDBOX_PORT=8194
88
+ EXPOSE_SSRF_PROXY_PORT=3128
89
+ EXPOSE_WEAVIATE_PORT=8080
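This file backs the middleware-only deployment (databases, cache and vector store without the Dify api/web containers), typically used when running the application from source. A hedged usage sketch, assuming the docker-compose.middleware.yaml shipped alongside it:

    cd docker
    cp middleware.env.example middleware.env
    docker compose -f docker-compose.middleware.yaml --profile weaviate -p dify up -d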
docker/nginx/conf.d/default.conf.template ADDED
@@ -0,0 +1,37 @@
1
+ # Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
2
+
3
+ server {
4
+ listen ${NGINX_PORT};
5
+ server_name ${NGINX_SERVER_NAME};
6
+
7
+ location /console/api {
8
+ proxy_pass http://api:5001;
9
+ include proxy.conf;
10
+ }
11
+
12
+ location /api {
13
+ proxy_pass http://api:5001;
14
+ include proxy.conf;
15
+ }
16
+
17
+ location /v1 {
18
+ proxy_pass http://api:5001;
19
+ include proxy.conf;
20
+ }
21
+
22
+ location /files {
23
+ proxy_pass http://api:5001;
24
+ include proxy.conf;
25
+ }
26
+
27
+ location / {
28
+ proxy_pass http://web:3000;
29
+ include proxy.conf;
30
+ }
31
+
32
+ # placeholder for acme challenge location
33
+ ${ACME_CHALLENGE_LOCATION}
34
+
35
+ # placeholder for https config defined in https.conf.template
36
+ ${HTTPS_CONFIG}
37
+ }
docker/nginx/docker-entrypoint.sh ADDED
@@ -0,0 +1,39 @@
1
+ #!/bin/bash
2
+
3
+ if [ "${NGINX_HTTPS_ENABLED}" = "true" ]; then
4
+ # Check if the certificate and key files for the specified domain exist
5
+ if [ -n "${CERTBOT_DOMAIN}" ] && \
6
+ [ -f "/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_FILENAME}" ] && \
7
+ [ -f "/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_KEY_FILENAME}" ]; then
8
+ SSL_CERTIFICATE_PATH="/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_FILENAME}"
9
+ SSL_CERTIFICATE_KEY_PATH="/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_KEY_FILENAME}"
10
+ else
11
+ SSL_CERTIFICATE_PATH="/etc/ssl/${NGINX_SSL_CERT_FILENAME}"
12
+ SSL_CERTIFICATE_KEY_PATH="/etc/ssl/${NGINX_SSL_CERT_KEY_FILENAME}"
13
+ fi
14
+ export SSL_CERTIFICATE_PATH
15
+ export SSL_CERTIFICATE_KEY_PATH
16
+
17
+ # set the HTTPS_CONFIG environment variable to the content of the https.conf.template
18
+ HTTPS_CONFIG=$(envsubst < /etc/nginx/https.conf.template)
19
+ export HTTPS_CONFIG
20
+ # Substitute the HTTPS_CONFIG in the default.conf.template with content from https.conf.template
21
+ envsubst '${HTTPS_CONFIG}' < /etc/nginx/conf.d/default.conf.template > /etc/nginx/conf.d/default.conf
22
+ fi
23
+
24
+ if [ "${NGINX_ENABLE_CERTBOT_CHALLENGE}" = "true" ]; then
25
+ ACME_CHALLENGE_LOCATION='location /.well-known/acme-challenge/ { root /var/www/html; }'
26
+ else
27
+ ACME_CHALLENGE_LOCATION=''
28
+ fi
29
+ export ACME_CHALLENGE_LOCATION
30
+
31
+ env_vars=$(printenv | cut -d= -f1 | sed 's/^/$/g' | paste -sd, -)
32
+
33
+ envsubst "$env_vars" < /etc/nginx/nginx.conf.template > /etc/nginx/nginx.conf
34
+ envsubst "$env_vars" < /etc/nginx/proxy.conf.template > /etc/nginx/proxy.conf
35
+
36
+ envsubst < /etc/nginx/conf.d/default.conf.template > /etc/nginx/conf.d/default.conf
37
+
38
+ # Start Nginx using the default entrypoint
39
+ exec nginx -g 'daemon off;'
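The HTTPS handling above renders https.conf.template into the $HTTPS_CONFIG variable, which envsubst then splices into default.conf.template in place of the ${HTTPS_CONFIG} placeholder. A standalone illustration with hypothetical values (paths relative to the docker/ directory):

    export NGINX_SSL_PORT=443 NGINX_SSL_PROTOCOLS='TLSv1.2 TLSv1.3'
    export SSL_CERTIFICATE_PATH=/etc/ssl/dify.crt SSL_CERTIFICATE_KEY_PATH=/etc/ssl/dify.key
    HTTPS_CONFIG=$(envsubst < nginx/https.conf.template); export HTTPS_CONFIG
    # passing '${HTTPS_CONFIG}' limits substitution to that single variable
    envsubst '${HTTPS_CONFIG}' < nginx/conf.d/default.conf.template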
docker/nginx/https.conf.template ADDED
@@ -0,0 +1,9 @@
1
+ # Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
2
+
3
+ listen ${NGINX_SSL_PORT} ssl;
4
+ ssl_certificate ${SSL_CERTIFICATE_PATH};
5
+ ssl_certificate_key ${SSL_CERTIFICATE_KEY_PATH};
6
+ ssl_protocols ${NGINX_SSL_PROTOCOLS};
7
+ ssl_prefer_server_ciphers on;
8
+ ssl_session_cache shared:SSL:10m;
9
+ ssl_session_timeout 10m;
docker/nginx/nginx.conf.template ADDED
@@ -0,0 +1,34 @@
1
+ # Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
2
+
3
+ user nginx;
4
+ worker_processes ${NGINX_WORKER_PROCESSES};
5
+
6
+ error_log /var/log/nginx/error.log notice;
7
+ pid /var/run/nginx.pid;
8
+
9
+
10
+ events {
11
+ worker_connections 1024;
12
+ }
13
+
14
+
15
+ http {
16
+ include /etc/nginx/mime.types;
17
+ default_type application/octet-stream;
18
+
19
+ log_format main '$remote_addr - $remote_user [$time_local] "$request" '
20
+ '$status $body_bytes_sent "$http_referer" '
21
+ '"$http_user_agent" "$http_x_forwarded_for"';
22
+
23
+ access_log /var/log/nginx/access.log main;
24
+
25
+ sendfile on;
26
+ #tcp_nopush on;
27
+
28
+ keepalive_timeout ${NGINX_KEEPALIVE_TIMEOUT};
29
+
30
+ #gzip on;
31
+ client_max_body_size ${NGINX_CLIENT_MAX_BODY_SIZE};
32
+
33
+ include /etc/nginx/conf.d/*.conf;
34
+ }
docker/nginx/proxy.conf.template ADDED
@@ -0,0 +1,11 @@
1
+ # Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
2
+
3
+ proxy_set_header Host $host;
4
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
5
+ proxy_set_header X-Forwarded-Proto $scheme;
6
+ proxy_set_header X-Forwarded-Port $server_port;
7
+ proxy_http_version 1.1;
8
+ proxy_set_header Connection "";
9
+ proxy_buffering off;
10
+ proxy_read_timeout ${NGINX_PROXY_READ_TIMEOUT};
11
+ proxy_send_timeout ${NGINX_PROXY_SEND_TIMEOUT};
docker/nginx/ssl/.gitkeep ADDED
File without changes
docker/ssrf_proxy/docker-entrypoint.sh ADDED
@@ -0,0 +1,42 @@
1
+ #!/bin/bash
2
+
3
+ # Modified based on Squid OCI image entrypoint
4
+
5
+ # This entrypoint aims to forward the squid logs to stdout to assist users of
6
+ # common container related tooling (e.g., kubernetes, docker-compose, etc) to
7
+ # access the service logs.
8
+
9
+ # Moreover, it invokes the squid binary, leaving all the desired parameters to
10
+ # be provided by the "command" passed to the spawned container. If no command
11
+ # is provided by the user, the default behavior (as per the CMD statement in
12
+ # the Dockerfile) will be to use Ubuntu's default configuration [1] and run
13
+ # squid with the "-NYC" options to mimic the behavior of the Ubuntu provided
14
+ # systemd unit.
15
+
16
+ # [1] The default configuration is changed in the Dockerfile to allow local
17
+ # network connections. See the Dockerfile for further information.
18
+
19
+ echo "[ENTRYPOINT] re-creating the snakeoil self-signed certificate that was removed in the build process"
20
+ if [ ! -f /etc/ssl/private/ssl-cert-snakeoil.key ]; then
21
+ /usr/sbin/make-ssl-cert generate-default-snakeoil --force-overwrite > /dev/null 2>&1
22
+ fi
23
+
24
+ tail -F /var/log/squid/access.log 2>/dev/null &
25
+ tail -F /var/log/squid/error.log 2>/dev/null &
26
+ tail -F /var/log/squid/store.log 2>/dev/null &
27
+ tail -F /var/log/squid/cache.log 2>/dev/null &
28
+
29
+ # Replace environment variables in the template and output to the squid.conf
30
+ echo "[ENTRYPOINT] replacing environment variables in the template"
31
+ awk '{
32
+ while(match($0, /\${[A-Za-z_][A-Za-z_0-9]*}/)) {
33
+ var = substr($0, RSTART+2, RLENGTH-3)
34
+ val = ENVIRON[var]
35
+ $0 = substr($0, 1, RSTART-1) val substr($0, RSTART+RLENGTH)
36
+ }
37
+ print
38
+ }' /etc/squid/squid.conf.template > /etc/squid/squid.conf
39
+
40
+ /usr/sbin/squid -Nz
41
+ echo "[ENTRYPOINT] starting squid"
42
+ /usr/sbin/squid -f /etc/squid/squid.conf -NYC 1
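The awk loop above is a small, dependency-free stand-in for envsubst. A standalone illustration of what it does to a one-line template (hypothetical file path):

    printf 'http_port ${HTTP_PORT}\n' > /tmp/demo.conf.template
    HTTP_PORT=3128 awk '{
      while (match($0, /\$\{[A-Za-z_][A-Za-z_0-9]*\}/)) {
        var = substr($0, RSTART + 2, RLENGTH - 3)    # name between ${ and }
        $0  = substr($0, 1, RSTART - 1) ENVIRON[var] substr($0, RSTART + RLENGTH)
      }
      print
    }' /tmp/demo.conf.template                       # -> http_port 3128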
docker/ssrf_proxy/squid.conf.template ADDED
@@ -0,0 +1,51 @@
1
+ acl localnet src 0.0.0.1-0.255.255.255 # RFC 1122 "this" network (LAN)
2
+ acl localnet src 10.0.0.0/8 # RFC 1918 local private network (LAN)
3
+ acl localnet src 100.64.0.0/10 # RFC 6598 shared address space (CGN)
4
+ acl localnet src 169.254.0.0/16 # RFC 3927 link-local (directly plugged) machines
5
+ acl localnet src 172.16.0.0/12 # RFC 1918 local private network (LAN)
6
+ acl localnet src 192.168.0.0/16 # RFC 1918 local private network (LAN)
7
+ acl localnet src fc00::/7 # RFC 4193 local private network range
8
+ acl localnet src fe80::/10 # RFC 4291 link-local (directly plugged) machines
9
+ acl SSL_ports port 443
10
+ # acl SSL_ports port 1025-65535 # Enable the configuration to resolve this issue: https://github.com/langgenius/dify/issues/12792
11
+ acl Safe_ports port 80 # http
12
+ acl Safe_ports port 21 # ftp
13
+ acl Safe_ports port 443 # https
14
+ acl Safe_ports port 70 # gopher
15
+ acl Safe_ports port 210 # wais
16
+ acl Safe_ports port 1025-65535 # unregistered ports
17
+ acl Safe_ports port 280 # http-mgmt
18
+ acl Safe_ports port 488 # gss-http
19
+ acl Safe_ports port 591 # filemaker
20
+ acl Safe_ports port 777 # multiling http
21
+ acl CONNECT method CONNECT
22
+ http_access deny !Safe_ports
23
+ http_access deny CONNECT !SSL_ports
24
+ http_access allow localhost manager
25
+ http_access deny manager
26
+ http_access allow localhost
27
+ include /etc/squid/conf.d/*.conf
28
+ http_access deny all
29
+
30
+ ################################## Proxy Server ################################
31
+ http_port ${HTTP_PORT}
32
+ coredump_dir ${COREDUMP_DIR}
33
+ refresh_pattern ^ftp: 1440 20% 10080
34
+ refresh_pattern ^gopher: 1440 0% 1440
35
+ refresh_pattern -i (/cgi-bin/|\?) 0 0% 0
36
+ refresh_pattern \/(Packages|Sources)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
37
+ refresh_pattern \/Release(|\.gpg)$ 0 0% 0 refresh-ims
38
+ refresh_pattern \/InRelease$ 0 0% 0 refresh-ims
39
+ refresh_pattern \/(Translation-.*)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
40
+ refresh_pattern . 0 20% 4320
41
+
42
+
43
+ # cache_dir ufs /var/spool/squid 100 16 256
44
+ # upstream proxy, set to your own upstream proxy IP to avoid SSRF attacks
45
+ # cache_peer 172.1.1.1 parent 3128 0 no-query no-digest no-netdb-exchange default
46
+
47
+ ################################## Reverse Proxy To Sandbox ################################
48
+ http_port ${REVERSE_PROXY_PORT} accel vhost
49
+ cache_peer ${SANDBOX_HOST} parent ${SANDBOX_PORT} 0 no-query originserver
50
+ acl src_all src all
51
+ http_access allow src_all
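The final stanza turns squid into a reverse proxy as well: it listens on ${REVERSE_PROXY_PORT} (8194 by default) and relays to the sandbox, so other containers on ssrf_proxy_network can reach the sandbox via ssrf_proxy:8194. A hedged check (assumes curl is available in the api container, which shares that network):

    docker compose exec api curl -s http://ssrf_proxy:8194/health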
docker/startupscripts/init.sh ADDED
@@ -0,0 +1,13 @@
1
+ #!/usr/bin/env bash
2
+
3
+ DB_INITIALIZED="/opt/oracle/oradata/dbinit"
4
+ #[ -f ${DB_INITIALIZED} ] && exit
5
+ #touch ${DB_INITIALIZED}
6
+ if [ -f ${DB_INITIALIZED} ]; then
7
+ echo 'Init marker file exists; the database has already been initialized.'
8
+ exit
9
+ else
10
+ echo 'Init marker file does not exist; initializing the database on first startup.'
11
+ "$ORACLE_HOME"/bin/sqlplus -s "/ as sysdba" @"/opt/oracle/scripts/startup/init_user.script";
12
+ touch ${DB_INITIALIZED}
13
+ fi
docker/startupscripts/init_user.script ADDED
@@ -0,0 +1,10 @@
1
+ show pdbs;
2
+ ALTER SYSTEM SET PROCESSES=500 SCOPE=SPFILE;
3
+ alter session set container= freepdb1;
4
+ create user dify identified by dify DEFAULT TABLESPACE users quota unlimited on users;
5
+ grant DB_DEVELOPER_ROLE to dify;
6
+
7
+ BEGIN
8
+ CTX_DDL.CREATE_PREFERENCE('my_chinese_vgram_lexer','CHINESE_VGRAM_LEXER');
9
+ END;
10
+ /
docker/volumes/myscale/config/users.d/custom_users_config.xml ADDED
@@ -0,0 +1,17 @@
1
+ <clickhouse>
2
+ <users>
3
+ <default>
4
+ <password></password>
5
+ <networks>
6
+ <ip>::1</ip> <!-- change to ::/0 to allow access from all addresses -->
7
+ <ip>127.0.0.1</ip>
8
+ <ip>10.0.0.0/8</ip>
9
+ <ip>172.16.0.0/12</ip>
10
+ <ip>192.168.0.0/16</ip>
11
+ </networks>
12
+ <profile>default</profile>
13
+ <quota>default</quota>
14
+ <access_management>1</access_management>
15
+ </default>
16
+ </users>
17
+ </clickhouse>
docker/volumes/oceanbase/init.d/vec_memory.sql ADDED
@@ -0,0 +1 @@
1
+ ALTER SYSTEM SET ob_vector_memory_limit_percentage = 30;
docker/volumes/opensearch/opensearch_dashboards.yml ADDED
@@ -0,0 +1,222 @@
1
+ ---
2
+ # Copyright OpenSearch Contributors
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ # Description:
6
+ # Default configuration for OpenSearch Dashboards
7
+
8
+ # OpenSearch Dashboards is served by a back end server. This setting specifies the port to use.
9
+ # server.port: 5601
10
+
11
+ # Specifies the address to which the OpenSearch Dashboards server will bind. IP addresses and host names are both valid values.
12
+ # The default is 'localhost', which usually means remote machines will not be able to connect.
13
+ # To allow connections from remote users, set this parameter to a non-loopback address.
14
+ # server.host: "localhost"
15
+
16
+ # Enables you to specify a path to mount OpenSearch Dashboards at if you are running behind a proxy.
17
+ # Use the `server.rewriteBasePath` setting to tell OpenSearch Dashboards if it should remove the basePath
18
+ # from requests it receives, and to prevent a deprecation warning at startup.
19
+ # This setting cannot end in a slash.
20
+ # server.basePath: ""
21
+
22
+ # Specifies whether OpenSearch Dashboards should rewrite requests that are prefixed with
23
+ # `server.basePath` or require that they are rewritten by your reverse proxy.
24
+ # server.rewriteBasePath: false
25
+
26
+ # The maximum payload size in bytes for incoming server requests.
27
+ # server.maxPayloadBytes: 1048576
28
+
29
+ # The OpenSearch Dashboards server's name. This is used for display purposes.
30
+ # server.name: "your-hostname"
31
+
32
+ # The URLs of the OpenSearch instances to use for all your queries.
33
+ # opensearch.hosts: ["http://localhost:9200"]
34
+
35
+ # OpenSearch Dashboards uses an index in OpenSearch to store saved searches, visualizations and
36
+ # dashboards. OpenSearch Dashboards creates a new index if the index doesn't already exist.
37
+ # opensearchDashboards.index: ".opensearch_dashboards"
38
+
39
+ # The default application to load.
40
+ # opensearchDashboards.defaultAppId: "home"
41
+
42
+ # Setting for an optimized healthcheck that only uses the local OpenSearch node to do Dashboards healthcheck.
43
+ # This settings should be used for large clusters or for clusters with ingest heavy nodes.
44
+ # It allows Dashboards to only healthcheck using the local OpenSearch node rather than fan out requests across all nodes.
45
+ #
46
+ # It requires the user to create an OpenSearch node attribute with the same name as the value used in the setting
47
+ # This node attribute should assign all nodes of the same cluster an integer value that increments with each new cluster that is spun up
48
+ # e.g. in opensearch.yml file you would set the value to a setting using node.attr.cluster_id:
49
+ # Should only be enabled if there is a corresponding node attribute created in your OpenSearch config that matches the value here
50
+ # opensearch.optimizedHealthcheckId: "cluster_id"
51
+
52
+ # If your OpenSearch is protected with basic authentication, these settings provide
53
+ # the username and password that the OpenSearch Dashboards server uses to perform maintenance on the OpenSearch Dashboards
54
+ # index at startup. Your OpenSearch Dashboards users still need to authenticate with OpenSearch, which
55
+ # is proxied through the OpenSearch Dashboards server.
56
+ # opensearch.username: "opensearch_dashboards_system"
57
+ # opensearch.password: "pass"
58
+
59
+ # Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively.
60
+ # These settings enable SSL for outgoing requests from the OpenSearch Dashboards server to the browser.
61
+ # server.ssl.enabled: false
62
+ # server.ssl.certificate: /path/to/your/server.crt
63
+ # server.ssl.key: /path/to/your/server.key
64
+
65
+ # Optional settings that provide the paths to the PEM-format SSL certificate and key files.
66
+ # These files are used to verify the identity of OpenSearch Dashboards to OpenSearch and are required when
67
+ # xpack.security.http.ssl.client_authentication in OpenSearch is set to required.
68
+ # opensearch.ssl.certificate: /path/to/your/client.crt
69
+ # opensearch.ssl.key: /path/to/your/client.key
70
+
71
+ # Optional setting that enables you to specify a path to the PEM file for the certificate
72
+ # authority for your OpenSearch instance.
73
+ # opensearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ]
74
+
75
+ # To disregard the validity of SSL certificates, change this setting's value to 'none'.
76
+ # opensearch.ssl.verificationMode: full
77
+
78
+ # Time in milliseconds to wait for OpenSearch to respond to pings. Defaults to the value of
79
+ # the opensearch.requestTimeout setting.
80
+ # opensearch.pingTimeout: 1500
81
+
82
+ # Time in milliseconds to wait for responses from the back end or OpenSearch. This value
83
+ # must be a positive integer.
84
+ # opensearch.requestTimeout: 30000
85
+
86
+ # List of OpenSearch Dashboards client-side headers to send to OpenSearch. To send *no* client-side
87
+ # headers, set this value to [] (an empty list).
88
+ # opensearch.requestHeadersWhitelist: [ authorization ]
89
+
90
+ # Header names and values that are sent to OpenSearch. Any custom headers cannot be overwritten
91
+ # by client-side headers, regardless of the opensearch.requestHeadersWhitelist configuration.
92
+ # opensearch.customHeaders: {}
93
+
94
+ # Time in milliseconds for OpenSearch to wait for responses from shards. Set to 0 to disable.
95
+ # opensearch.shardTimeout: 30000
96
+
97
+ # Logs queries sent to OpenSearch. Requires logging.verbose set to true.
98
+ # opensearch.logQueries: false
99
+
100
+ # Specifies the path where OpenSearch Dashboards creates the process ID file.
101
+ # pid.file: /var/run/opensearchDashboards.pid
102
+
103
+ # Enables you to specify a file where OpenSearch Dashboards stores log output.
104
+ # logging.dest: stdout
105
+
106
+ # Set the value of this setting to true to suppress all logging output.
107
+ # logging.silent: false
108
+
109
+ # Set the value of this setting to true to suppress all logging output other than error messages.
110
+ # logging.quiet: false
111
+
112
+ # Set the value of this setting to true to log all events, including system usage information
113
+ # and all requests.
114
+ # logging.verbose: false
115
+
116
+ # Set the interval in milliseconds to sample system and process performance
117
+ # metrics. Minimum is 100ms. Defaults to 5000.
118
+ # ops.interval: 5000
119
+
120
+ # Specifies locale to be used for all localizable strings, dates and number formats.
121
+ # Supported languages are the following: English - en , by default , Chinese - zh-CN .
122
+ # i18n.locale: "en"
123
+
124
+ # Set the allowlist to check input graphite Url. Allowlist is the default check list.
125
+ # vis_type_timeline.graphiteAllowedUrls: ['https://www.hostedgraphite.com/UID/ACCESS_KEY/graphite']
126
+
127
+ # Set the blocklist to check input graphite Url. Blocklist is an IP list.
128
+ # Below is an example for reference
129
+ # vis_type_timeline.graphiteBlockedIPs: [
130
+ # //Loopback
131
+ # '127.0.0.0/8',
132
+ # '::1/128',
133
+ # //Link-local Address for IPv6
134
+ # 'fe80::/10',
135
+ # //Private IP address for IPv4
136
+ # '10.0.0.0/8',
137
+ # '172.16.0.0/12',
138
+ # '192.168.0.0/16',
139
+ # //Unique local address (ULA)
140
+ # 'fc00::/7',
141
+ # //Reserved IP address
142
+ # '0.0.0.0/8',
143
+ # '100.64.0.0/10',
144
+ # '192.0.0.0/24',
145
+ # '192.0.2.0/24',
146
+ # '198.18.0.0/15',
147
+ # '192.88.99.0/24',
148
+ # '198.51.100.0/24',
149
+ # '203.0.113.0/24',
150
+ # '224.0.0.0/4',
151
+ # '240.0.0.0/4',
152
+ # '255.255.255.255/32',
153
+ # '::/128',
154
+ # '2001:db8::/32',
155
+ # 'ff00::/8',
156
+ # ]
157
+ # vis_type_timeline.graphiteBlockedIPs: []
158
+
159
+ # opensearchDashboards.branding:
160
+ # logo:
161
+ # defaultUrl: ""
162
+ # darkModeUrl: ""
163
+ # mark:
164
+ # defaultUrl: ""
165
+ # darkModeUrl: ""
166
+ # loadingLogo:
167
+ # defaultUrl: ""
168
+ # darkModeUrl: ""
169
+ # faviconUrl: ""
170
+ # applicationTitle: ""
171
+
172
+ # Set the value of this setting to true to capture region blocked warnings and errors
173
+ # for your map rendering services.
174
+ # map.showRegionBlockedWarning: false
175
+
176
+ # Set the value of this setting to false to suppress search usage telemetry
177
+ # for reducing the load of OpenSearch cluster.
178
+ # data.search.usageTelemetry.enabled: false
179
+
180
+ # 2.4 renames 'wizard.enabled: false' to 'vis_builder.enabled: false'
181
+ # Set the value of this setting to false to disable VisBuilder
182
+ # functionality in Visualization.
183
+ # vis_builder.enabled: false
184
+
185
+ # 2.4 New Experimental Feature
186
+ # Set the value of this setting to true to enable the experimental multiple data source
187
+ # support feature. Use with caution.
188
+ # data_source.enabled: false
189
+ # Set the value of these settings to customize crypto materials to encryption saved credentials
190
+ # in data sources.
191
+ # data_source.encryption.wrappingKeyName: 'changeme'
192
+ # data_source.encryption.wrappingKeyNamespace: 'changeme'
193
+ # data_source.encryption.wrappingKey: [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
194
+
195
+ # 2.6 New ML Commons Dashboards Feature
196
+ # Set the value of this setting to true to enable the ml commons dashboards
197
+ # ml_commons_dashboards.enabled: false
198
+
199
+ # 2.12 New experimental Assistant Dashboards Feature
200
+ # Set the value of this setting to true to enable the assistant dashboards
201
+ # assistant.chat.enabled: false
202
+
203
+ # 2.13 New Query Assistant Feature
204
+ # Set the value of this setting to false to disable the query assistant
205
+ # observability.query_assist.enabled: false
206
+
207
+ # 2.14 Enable Ui Metric Collectors in Usage Collector
208
+ # Set the value of this setting to true to enable UI Metric collections
209
+ # usageCollection.uiMetric.enabled: false
210
+
211
+ opensearch.hosts: [https://localhost:9200]
212
+ opensearch.ssl.verificationMode: none
213
+ opensearch.username: admin
214
+ opensearch.password: 'Qazwsxedc!@#123'
215
+ opensearch.requestHeadersWhitelist: [authorization, securitytenant]
216
+
217
+ opensearch_security.multitenancy.enabled: true
218
+ opensearch_security.multitenancy.tenants.preferred: [Private, Global]
219
+ opensearch_security.readonly_mode.roles: [kibana_read_only]
220
+ # Use this setting if you are running opensearch-dashboards without https
221
+ opensearch_security.cookie.secure: false
222
+ server.host: '0.0.0.0'
docker/volumes/sandbox/conf/config.yaml ADDED
@@ -0,0 +1,14 @@
1
+ app:
2
+ port: 8194
3
+ debug: True
4
+ key: dify-sandbox
5
+ max_workers: 4
6
+ max_requests: 50
7
+ worker_timeout: 5
8
+ python_path: /usr/local/bin/python3
9
+ enable_network: True # please make sure there is no network risk in your environment
10
+ allowed_syscalls: # please leave it empty if you have no idea how seccomp works
11
+ proxy:
12
+ socks5: ''
13
+ http: ''
14
+ https: ''
docker/volumes/sandbox/conf/config.yaml.example ADDED
@@ -0,0 +1,35 @@
1
+ app:
2
+ port: 8194
3
+ debug: True
4
+ key: dify-sandbox
5
+ max_workers: 4
6
+ max_requests: 50
7
+ worker_timeout: 5
8
+ python_path: /usr/local/bin/python3
9
+ python_lib_path:
10
+ - /usr/local/lib/python3.10
11
+ - /usr/lib/python3.10
12
+ - /usr/lib/python3
13
+ - /usr/lib/x86_64-linux-gnu
14
+ - /etc/ssl/certs/ca-certificates.crt
15
+ - /etc/nsswitch.conf
16
+ - /etc/hosts
17
+ - /etc/resolv.conf
18
+ - /run/systemd/resolve/stub-resolv.conf
19
+ - /run/resolvconf/resolv.conf
20
+ - /etc/localtime
21
+ - /usr/share/zoneinfo
22
+ - /etc/timezone
23
+ # add more paths if needed
24
+ python_pip_mirror_url: https://pypi.tuna.tsinghua.edu.cn/simple
25
+ nodejs_path: /usr/local/bin/node
26
+ enable_network: True
27
+ allowed_syscalls:
28
+ - 1
29
+ - 2
30
+ - 3
31
+ # add all the syscalls which you require
32
+ proxy:
33
+ socks5: ''
34
+ http: ''
35
+ https: ''
docker/volumes/sandbox/dependencies/python-requirements.txt ADDED
File without changes
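python-requirements.txt starts out empty; it is mounted into the sandbox at /dependencies (see the sandbox service volumes above), and the sandbox is expected to install the listed packages on startup — check the dify-sandbox documentation for the exact behavior. A hedged example, run from the docker/ directory:

    printf 'requests==2.32.3\n' >> volumes/sandbox/dependencies/python-requirements.txt
    docker compose restart sandbox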