M96820
committed on
setup: add .env.example
Browse files- ai-comic-factory/.env.example +114 -0
ai-comic-factory/.env.example
ADDED
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Supported values:
|
2 |
+
# - VIDEOCHAIN
|
3 |
+
# - REPLICATE
|
4 |
+
# - INFERENCE_ENDPOINT
|
5 |
+
# - INFERENCE_API
|
6 |
+
# - OPENAI
|
7 |
+
RENDERING_ENGINE="INFERENCE_ENDPOINT"
|
8 |
+
|
9 |
+
# Supported values:
|
10 |
+
# - INFERENCE_ENDPOINT
|
11 |
+
# - INFERENCE_API
|
12 |
+
# - OPENAI
|
13 |
+
# - GROQ
|
14 |
+
# - ANTHROPIC
|
15 |
+
LLM_ENGINE="INFERENCE_API"
|
16 |
+
|
17 |
+
# set this to control the number of pages
|
18 |
+
MAX_NB_PAGES=6
|
19 |
+
|
20 |
+
# Set to "true" to create artificial delays and smooth out traffic
|
21 |
+
NEXT_PUBLIC_ENABLE_RATE_LIMITER="false"
|
22 |
+
|
23 |
+
# ------------- HUGGING FACE OAUTH -------------
|
24 |
+
ENABLE_HUGGING_FACE_OAUTH=
|
25 |
+
ENABLE_HUGGING_FACE_OAUTH_WALL=
|
26 |
+
HUGGING_FACE_OAUTH_CLIENT_ID=
|
27 |
+
|
28 |
+
# in production this should be the space's domain and/or URL
|
29 |
+
HUGGING_FACE_OAUTH_REDIRECT_URL=
|
30 |
+
|
31 |
+
# this one must be kept secret (and is unused for now)
|
32 |
+
HUGGING_FACE_OAUTH_SECRET=
|
33 |
+
|
34 |
+
# ------------- PROVIDER AUTH ------------
|
35 |
+
# You only need to configure the access token(s) for the provider(s) you want to use
|
36 |
+
|
37 |
+
# HuggingFace.co token: available for the LLM engine and the RENDERING engine
|
38 |
+
AUTH_HF_API_TOKEN=
|
39 |
+
|
40 |
+
# Replicate.com token: available for the RENDERING engine
|
41 |
+
AUTH_REPLICATE_API_TOKEN=
|
42 |
+
|
43 |
+
# OpenAI.com token: available for the LLM engine and the RENDERING engine
|
44 |
+
AUTH_OPENAI_API_KEY=
|
45 |
+
|
46 |
+
# An experimental RENDERING engine (sorry it is not very documented yet, so you can use one of the other engines)
|
47 |
+
AUTH_VIDEOCHAIN_API_TOKEN=
|
48 |
+
|
49 |
+
# Groq.com key: available for the LLM engine
|
50 |
+
AUTH_GROQ_API_KEY=
|
51 |
+
|
52 |
+
# Anthropic.com key: available for the LLM engine
|
53 |
+
AUTH_ANTHROPIC_API_KEY=
|
54 |
+
|
55 |
+
# ------------- RENDERING API CONFIG --------------
|
56 |
+
|
57 |
+
# If you decide to use Replicate for the RENDERING engine
|
58 |
+
RENDERING_REPLICATE_API_MODEL="stabilityai/sdxl"
|
59 |
+
RENDERING_REPLICATE_API_MODEL_VERSION="da77bc59ee60423279fd632efb4795ab731d9e3ca9705ef3341091fb989b7eaf"
|
60 |
+
|
61 |
+
# If you decide to use a private Hugging Face Inference Endpoint for the RENDERING engine
|
62 |
+
RENDERING_HF_INFERENCE_ENDPOINT_URL="https://XXXXXXXXXX.endpoints.huggingface.cloud"
|
63 |
+
|
64 |
+
# If you decide to use a Hugging Face Inference API model for the RENDERING engine
|
65 |
+
RENDERING_HF_INFERENCE_API_BASE_MODEL="stabilityai/stable-diffusion-xl-base-1.0"
|
66 |
+
|
67 |
+
# If you decide to use a Hugging Face Inference API model for the RENDERING engine
|
68 |
+
RENDERING_HF_INFERENCE_API_REFINER_MODEL="stabilityai/stable-diffusion-xl-refiner-1.0"
|
69 |
+
|
70 |
+
# If your model returns a different file type (eg. jpg or webp) change it here
|
71 |
+
RENDERING_HF_INFERENCE_API_FILE_TYPE="image/png"
|
72 |
+
|
73 |
+
# An experimental RENDERING engine (sorry it is not very documented yet, so you can use one of the other engines)
|
74 |
+
RENDERING_VIDEOCHAIN_API_URL="http://localhost:7860"
|
75 |
+
|
76 |
+
RENDERING_OPENAI_API_BASE_URL="https://api.openai.com/v1"
|
77 |
+
RENDERING_OPENAI_API_MODEL="dall-e-3"
|
78 |
+
|
79 |
+
# ------------- LLM API CONFIG ----------------
|
80 |
+
|
81 |
+
LLM_GROQ_API_MODEL="mixtral-8x7b-32768"
|
82 |
+
|
83 |
+
# If you decide to use OpenAI for the LLM engine
|
84 |
+
LLM_OPENAI_API_BASE_URL="https://api.openai.com/v1"
|
85 |
+
LLM_OPENAI_API_MODEL="gpt-4-turbo"
|
86 |
+
|
87 |
+
# If you decide to use Anthropic (eg. Claude) for the LLM engine
|
88 |
+
# https://docs.anthropic.com/claude/docs/models-overview
|
89 |
+
LLM_ANTHROPIC_API_MODEL="claude-3-opus-20240229"
|
90 |
+
|
91 |
+
# If you decide to use a private Hugging Face Inference Endpoint for the LLM engine
|
92 |
+
LLM_HF_INFERENCE_ENDPOINT_URL=""
|
93 |
+
|
94 |
+
# If you decide to use a Hugging Face Inference API model for the LLM engine
|
95 |
+
# LLM_HF_INFERENCE_API_MODEL="HuggingFaceH4/zephyr-7b-beta"
|
96 |
+
LLM_HF_INFERENCE_API_MODEL="HuggingFaceH4/zephyr-7b-beta"
|
97 |
+
|
98 |
+
# ----------- COMMUNITY SHARING (OPTIONAL) -----------
|
99 |
+
# You don't need those community sharing options to run the AI Comic Factory
|
100 |
+
# locally or on your own server (they are meant to be used by the Hugging Face team)
|
101 |
+
NEXT_PUBLIC_ENABLE_COMMUNITY_SHARING="false"
|
102 |
+
COMMUNITY_API_URL=
|
103 |
+
COMMUNITY_API_TOKEN=
|
104 |
+
COMMUNITY_API_ID=
|
105 |
+
|
106 |
+
# ----------- CENSORSHIP (OPTIONAL) -----------
|
107 |
+
# censorship is currently disabled, but will be required when we create a "community roll"
|
108 |
+
# (a public repository of user-generated comic strips)
|
109 |
+
ENABLE_CENSORSHIP="false"
|
110 |
+
|
111 |
+
# Due to the sensitive nature of some of keywords we want to ban (users try all kind of crazy illegal things)
|
112 |
+
# the words are not put in the clear in the source code, but behind an encryption key
|
113 |
+
# (I don't want the project to be flagged by an AI robot police on GitHub or something)
|
114 |
+
SECRET_FINGERPRINT=""
|