llm:
  api_type: "openai"  # or azure / ollama / groq etc.
  base_url: "YOUR_BASE_URL"
  api_key: "YOUR_API_KEY"
  model: "gpt-4-turbo"  # or gpt-3.5-turbo
  proxy: "YOUR_PROXY"  # for LLM API requests
  # timeout: 600 # Optional. If set to 0, the default value of 300 is used.
  # Details: https://azure.microsoft.com/en-us/pricing/details/cognitive-services/openai-service/
  pricing_plan: "" # Optional. Use for Azure LLM when its model name is not the same as OpenAI's
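
# Illustrative alternative (assumed values, not project defaults): a local Ollama
# setup might look like the commented block below; adjust base_url and model to
# match your own deployment.
# llm:
#   api_type: "ollama"
#   base_url: "http://127.0.0.1:11434/api"
#   model: "llama3"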


# RAG Embedding.
# For backward compatibility, if the embedding is not set and the llm's api_type is either openai or azure, the llm's config will be used. 
embedding:
  api_type: "" # openai / azure / gemini / ollama etc. Check EmbeddingType for more options.
  base_url: ""
  api_key: ""
  model: ""
  api_version: ""
  embed_batch_size: 100
  dimensions: # output dimension of embedding model
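
# Illustrative example (assumed values): an OpenAI embedding setup might look like
# the commented block below; the model name and output dimension are assumptions
# to be replaced with your own.
# embedding:
#   api_type: "openai"
#   api_key: "YOUR_API_KEY"
#   model: "text-embedding-3-small"
#   dimensions: 1536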

repair_llm_output: true  # if the LLM output is not valid JSON, try to repair it

proxy: "YOUR_PROXY"  # for tools like requests, playwright, selenium, etc.

search:
  api_type: "google"
  api_key: "YOUR_API_KEY"
  cse_id: "YOUR_CSE_ID"

browser:
  engine: "playwright"  # playwright/selenium
  browser_type: "chromium"  # playwright: chromium/firefox/webkit; selenium: chrome/firefox/edge/ie

mermaid:
  engine: "pyppeteer"
  pyppeteer_path: "/Applications/Google Chrome.app"

redis:
  host: "YOUR_HOST"
  port: 32582
  password: "YOUR_PASSWORD"
  db: "0"

s3:
  access_key: "YOUR_ACCESS_KEY"
  secret_key: "YOUR_SECRET_KEY"
  endpoint: "YOUR_ENDPOINT"
  secure: false
  bucket: "test"


azure_tts_subscription_key: "YOUR_SUBSCRIPTION_KEY"
azure_tts_region: "eastus"

iflytek_api_id: "YOUR_APP_ID"
iflytek_api_key: "YOUR_API_KEY"
iflytek_api_secret: "YOUR_API_SECRET"

metagpt_tti_url: "YOUR_MODEL_URL"

omniparse:
  api_key: "YOUR_API_KEY"
  base_url: "YOUR_BASE_URL"

models:
#  "YOUR_MODEL_NAME_1 or YOUR_API_TYPE_1": # model: "gpt-4-turbo"  # or gpt-3.5-turbo
#    api_type: "openai"  # or azure / ollama / groq etc.
#    base_url: "YOUR_BASE_URL"
#    api_key: "YOUR_API_KEY"
#    proxy: "YOUR_PROXY"  # for LLM API requests
#    # timeout: 600 # Optional. If set to 0, the default value of 300 is used.
#    # Details: https://azure.microsoft.com/en-us/pricing/details/cognitive-services/openai-service/
#    pricing_plan: "" # Optional. Use for Azure LLM when its model name is not the same as OpenAI's
#  "YOUR_MODEL_NAME_2 or YOUR_API_TYPE_2": # api_type: "openai"  # or azure / ollama / groq etc.
#    api_type: "openai"  # or azure / ollama / groq etc.
#    base_url: "YOUR_BASE_URL"
#    api_key: "YOUR_API_KEY"
#    proxy: "YOUR_PROXY"  # for LLM API requests
#    # timeout: 600 # Optional. If set to 0, the default value of 300 is used.
#    # Details: https://azure.microsoft.com/en-us/pricing/details/cognitive-services/openai-service/
#    pricing_plan: "" # Optional. Use for Azure LLM when its model name is not the same as OpenAI's
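# Illustrative example (assumed names and URL): two extra entries, one keyed by
# model name and one keyed by api_type, might look like this.
#  "gpt-4-turbo":
#    api_type: "openai"
#    api_key: "YOUR_API_KEY"
#  "ollama":
#    api_type: "ollama"
#    base_url: "http://127.0.0.1:11434/api"
#    model: "llama3"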

agentops_api_key: "YOUR_AGENTOPS_API_KEY" # get key from https://app.agentops.ai/settings/projects