Commit
·
28e0efa
1 Parent(s):
af619ea
Refactor UI text (#3911)
Browse files

### What problem does this PR solve?
Refactor UI text
### Type of change
- [x] Documentation Update
- [x] Refactoring
Signed-off-by: jinhai <[email protected]>
- agent/component/generate.py +1 -1
- api/apps/dialog_app.py +1 -1
- api/db/services/dialog_service.py +2 -2
- docs/guides/deploy_local_llm.mdx +4 -4
- docs/guides/llm_api_key_setup.md +4 -4
- docs/guides/start_chat.md +2 -2
- docs/quickstart.mdx +1 -1
- web/src/locales/en.ts +16 -16
- web/src/locales/es.ts +1 -1
- web/src/locales/zh-traditional.ts +1 -1
- web/src/locales/zh.ts +4 -4
- web/src/pages/chat/chat-configuration-modal/assistant-setting.tsx +1 -1
agent/component/generate.py
CHANGED
@@ -96,7 +96,7 @@ class Generate(ComponentBase):
|
|
96 |
}
|
97 |
|
98 |
if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
|
99 |
-
answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
|
100 |
res = {"content": answer, "reference": reference}
|
101 |
|
102 |
return res
|
|
|
96 |
}
|
97 |
|
98 |
if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
|
99 |
+
answer += " Please set LLM API-Key in 'User Setting -> Model providers -> API-Key'"
|
100 |
res = {"content": answer, "reference": reference}
|
101 |
|
102 |
return res
|
api/apps/dialog_app.py
CHANGED
@@ -32,7 +32,7 @@ def set_dialog():
|
|
32 |
req = request.json
|
33 |
dialog_id = req.get("dialog_id")
|
34 |
name = req.get("name", "New Dialog")
|
35 |
-
description = req.get("description", "A helpful Dialog")
|
36 |
icon = req.get("icon", "")
|
37 |
top_n = req.get("top_n", 6)
|
38 |
top_k = req.get("top_k", 1024)
|
|
|
32 |
req = request.json
|
33 |
dialog_id = req.get("dialog_id")
|
34 |
name = req.get("name", "New Dialog")
|
35 |
+
description = req.get("description", "A helpful dialog")
|
36 |
icon = req.get("icon", "")
|
37 |
top_n = req.get("top_n", 6)
|
38 |
top_k = req.get("top_k", 1024)
|
api/db/services/dialog_service.py
CHANGED
@@ -266,7 +266,7 @@ def chat(dialog, messages, stream=True, **kwargs):
|
|
266 |
del c["vector"]
|
267 |
|
268 |
if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
|
269 |
-
answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
|
270 |
done_tm = timer()
|
271 |
prompt += "\n\n### Elapsed\n - Refine Question: %.1f ms\n - Keywords: %.1f ms\n - Retrieval: %.1f ms\n - LLM: %.1f ms" % (
|
272 |
(refineQ_tm - st) * 1000, (keyword_tm - refineQ_tm) * 1000, (retrieval_tm - keyword_tm) * 1000,
|
@@ -649,7 +649,7 @@ def ask(question, kb_ids, tenant_id):
|
|
649 |
del c["vector"]
|
650 |
|
651 |
if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
|
652 |
-
answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
|
653 |
return {"answer": answer, "reference": refs}
|
654 |
|
655 |
answer = ""
|
|
|
266 |
del c["vector"]
|
267 |
|
268 |
if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
|
269 |
+
answer += " Please set LLM API-Key in 'User Setting -> Model providers -> API-Key'"
|
270 |
done_tm = timer()
|
271 |
prompt += "\n\n### Elapsed\n - Refine Question: %.1f ms\n - Keywords: %.1f ms\n - Retrieval: %.1f ms\n - LLM: %.1f ms" % (
|
272 |
(refineQ_tm - st) * 1000, (keyword_tm - refineQ_tm) * 1000, (retrieval_tm - keyword_tm) * 1000,
|
|
|
649 |
del c["vector"]
|
650 |
|
651 |
if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
|
652 |
+
answer += " Please set LLM API-Key in 'User Setting -> Model providers -> API-Key'"
|
653 |
return {"answer": answer, "reference": refs}
|
654 |
|
655 |
answer = ""
|
docs/guides/deploy_local_llm.mdx
CHANGED
@@ -78,7 +78,7 @@ Ollama is running
|
|
78 |
|
79 |
### 4. Add Ollama
|
80 |
|
81 |
-
In RAGFlow, click on your logo on the top right of the page **>** **Model Providers** and add Ollama to RAGFlow:
|
82 |
|
83 |

|
84 |
|
@@ -101,7 +101,7 @@ Max retries exceeded with url: /api/chat (Caused by NewConnectionError('<urllib3
|
|
101 |
|
102 |
### 6. Update System Model Settings
|
103 |
|
104 |
-
Click on your logo **>** **Model Providers** **>** **System Model Settings** to update your model:
|
105 |
|
106 |
*You should now be able to find **llama3.2** from the dropdown list under **Chat model**, and **bge-m3** from the dropdown list under **Embedding model**.*
|
107 |
|
@@ -143,7 +143,7 @@ $ xinference launch -u mistral --model-name mistral-v0.1 --size-in-billions 7 --
|
|
143 |
```
|
144 |
### 4. Add Xinference
|
145 |
|
146 |
-
In RAGFlow, click on your logo on the top right of the page **>** **Model Providers** and add Xinference to RAGFlow:
|
147 |
|
148 |

|
149 |
|
@@ -154,7 +154,7 @@ Enter an accessible base URL, such as `http://<your-xinference-endpoint-domain>:
|
|
154 |
|
155 |
### 6. Update System Model Settings
|
156 |
|
157 |
-
Click on your logo **>** **Model Providers** **>** **System Model Settings** to update your model.
|
158 |
|
159 |
*You should now be able to find **mistral** from the dropdown list under **Chat model**.*
|
160 |
|
|
|
78 |
|
79 |
### 4. Add Ollama
|
80 |
|
81 |
+
In RAGFlow, click on your logo on the top right of the page **>** **Model providers** and add Ollama to RAGFlow:
|
82 |
|
83 |

|
84 |
|
|
|
101 |
|
102 |
### 6. Update System Model Settings
|
103 |
|
104 |
+
Click on your logo **>** **Model providers** **>** **System Model Settings** to update your model:
|
105 |
|
106 |
*You should now be able to find **llama3.2** from the dropdown list under **Chat model**, and **bge-m3** from the dropdown list under **Embedding model**.*
|
107 |
|
|
|
143 |
```
|
144 |
### 4. Add Xinference
|
145 |
|
146 |
+
In RAGFlow, click on your logo on the top right of the page **>** **Model providers** and add Xinference to RAGFlow:
|
147 |
|
148 |

|
149 |
|
|
|
154 |
|
155 |
### 6. Update System Model Settings
|
156 |
|
157 |
+
Click on your logo **>** **Model providers** **>** **System Model Settings** to update your model.
|
158 |
|
159 |
*You should now be able to find **mistral** from the dropdown list under **Chat model**.*
|
160 |
|
docs/guides/llm_api_key_setup.md
CHANGED
@@ -20,7 +20,7 @@ If you find your online LLM is not on the list, don't feel disheartened. The lis
|
|
20 |
You have two options for configuring your model API key:
|
21 |
|
22 |
- Configure it in **service_conf.yaml.template** before starting RAGFlow.
|
23 |
-
- Configure it on the **Model Providers** page after logging into RAGFlow.
|
24 |
|
25 |
### Configure model API key before starting up RAGFlow
|
26 |
|
@@ -32,7 +32,7 @@ You have two options for configuring your model API key:
|
|
32 |
3. Reboot your system for your changes to take effect.
|
33 |
4. Log into RAGFlow.
|
34 |
|
35 |
-
*After logging into RAGFlow, you will find your chosen model appears under **Added models** on the **Model Providers** page.*
|
36 |
|
37 |
### Configure model API key after logging into RAGFlow
|
38 |
|
@@ -40,9 +40,9 @@ You have two options for configuring your model API key:
|
|
40 |
After logging into RAGFlow, configuring your model API key through the **service_conf.yaml.template** file will no longer take effect.
|
41 |
:::
|
42 |
|
43 |
-
After logging into RAGFlow, you can *only* configure API Key on the **Model Providers** page:
|
44 |
|
45 |
-
1. Click on your logo on the top right of the page **>** **Model Providers**.
|
46 |
2. Find your model card under **Models to be added** and click **Add the model**:
|
47 |

|
48 |
3. Paste your model API key.
|
|
|
20 |
You have two options for configuring your model API key:
|
21 |
|
22 |
- Configure it in **service_conf.yaml.template** before starting RAGFlow.
|
23 |
+
- Configure it on the **Model providers** page after logging into RAGFlow.
|
24 |
|
25 |
### Configure model API key before starting up RAGFlow
|
26 |
|
|
|
32 |
3. Reboot your system for your changes to take effect.
|
33 |
4. Log into RAGFlow.
|
34 |
|
35 |
+
*After logging into RAGFlow, you will find your chosen model appears under **Added models** on the **Model providers** page.*
|
36 |
|
37 |
### Configure model API key after logging into RAGFlow
|
38 |
|
|
|
40 |
After logging into RAGFlow, configuring your model API key through the **service_conf.yaml.template** file will no longer take effect.
|
41 |
:::
|
42 |
|
43 |
+
After logging into RAGFlow, you can *only* configure API Key on the **Model providers** page:
|
44 |
|
45 |
+
1. Click on your logo on the top right of the page **>** **Model providers**.
|
46 |
2. Find your model card under **Models to be added** and click **Add the model**:
|
47 |

|
48 |
3. Paste your model API key.
|
docs/guides/start_chat.md
CHANGED
@@ -21,7 +21,7 @@ You start an AI conversation by creating an assistant.
|
|
21 |
- **Empty response**:
|
22 |
- If you wish to *confine* RAGFlow's answers to your knowledge bases, leave a response here. Then, when it doesn't retrieve an answer, it *uniformly* responds with what you set here.
|
23 |
- If you wish RAGFlow to *improvise* when it doesn't retrieve an answer from your knowledge bases, leave it blank, which may give rise to hallucinations.
|
24 |
-
- **Show Quote**: This is a key feature of RAGFlow and enabled by default. RAGFlow does not work like a black box. instead, it clearly shows the sources of information that its responses are based on.
|
25 |
- Select the corresponding knowledge bases. You can select one or multiple knowledge bases, but ensure that they use the same embedding model, otherwise an error would occur.
|
26 |
|
27 |
3. Update **Prompt Engine**:
|
@@ -35,7 +35,7 @@ You start an AI conversation by creating an assistant.
|
|
35 |
4. Update **Model Setting**:
|
36 |
|
37 |
- In **Model**: you select the chat model. Though you have selected the default chat model in **System Model Settings**, RAGFlow allows you to choose an alternative chat model for your dialogue.
|
38 |
-
- **Freedom** refers to the level that the LLM improvises. From **Improvise**, **Precise**, to **Balance**, each freedom level corresponds to a unique combination of **Temperature**, **Top P**, **Presence Penalty**, and **Frequency Penalty**.
|
39 |
- **Temperature**: Level of the prediction randomness of the LLM. The higher the value, the more creative the LLM is.
|
40 |
- **Top P** is also known as "nucleus sampling". See [here](https://en.wikipedia.org/wiki/Top-p_sampling) for more information.
|
41 |
- **Max Tokens**: The maximum length of the LLM's responses. Note that the responses may be curtailed if this value is set too low.
|
|
|
21 |
- **Empty response**:
|
22 |
- If you wish to *confine* RAGFlow's answers to your knowledge bases, leave a response here. Then, when it doesn't retrieve an answer, it *uniformly* responds with what you set here.
|
23 |
- If you wish RAGFlow to *improvise* when it doesn't retrieve an answer from your knowledge bases, leave it blank, which may give rise to hallucinations.
|
24 |
+
- **Show quote**: This is a key feature of RAGFlow and enabled by default. RAGFlow does not work like a black box. instead, it clearly shows the sources of information that its responses are based on.
|
25 |
- Select the corresponding knowledge bases. You can select one or multiple knowledge bases, but ensure that they use the same embedding model, otherwise an error would occur.
|
26 |
|
27 |
3. Update **Prompt Engine**:
|
|
|
35 |
4. Update **Model Setting**:
|
36 |
|
37 |
- In **Model**: you select the chat model. Though you have selected the default chat model in **System Model Settings**, RAGFlow allows you to choose an alternative chat model for your dialogue.
|
38 |
+
- **Freedom** refers to the level that the LLM improvises. From **Improvise**, **Precise**, to **Balance**, each freedom level corresponds to a unique combination of **Temperature**, **Top P**, **Presence penalty**, and **Frequency penalty**.
|
39 |
- **Temperature**: Level of the prediction randomness of the LLM. The higher the value, the more creative the LLM is.
|
40 |
- **Top P** is also known as "nucleus sampling". See [here](https://en.wikipedia.org/wiki/Top-p_sampling) for more information.
|
41 |
- **Max Tokens**: The maximum length of the LLM's responses. Note that the responses may be curtailed if this value is set too low.
|
docs/quickstart.mdx
CHANGED
@@ -235,7 +235,7 @@ RAGFlow also supports deploying LLMs locally using Ollama, Xinference, or LocalA
|
|
235 |
|
236 |
To add and configure an LLM:
|
237 |
|
238 |
-
1. Click on your logo on the top right of the page **>** **Model Providers**:
|
239 |
|
240 |

|
241 |
|
|
|
235 |
|
236 |
To add and configure an LLM:
|
237 |
|
238 |
+
1. Click on your logo on the top right of the page **>** **Model providers**:
|
239 |
|
240 |

|
241 |
|
web/src/locales/en.ts
CHANGED
@@ -99,9 +99,9 @@ export default {
|
|
99 |
disabled: 'Disable',
|
100 |
action: 'Action',
|
101 |
parsingStatus: 'Parsing Status',
|
102 |
-
processBeginAt: 'Process Begin At',
|
103 |
-
processDuration: 'Process Duration',
|
104 |
-
progressMsg: 'Progress Msg',
|
105 |
testingDescription:
|
106 |
'Conduct a retrieval test to check if RAGFlow can recover the intended content for the LLM.',
|
107 |
similarityThreshold: 'Similarity threshold',
|
@@ -151,7 +151,7 @@ export default {
|
|
151 |
chunk: 'Chunk',
|
152 |
bulk: 'Bulk',
|
153 |
cancel: 'Cancel',
|
154 |
-
rerankModel: 'Rerank Model',
|
155 |
rerankPlaceholder: 'Please select',
|
156 |
rerankTip: `If left empty, RAGFlow will use a combination of weighted keyword similarity and weighted vector cosine similarity; if a rerank model is selected, a weighted reranking score will replace the weighted vector cosine similarity.`,
|
157 |
topK: 'Top-K',
|
@@ -337,7 +337,7 @@ When you want to search the given knowledge base at first place, set a higher pa
|
|
337 |
chat: 'Chat',
|
338 |
newChat: 'New chat',
|
339 |
send: 'Send',
|
340 |
-
sendPlaceholder: 'Message the
|
341 |
chatConfiguration: 'Chat Configuration',
|
342 |
chatConfigurationDescription:
|
343 |
' Here, dress up a dedicated assistant for your special knowledge bases! 💕',
|
@@ -351,7 +351,7 @@ When you want to search the given knowledge base at first place, set a higher pa
|
|
351 |
setAnOpener: 'Set an opener',
|
352 |
setAnOpenerInitial: `Hi! I'm your assistant, what can I do for you?`,
|
353 |
setAnOpenerTip: 'How do you want to welcome your clients?',
|
354 |
-
knowledgeBases: 'Knowledgebases',
|
355 |
knowledgeBasesMessage: 'Please select',
|
356 |
knowledgeBasesTip: 'Select knowledgebases associated.',
|
357 |
system: 'System',
|
@@ -389,21 +389,21 @@ When you want to search the given knowledge base at first place, set a higher pa
|
|
389 |
topPMessage: 'Top P is required',
|
390 |
topPTip:
|
391 |
'Also known as “nucleus sampling,” this parameter sets a threshold to select a smaller set of words to sample from. It focuses on the most likely words, cutting off the less probable ones.',
|
392 |
-
presencePenalty: 'Presence Penalty',
|
393 |
-
presencePenaltyMessage: 'Presence Penalty is required',
|
394 |
presencePenaltyTip:
|
395 |
'This discourages the model from repeating the same information by penalizing words that have already appeared in the conversation.',
|
396 |
-
frequencyPenalty: 'Frequency Penalty',
|
397 |
-
frequencyPenaltyMessage: 'Frequency Penalty is required',
|
398 |
frequencyPenaltyTip:
|
399 |
'Similar to the presence penalty, this reduces the model’s tendency to repeat the same words frequently.',
|
400 |
-
maxTokens: 'Max Tokens',
|
401 |
-
maxTokensMessage: 'Max Tokens is required',
|
402 |
maxTokensTip:
|
403 |
'This sets the maximum length of the model’s output, measured in the number of tokens (words or pieces of words).',
|
404 |
maxTokensInvalidMessage: 'Please enter a valid number for Max Tokens.',
|
405 |
maxTokensMinMessage: 'Max Tokens cannot be less than 0.',
|
406 |
-
quote: 'Show Quote',
|
407 |
quoteTip: 'Should the source of the original text be displayed?',
|
408 |
selfRag: 'Self-RAG',
|
409 |
selfRagTip: 'Please refer to: https://huggingface.co/papers/2310.11511',
|
@@ -461,7 +461,7 @@ When you want to search the given knowledge base at first place, set a higher pa
|
|
461 |
password: 'Password',
|
462 |
passwordDescription:
|
463 |
'Please enter your current password to change your password.',
|
464 |
-
model: 'Model Providers',
|
465 |
modelDescription: 'Set the model parameter and API KEY here.',
|
466 |
team: 'Team',
|
467 |
system: 'System',
|
@@ -476,7 +476,7 @@ When you want to search the given knowledge base at first place, set a higher pa
|
|
476 |
colorSchemaPlaceholder: 'select your color schema',
|
477 |
bright: 'Bright',
|
478 |
dark: 'Dark',
|
479 |
-
timezone: 'Timezone',
|
480 |
timezoneMessage: 'Please input your timezone!',
|
481 |
timezonePlaceholder: 'select your timezone',
|
482 |
email: 'Email address',
|
@@ -518,7 +518,7 @@ When you want to search the given knowledge base at first place, set a higher pa
|
|
518 |
sequence2txtModel: 'Sequence2txt model',
|
519 |
sequence2txtModelTip:
|
520 |
'The default ASR model all the newly created knowledgebase will use. Use this model to translate voices to corresponding text.',
|
521 |
-
rerankModel: 'Rerank Model',
|
522 |
rerankModelTip: `The default rerank model is used to rerank chunks retrieved by users' questions.`,
|
523 |
ttsModel: 'TTS Model',
|
524 |
ttsModelTip:
|
|
|
99 |
disabled: 'Disable',
|
100 |
action: 'Action',
|
101 |
parsingStatus: 'Parsing Status',
|
102 |
+
processBeginAt: 'Begin at',
|
103 |
+
processDuration: 'Duration',
|
104 |
+
progressMsg: 'Progress',
|
105 |
testingDescription:
|
106 |
'Conduct a retrieval test to check if RAGFlow can recover the intended content for the LLM.',
|
107 |
similarityThreshold: 'Similarity threshold',
|
|
|
151 |
chunk: 'Chunk',
|
152 |
bulk: 'Bulk',
|
153 |
cancel: 'Cancel',
|
154 |
+
rerankModel: 'Rerank model',
|
155 |
rerankPlaceholder: 'Please select',
|
156 |
rerankTip: `If left empty, RAGFlow will use a combination of weighted keyword similarity and weighted vector cosine similarity; if a rerank model is selected, a weighted reranking score will replace the weighted vector cosine similarity.`,
|
157 |
topK: 'Top-K',
|
|
|
337 |
chat: 'Chat',
|
338 |
newChat: 'New chat',
|
339 |
send: 'Send',
|
340 |
+
sendPlaceholder: 'Message the assistant...',
|
341 |
chatConfiguration: 'Chat Configuration',
|
342 |
chatConfigurationDescription:
|
343 |
' Here, dress up a dedicated assistant for your special knowledge bases! 💕',
|
|
|
351 |
setAnOpener: 'Set an opener',
|
352 |
setAnOpenerInitial: `Hi! I'm your assistant, what can I do for you?`,
|
353 |
setAnOpenerTip: 'How do you want to welcome your clients?',
|
354 |
+
knowledgeBases: 'Knowledge bases',
|
355 |
knowledgeBasesMessage: 'Please select',
|
356 |
knowledgeBasesTip: 'Select knowledgebases associated.',
|
357 |
system: 'System',
|
|
|
389 |
topPMessage: 'Top P is required',
|
390 |
topPTip:
|
391 |
'Also known as “nucleus sampling,” this parameter sets a threshold to select a smaller set of words to sample from. It focuses on the most likely words, cutting off the less probable ones.',
|
392 |
+
presencePenalty: 'Presence penalty',
|
393 |
+
presencePenaltyMessage: 'Presence penalty is required',
|
394 |
presencePenaltyTip:
|
395 |
'This discourages the model from repeating the same information by penalizing words that have already appeared in the conversation.',
|
396 |
+
frequencyPenalty: 'Frequency penalty',
|
397 |
+
frequencyPenaltyMessage: 'Frequency penalty is required',
|
398 |
frequencyPenaltyTip:
|
399 |
'Similar to the presence penalty, this reduces the model’s tendency to repeat the same words frequently.',
|
400 |
+
maxTokens: 'Max tokens',
|
401 |
+
maxTokensMessage: 'Max tokens is required',
|
402 |
maxTokensTip:
|
403 |
'This sets the maximum length of the model’s output, measured in the number of tokens (words or pieces of words).',
|
404 |
maxTokensInvalidMessage: 'Please enter a valid number for Max Tokens.',
|
405 |
maxTokensMinMessage: 'Max Tokens cannot be less than 0.',
|
406 |
+
quote: 'Show quote',
|
407 |
quoteTip: 'Should the source of the original text be displayed?',
|
408 |
selfRag: 'Self-RAG',
|
409 |
selfRagTip: 'Please refer to: https://huggingface.co/papers/2310.11511',
|
|
|
461 |
password: 'Password',
|
462 |
passwordDescription:
|
463 |
'Please enter your current password to change your password.',
|
464 |
+
model: 'Model providers',
|
465 |
modelDescription: 'Set the model parameter and API KEY here.',
|
466 |
team: 'Team',
|
467 |
system: 'System',
|
|
|
476 |
colorSchemaPlaceholder: 'select your color schema',
|
477 |
bright: 'Bright',
|
478 |
dark: 'Dark',
|
479 |
+
timezone: 'Time zone',
|
480 |
timezoneMessage: 'Please input your timezone!',
|
481 |
timezonePlaceholder: 'select your timezone',
|
482 |
email: 'Email address',
|
|
|
518 |
sequence2txtModel: 'Sequence2txt model',
|
519 |
sequence2txtModelTip:
|
520 |
'The default ASR model all the newly created knowledgebase will use. Use this model to translate voices to corresponding text.',
|
521 |
+
rerankModel: 'Rerank model',
|
522 |
rerankModelTip: `The default rerank model is used to rerank chunks retrieved by users' questions.`,
|
523 |
ttsModel: 'TTS Model',
|
524 |
ttsModelTip:
|
web/src/locales/es.ts
CHANGED
@@ -98,7 +98,7 @@ export default {
|
|
98 |
processDuration: 'Duración del proceso',
|
99 |
progressMsg: 'Mensaje de progreso',
|
100 |
testingDescription:
|
101 |
-
'¡Último paso! Después del éxito, deja el resto al AI de
|
102 |
similarityThreshold: 'Umbral de similitud',
|
103 |
similarityThresholdTip:
|
104 |
'Usamos una puntuación de similitud híbrida para evaluar la distancia entre dos líneas de texto. Se pondera la similitud de palabras clave y la similitud coseno de vectores. Si la similitud entre la consulta y el fragmento es menor que este umbral, el fragmento será filtrado.',
|
|
|
98 |
processDuration: 'Duración del proceso',
|
99 |
progressMsg: 'Mensaje de progreso',
|
100 |
testingDescription:
|
101 |
+
'¡Último paso! Después del éxito, deja el resto al AI de RAGFlow.',
|
102 |
similarityThreshold: 'Umbral de similitud',
|
103 |
similarityThresholdTip:
|
104 |
'Usamos una puntuación de similitud híbrida para evaluar la distancia entre dos líneas de texto. Se pondera la similitud de palabras clave y la similitud coseno de vectores. Si la similitud entre la consulta y el fragmento es menor que este umbral, el fragmento será filtrado.',
|
web/src/locales/zh-traditional.ts
CHANGED
@@ -101,7 +101,7 @@ export default {
|
|
101 |
processBeginAt: '流程開始於',
|
102 |
processDuration: '過程持續時間',
|
103 |
progressMsg: '進度消息',
|
104 |
-
testingDescription: '最後一步!成功後,剩下的就交給
|
105 |
similarityThreshold: '相似度閾值',
|
106 |
similarityThresholdTip:
|
107 |
'我們使用混合相似度得分來評估兩行文本之間的距離。它是加權關鍵詞相似度和向量餘弦相似度。如果查詢和塊之間的相似度小於此閾值,則該塊將被過濾掉。',
|
|
|
101 |
processBeginAt: '流程開始於',
|
102 |
processDuration: '過程持續時間',
|
103 |
progressMsg: '進度消息',
|
104 |
+
testingDescription: '最後一步!成功後,剩下的就交給 RAGFlow 吧。',
|
105 |
similarityThreshold: '相似度閾值',
|
106 |
similarityThresholdTip:
|
107 |
'我們使用混合相似度得分來評估兩行文本之間的距離。它是加權關鍵詞相似度和向量餘弦相似度。如果查詢和塊之間的相似度小於此閾值,則該塊將被過濾掉。',
|
web/src/locales/zh.ts
CHANGED
@@ -98,10 +98,10 @@ export default {
|
|
98 |
disabled: '禁用',
|
99 |
action: '动作',
|
100 |
parsingStatus: '解析状态',
|
101 |
-
processBeginAt: '
|
102 |
-
processDuration: '
|
103 |
-
progressMsg: '
|
104 |
-
testingDescription: '最后一步! 成功后,剩下的就交给
|
105 |
similarityThreshold: '相似度阈值',
|
106 |
similarityThresholdTip:
|
107 |
'我们使用混合相似度得分来评估两行文本之间的距离。 它是加权关键词相似度和向量余弦相似度。 如果查询和块之间的相似度小于此阈值,则该块将被过滤掉。',
|
|
|
98 |
disabled: '禁用',
|
99 |
action: '动作',
|
100 |
parsingStatus: '解析状态',
|
101 |
+
processBeginAt: '开始于',
|
102 |
+
processDuration: '持续时间',
|
103 |
+
progressMsg: '进度',
|
104 |
+
testingDescription: '最后一步! 成功后,剩下的就交给 RAGFlow 吧。',
|
105 |
similarityThreshold: '相似度阈值',
|
106 |
similarityThresholdTip:
|
107 |
'我们使用混合相似度得分来评估两行文本之间的距离。 它是加权关键词相似度和向量余弦相似度。 如果查询和块之间的相似度小于此阈值,则该块将被过滤掉。',
|
web/src/pages/chat/chat-configuration-modal/assistant-setting.tsx
CHANGED
@@ -24,7 +24,7 @@ const AssistantSetting = ({ show, form }: ISegmentedContentProps) => {
|
|
24 |
(checked: boolean) => {
|
25 |
if (checked && !data.tts_id) {
|
26 |
message.error(`Please set TTS model firstly.
|
27 |
-
Setting >> Model Providers >> System model settings`);
|
28 |
form.setFieldValue(['prompt_config', 'tts'], false);
|
29 |
}
|
30 |
},
|
|
|
24 |
(checked: boolean) => {
|
25 |
if (checked && !data.tts_id) {
|
26 |
message.error(`Please set TTS model firstly.
|
27 |
+
Setting >> Model providers >> System model settings`);
|
28 |
form.setFieldValue(['prompt_config', 'tts'], false);
|
29 |
}
|
30 |
},
|