liuhua committed · Commit 99ac12c · 1 parent: fed0197

Add api for sessions and add max_tokens for tenant_llm (#3472)

### What problem does this PR solve?

Add api for sessions and add max_tokens for tenant_llm

### Type of change

- [x] New Feature (non-breaking change which adds functionality)

---------

Co-authored-by: liuhua <[email protected]>
- api/apps/llm_app.py +33 -28
- api/apps/sdk/session.py +84 -5
- api/db/db_models.py +24 -10
- rag/llm/embedding_model.py +1 -1
- web/src/interfaces/request/llm.ts +1 -0
- web/src/locales/en.ts +8 -0
- web/src/locales/es.ts +8 -0
- web/src/locales/id.ts +8 -0
- web/src/locales/zh-traditional.ts +8 -0
- web/src/locales/zh.ts +8 -0
- web/src/pages/user-setting/setting-model/Tencent-modal/index.tsx +2 -1
- web/src/pages/user-setting/setting-model/azure-openai-modal/index.tsx +27 -1
- web/src/pages/user-setting/setting-model/bedrock-modal/index.tsx +27 -1
- web/src/pages/user-setting/setting-model/fish-audio-modal/index.tsx +27 -1
- web/src/pages/user-setting/setting-model/google-modal/index.tsx +27 -1
- web/src/pages/user-setting/setting-model/hunyuan-modal/index.tsx +1 -1
- web/src/pages/user-setting/setting-model/index.tsx +1 -1
- web/src/pages/user-setting/setting-model/ollama-modal/index.tsx +27 -1
- web/src/pages/user-setting/setting-model/spark-modal/index.tsx +27 -1
- web/src/pages/user-setting/setting-model/volcengine-modal/index.tsx +26 -13
- web/src/pages/user-setting/setting-model/yiyan-modal/index.tsx +26 -1
api/apps/llm_app.py (CHANGED)

@@ -74,9 +74,9 @@ def set_api_key():
         mdl = ChatModel[factory](
             req["api_key"], llm.llm_name, base_url=req.get("base_url"))
         try:
-            m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}],
-                             {"temperature": 0.9,'max_tokens':50})
-            if m.find("**ERROR**") >=0:
+            m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}],
+                             {"temperature": 0.9, 'max_tokens': 50})
+            if m.find("**ERROR**") >= 0:
                 raise Exception(m)
             chat_passed = True
         except Exception as e:
@@ -110,6 +110,7 @@ def set_api_key():
             llm_config[n] = req[n]
 
     for llm in LLMService.query(fid=factory):
+        llm_config["max_tokens"]=llm.max_tokens
         if not TenantLLMService.filter_update(
                 [TenantLLM.tenant_id == current_user.id,
                  TenantLLM.llm_factory == factory,
@@ -121,7 +122,8 @@ def set_api_key():
                 llm_name=llm.llm_name,
                 model_type=llm.model_type,
                 api_key=llm_config["api_key"],
-                api_base=llm_config["api_base"]
+                api_base=llm_config["api_base"],
+                max_tokens=llm_config["max_tokens"]
             )
 
     return get_json_result(data=True)
@@ -158,23 +160,23 @@ def add_llm():
         api_key = apikey_json(["bedrock_ak", "bedrock_sk", "bedrock_region"])
 
     elif factory == "LocalAI":
-        llm_name = req["llm_name"]+"___LocalAI"
+        llm_name = req["llm_name"] + "___LocalAI"
         api_key = "xxxxxxxxxxxxxxx"
-
+
     elif factory == "HuggingFace":
-        llm_name = req["llm_name"]+"___HuggingFace"
+        llm_name = req["llm_name"] + "___HuggingFace"
         api_key = "xxxxxxxxxxxxxxx"
 
     elif factory == "OpenAI-API-Compatible":
-        llm_name = req["llm_name"]+"___OpenAI-API"
-        api_key = req.get("api_key","xxxxxxxxxxxxxxx")
+        llm_name = req["llm_name"] + "___OpenAI-API"
+        api_key = req.get("api_key", "xxxxxxxxxxxxxxx")
 
-    elif factory =="XunFei Spark":
+    elif factory == "XunFei Spark":
         llm_name = req["llm_name"]
         if req["model_type"] == "chat":
             api_key = req.get("spark_api_password", "xxxxxxxxxxxxxxx")
         elif req["model_type"] == "tts":
-            api_key = apikey_json(["spark_app_id", "spark_api_secret","spark_api_key"])
+            api_key = apikey_json(["spark_app_id", "spark_api_secret", "spark_api_key"])
 
     elif factory == "BaiduYiyan":
         llm_name = req["llm_name"]
@@ -202,14 +204,15 @@ def add_llm():
         "model_type": req["model_type"],
         "llm_name": llm_name,
         "api_base": req.get("api_base", ""),
-        "api_key": api_key
+        "api_key": api_key,
+        "max_tokens": req.get("max_tokens")
     }
 
     msg = ""
     if llm["model_type"] == LLMType.EMBEDDING.value:
         mdl = EmbeddingModel[factory](
             key=llm['api_key'],
-            model_name=llm["llm_name"],
+            model_name=llm["llm_name"],
             base_url=llm["api_base"])
         try:
             arr, tc = mdl.encode(["Test if the api key is available"])
@@ -225,7 +228,7 @@ def add_llm():
         )
         try:
             m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}], {
-                …
+                "temperature": 0.9})
             if not tc:
                 raise Exception(m)
         except Exception as e:
@@ -233,8 +236,8 @@ def add_llm():
                 e)
     elif llm["model_type"] == LLMType.RERANK:
         mdl = RerankModel[factory](
-            key=llm["api_key"],
-            model_name=llm["llm_name"],
+            key=llm["api_key"],
+            model_name=llm["llm_name"],
             base_url=llm["api_base"]
         )
         try:
@@ -246,8 +249,8 @@ def add_llm():
                 e)
     elif llm["model_type"] == LLMType.IMAGE2TEXT.value:
         mdl = CvModel[factory](
-            key=llm["api_key"],
-            model_name=llm["llm_name"],
+            key=llm["api_key"],
+            model_name=llm["llm_name"],
             base_url=llm["api_base"]
         )
         try:
@@ -282,7 +285,8 @@ def add_llm():
         return get_data_error_result(message=msg)
 
     if not TenantLLMService.filter_update(
-            [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == factory, …
+            [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == factory,
+             TenantLLM.llm_name == llm["llm_name"]], llm):
         TenantLLMService.save(**llm)
 
     return get_json_result(data=True)
@@ -294,7 +298,8 @@ def add_llm():
 def delete_llm():
     req = request.json
     TenantLLMService.filter_delete(
-        …
+        [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == req["llm_factory"],
+         TenantLLM.llm_name == req["llm_name"]])
     return get_json_result(data=True)
@@ -304,7 +309,7 @@ def delete_llm():
 def delete_factory():
     req = request.json
     TenantLLMService.filter_delete(
-        …
+        [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == req["llm_factory"]])
     return get_json_result(data=True)
@@ -332,8 +337,8 @@ def my_llms():
 @manager.route('/list', methods=['GET'])
 @login_required
 def list_app():
-    self_deploied = ["Youdao","FastEmbed", "BAAI", "Ollama", "Xinference", "LocalAI", "LM-Studio"]
-    weighted = ["Youdao","FastEmbed", "BAAI"] if settings.LIGHTEN != 0 else []
+    self_deploied = ["Youdao", "FastEmbed", "BAAI", "Ollama", "Xinference", "LocalAI", "LM-Studio"]
+    weighted = ["Youdao", "FastEmbed", "BAAI"] if settings.LIGHTEN != 0 else []
     model_type = request.args.get("model_type")
     try:
         objs = TenantLLMService.query(tenant_id=current_user.id)
@@ -344,15 +349,15 @@ def list_app():
         for m in llms:
             m["available"] = m["fid"] in facts or m["llm_name"].lower() == "flag-embedding" or m["fid"] in self_deploied
 
-        llm_set = set([m["llm_name"]+"@"+m["fid"] for m in llms])
+        llm_set = set([m["llm_name"] + "@" + m["fid"] for m in llms])
         for o in objs:
-            if not o.api_key:continue
-            if o.llm_name+"@"+o.llm_factory in llm_set:continue
+            if not o.api_key: continue
+            if o.llm_name + "@" + o.llm_factory in llm_set: continue
             llms.append({"llm_name": o.llm_name, "model_type": o.model_type, "fid": o.llm_factory, "available": True})
 
         res = {}
         for m in llms:
-            if model_type and m["model_type"].find(model_type)<0:
+            if model_type and m["model_type"].find(model_type) < 0:
                 continue
             if m["fid"] not in res:
                 res[m["fid"]] = []
@@ -360,4 +365,4 @@ def list_app():
 
         return get_json_result(data=res)
     except Exception as e:
-        return server_error_response(e)
+        return server_error_response(e)
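Both `add_llm` and `set_api_key` now persist `max_tokens` on the tenant's model rows. A minimal sketch of what a client request to `add_llm` could look like with the new field; the host, route prefix, and cookie-based login session are assumptions about a typical deployment, not details this diff pins down:

```python
# Hypothetical request against the add_llm handler above; the URL and auth
# are assumptions, only the payload fields mirror the diff.
import requests

session = requests.Session()  # assumed to already carry a valid login cookie

payload = {
    "llm_factory": "Ollama",
    "llm_name": "llama3",
    "model_type": "chat",
    "api_base": "http://localhost:11434",
    "api_key": "xxxxxxxxxxxxxxx",
    "max_tokens": 8192,  # new in this PR; stored on the tenant_llm row
}
resp = session.post("http://localhost:9380/v1/llm/add_llm", json=payload)
print(resp.json())
```

Note that `set_api_key` fills `llm_config["max_tokens"]` from the factory-level `LLMService` row, so models registered through the API-key path inherit the catalog default rather than a user-supplied value.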
api/apps/sdk/session.py (CHANGED)

@@ -13,21 +13,24 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import re
 import json
 from functools import partial
 from uuid import uuid4
-
+from api.db import LLMType
 from flask import request, Response
-
+from api.db.services.dialog_service import ask
 from agent.canvas import Canvas
 from api.db import StatusEnum
 from api.db.db_models import API4Conversation
 from api.db.services.api_service import API4ConversationService
 from api.db.services.canvas_service import UserCanvasService
 from api.db.services.dialog_service import DialogService, ConversationService, chat
+from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.utils import get_uuid
 from api.utils.api_utils import get_error_data_result
 from api.utils.api_utils import get_result, token_required
+from api.db.services.llm_service import LLMBundle
 
 
 @manager.route('/chats/<chat_id>/sessions', methods=['POST'])
@@ -342,7 +345,7 @@ def agent_completion(tenant_id, agent_id):
             yield "data:" + json.dumps({"code": 500, "message": str(e),
                                         "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                        ensure_ascii=False) + "\n\n"
-        yield "data:" + json.dumps({"code": 0, "…
+        yield "data:" + json.dumps({"code": 0, "data": True}, ensure_ascii=False) + "\n\n"
 
     resp = Response(sse(), mimetype="text/event-stream")
     resp.headers.add_header("Cache-control", "no-cache")
@@ -366,7 +369,7 @@ def agent_completion(tenant_id, agent_id):
 
 @manager.route('/chats/<chat_id>/sessions', methods=['GET'])
 @token_required
-def …
+def list_session(chat_id,tenant_id):
     if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
         return get_error_data_result(message=f"You don't own the assistant {chat_id}.")
     id = request.args.get("id")
@@ -441,4 +444,80 @@ def delete(tenant_id,chat_id):
     if not conv:
         return get_error_data_result(message="The chat doesn't own the session")
     ConversationService.delete_by_id(id)
-    return get_result()
+    return get_result()
+
+
+@manager.route('/sessions/ask', methods=['POST'])
+@token_required
+def ask_about(tenant_id):
+    req = request.json
+    if not req.get("question"):
+        return get_error_data_result("`question` is required.")
+    if not req.get("dataset_ids"):
+        return get_error_data_result("`dataset_ids` is required.")
+    if not isinstance(req.get("dataset_ids"),list):
+        return get_error_data_result("`dataset_ids` should be a list.")
+    req["kb_ids"]=req.pop("dataset_ids")
+    for kb_id in req["kb_ids"]:
+        if not KnowledgebaseService.accessible(kb_id,tenant_id):
+            return get_error_data_result(f"You don't own the dataset {kb_id}.")
+        kbs = KnowledgebaseService.query(id=kb_id)
+        kb = kbs[0]
+        if kb.chunk_num == 0:
+            return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
+    uid = tenant_id
+
+    def stream():
+        nonlocal req, uid
+        try:
+            for ans in ask(req["question"], req["kb_ids"], uid):
+                yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
+        except Exception as e:
+            yield "data:" + json.dumps({"code": 500, "message": str(e),
+                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
+                                       ensure_ascii=False) + "\n\n"
+        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
+
+    resp = Response(stream(), mimetype="text/event-stream")
+    resp.headers.add_header("Cache-control", "no-cache")
+    resp.headers.add_header("Connection", "keep-alive")
+    resp.headers.add_header("X-Accel-Buffering", "no")
+    resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
+    return resp
+
+
+@manager.route('/sessions/related_questions', methods=['POST'])
+@token_required
+def related_questions(tenant_id):
+    req = request.json
+    if not req.get("question"):
+        return get_error_data_result("`question` is required.")
+    question = req["question"]
+    chat_mdl = LLMBundle(tenant_id, LLMType.CHAT)
+    prompt = """
+Objective: To generate search terms related to the user's search keywords, helping users find more valuable information.
+Instructions:
+ - Based on the keywords provided by the user, generate 5-10 related search terms.
+ - Each search term should be directly or indirectly related to the keyword, guiding the user to find more valuable information.
+ - Use common, general terms as much as possible, avoiding obscure words or technical jargon.
+ - Keep the term length between 2-4 words, concise and clear.
+ - DO NOT translate, use the language of the original keywords.
+
+### Example:
+Keywords: Chinese football
+Related search terms:
+1. Current status of Chinese football
+2. Reform of Chinese football
+3. Youth training of Chinese football
+4. Chinese football in the Asian Cup
+5. Chinese football in the World Cup
+
+Reason:
+ - When searching, users often only use one or two keywords, making it difficult to fully express their information needs.
+ - Generating related search terms can help users dig deeper into relevant information and improve search efficiency.
+ - At the same time, related terms can also help search engines better understand user needs and return more accurate search results.
+"""
+    ans = chat_mdl.chat(prompt, [{"role": "user", "content": f"""
+Keywords: {question}
+Related search terms:
+    """}], {"temperature": 0.9})
+    return get_result(data=[re.sub(r"^[0-9]\. ", "", a) for a in ans.split("\n") if re.match(r"^[0-9]\. ", a)])
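The new `ask_about` handler streams Server-Sent Events: one `data:` frame per partial answer, then a final frame whose `data` is `true`. A minimal consumer sketch; the host, the `/api/v1` prefix, and the Bearer-token header are assumptions about how these `token_required` routes are mounted, and the `answer` key on each frame is inferred from the error path above:

```python
# Hypothetical SSE consumer for the new /sessions/ask endpoint.
import json
import requests

url = "http://localhost:9380/api/v1/sessions/ask"  # assumed mount point
headers = {"Authorization": "Bearer <API_KEY>"}    # assumed auth scheme
body = {"question": "What is RAGFlow?", "dataset_ids": ["<dataset_id>"]}

with requests.post(url, json=body, headers=headers, stream=True) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data:"):
            continue  # skip the blank separator lines between SSE events
        event = json.loads(line[len("data:"):])
        if event["data"] is True:
            break  # final sentinel frame; the stream is done
        print(event["data"].get("answer", ""))
```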
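`related_questions` relies on the model answering with a numbered list, which the handler's last line strips back down to plain terms. The same post-processing in isolation, on made-up model output:

```python
import re

# Stand-in for the completion text returned by chat_mdl.chat(...).
ans = """Related search terms:
1. Current status of Chinese football
2. Reform of Chinese football
3. Youth training of Chinese football"""

# Keep only "N. term" lines and drop the numbering, exactly as the
# endpoint's closing list comprehension does.
terms = [re.sub(r"^[0-9]\. ", "", a)
         for a in ans.split("\n") if re.match(r"^[0-9]\. ", a)]
print(terms)
# ['Current status of Chinese football', 'Reform of Chinese football',
#  'Youth training of Chinese football']
```

Since the character class matches a single digit, a hypothetical item numbered `10.` or higher would be silently dropped; with the prompt capped at 5-10 terms this mostly holds, but it is a quiet edge case.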
api/db/db_models.py (CHANGED)

@@ -17,6 +17,7 @@ import logging
 import inspect
 import os
 import sys
+import typing
 import operator
 from enum import Enum
 from functools import wraps
@@ -29,10 +30,13 @@ from peewee import (
     Field, Model, Metadata
 )
 from playhouse.pool import PooledMySQLDatabase, PooledPostgresqlDatabase
+
+
 from api.db import SerializedType, ParserType
 from api import settings
 from api import utils
 
+
 def singleton(cls, *args, **kw):
     instances = {}
 
@@ -120,13 +124,13 @@ class SerializedField(LongTextField):
                 f"the serialized type {self._serialized_type} is not supported")
 
 
-def is_continuous_field(cls: …
+def is_continuous_field(cls: typing.Type) -> bool:
     if cls in CONTINUOUS_FIELD_TYPE:
         return True
     for p in cls.__bases__:
         if p in CONTINUOUS_FIELD_TYPE:
             return True
-        elif p …
+        elif p != Field and p != object:
             if is_continuous_field(p):
                 return True
     else:
@@ -158,7 +162,7 @@ class BaseModel(Model):
     def to_dict(self):
         return self.__dict__['__data__']
 
-    def to_human_model_dict(self, only_primary_with: list …
+    def to_human_model_dict(self, only_primary_with: list = None):
         model_dict = self.__dict__['__data__']
 
         if not only_primary_with:
@@ -268,6 +272,7 @@ class JsonSerializedField(SerializedField):
         super(JsonSerializedField, self).__init__(serialized_type=SerializedType.JSON, object_hook=object_hook,
                                                   object_pairs_hook=object_pairs_hook, **kwargs)
 
+
 class PooledDatabase(Enum):
     MYSQL = PooledMySQLDatabase
     POSTGRES = PooledPostgresqlDatabase
@@ -286,6 +291,7 @@ class BaseDataBase:
             self.database_connection = PooledDatabase[settings.DATABASE_TYPE.upper()].value(db_name, **database_config)
         logging.info('init database on cluster mode successfully')
 
+
 class PostgresDatabaseLock:
     def __init__(self, lock_name, timeout=10, db=None):
         self.lock_name = lock_name
@@ -330,6 +336,7 @@ class PostgresDatabaseLock:
 
         return magic
 
+
 class MysqlDatabaseLock:
     def __init__(self, lock_name, timeout=10, db=None):
         self.lock_name = lock_name
@@ -644,7 +651,7 @@ class TenantLLM(DataBaseModel):
                     index=True)
     api_key = CharField(max_length=1024, null=True, help_text="API KEY", index=True)
     api_base = CharField(max_length=255, null=True, help_text="API Base")
-
+    max_tokens = IntegerField(default=8192, index=True)
     used_tokens = IntegerField(default=0, index=True)
 
     def __str__(self):
@@ -875,8 +882,10 @@ class Dialog(DataBaseModel):
                        default="simple",
                        help_text="simple|advanced",
                        index=True)
-    prompt_config = JSONField(null=False,
-                              …
+    prompt_config = JSONField(null=False,
+                              default={"system": "", "prologue": "Hi! I'm your assistant, what can I do for you?",
+                                       "parameters": [],
+                                       "empty_response": "Sorry! No relevant content was found in the knowledge base!"})
 
     similarity_threshold = FloatField(default=0.2)
     vector_similarity_weight = FloatField(default=0.3)
@@ -890,7 +899,7 @@ class Dialog(DataBaseModel):
         null=False,
         default="1",
         help_text="it needs to insert reference index into answer or not")
-
+
     rerank_id = CharField(
         max_length=128,
         null=False,
@@ -1025,8 +1034,8 @@ def migrate_db():
         pass
     try:
         migrate(
-            migrator.add_column("tenant","tts_id",
-                                …
+            migrator.add_column("tenant", "tts_id",
+                                CharField(max_length=256, null=True, help_text="default tts model ID", index=True))
         )
     except Exception:
         pass
@@ -1055,4 +1064,9 @@ def migrate_db():
         )
     except Exception:
         pass
-
+    try:
+        migrate(
+            migrator.add_column("tenant_llm","max_tokens",IntegerField(default=8192,index=True))
+        )
+    except Exception:
+        pass
rag/llm/embedding_model.py (CHANGED)

@@ -567,7 +567,7 @@ class TogetherAIEmbed(OllamaEmbed):
     def __init__(self, key, model_name, base_url="https://api.together.xyz/v1"):
         if not base_url:
             base_url = "https://api.together.xyz/v1"
-        super().__init__(key, model_name, base_url)
+        super().__init__(key, model_name, base_url=base_url)
 
 
 class PerfXCloudEmbed(OpenAIEmbed):
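The one-line fix passes `base_url` by keyword instead of by position. `OllamaEmbed.__init__`'s signature is not shown in this diff, so the parent class below is a hypothetical stand-in; the point is that keyword binding keeps the argument attached to the right parameter even if the parent declares other parameters between `model_name` and `base_url`:

```python
# Reduced illustration of the TogetherAIEmbed change; Base is a stand-in
# for OllamaEmbed, whose real signature this diff does not show.
class Base:
    def __init__(self, key, model_name, base_url="http://localhost:11434"):
        self.key, self.model_name, self.base_url = key, model_name, base_url


class TogetherAIEmbedSketch(Base):
    def __init__(self, key, model_name, base_url="https://api.together.xyz/v1"):
        if not base_url:
            base_url = "https://api.together.xyz/v1"
        # Keyword form: base_url can only land on the parent's base_url.
        super().__init__(key, model_name, base_url=base_url)


print(TogetherAIEmbedSketch("k", "m", base_url="").base_url)
# -> https://api.together.xyz/v1
```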
web/src/interfaces/request/llm.ts (CHANGED)

@@ -4,6 +4,7 @@ export interface IAddLlmRequestBody {
   model_type: string;
   api_base?: string; // chat|embedding|speech2text|image2text
   api_key: string;
+  max_tokens: number;
 }
 
 export interface IDeleteLlmRequestBody {
web/src/locales/en.ts (CHANGED)

@@ -393,6 +393,8 @@ The above is the content you need to summarize.`,
     maxTokensMessage: 'Max Tokens is required',
     maxTokensTip:
       'This sets the maximum length of the model’s output, measured in the number of tokens (words or pieces of words).',
+    maxTokensInvalidMessage: 'Please enter a valid number for Max Tokens.',
+    maxTokensMinMessage: 'Max Tokens cannot be less than 0.',
     quote: 'Show Quote',
     quoteTip: 'Should the source of the original text be displayed?',
     selfRag: 'Self-RAG',
@@ -441,6 +443,12 @@ The above is the content you need to summarize.`,
   setting: {
     profile: 'Profile',
     profileDescription: 'Update your photo and personal details here.',
+    maxTokens: 'Max Tokens',
+    maxTokensMessage: 'Max Tokens is required',
+    maxTokensTip:
+      'This sets the maximum length of the model’s output, measured in the number of tokens (words or pieces of words).',
+    maxTokensInvalidMessage: 'Please enter a valid number for Max Tokens.',
+    maxTokensMinMessage: 'Max Tokens cannot be less than 0.',
     password: 'Password',
     passwordDescription:
       'Please enter your current password to change your password.',
web/src/locales/es.ts (CHANGED)

@@ -231,6 +231,8 @@ export default {
     maxTokensMessage: 'El máximo de tokens es obligatorio',
     maxTokensTip:
       'Esto establece la longitud máxima de la salida del modelo, medida en el número de tokens (palabras o piezas de palabras).',
+    maxTokensInvalidMessage: 'Por favor, ingresa un número válido para Max Tokens.',
+    maxTokensMinMessage: 'Max Tokens no puede ser menor que 0.',
     quote: 'Mostrar cita',
     quoteTip: '¿Debe mostrarse la fuente del texto original?',
     selfRag: 'Self-RAG',
@@ -278,6 +280,12 @@ export default {
   setting: {
     profile: 'Perfil',
     profileDescription: 'Actualiza tu foto y tus datos personales aquí.',
+    maxTokens: 'Máximo de tokens',
+    maxTokensMessage: 'El máximo de tokens es obligatorio',
+    maxTokensTip:
+      'Esto establece la longitud máxima de la salida del modelo, medida en el número de tokens (palabras o piezas de palabras).',
+    maxTokensInvalidMessage: 'Por favor, ingresa un número válido para Max Tokens.',
+    maxTokensMinMessage: 'Max Tokens no puede ser menor que 0.',
     password: 'Contraseña',
     passwordDescription:
       'Por favor ingresa tu contraseña actual para cambiarla.',
web/src/locales/id.ts (CHANGED)

@@ -401,6 +401,8 @@ export default {
     maxTokensMessage: 'Token Maksimum diperlukan',
     maxTokensTip:
       'Ini menetapkan panjang maksimum keluaran model, diukur dalam jumlah token (kata atau potongan kata).',
+    maxTokensInvalidMessage: 'Silakan masukkan angka yang valid untuk Max Tokens.',
+    maxTokensMinMessage: 'Max Tokens tidak boleh kurang dari 0.',
     quote: 'Tampilkan Kutipan',
     quoteTip: 'Haruskah sumber teks asli ditampilkan?',
     selfRag: 'Self-RAG',
@@ -450,6 +452,12 @@ export default {
   setting: {
     profile: 'Profil',
     profileDescription: 'Perbarui foto dan detail pribadi Anda di sini.',
+    maxTokens: 'Token Maksimum',
+    maxTokensMessage: 'Token Maksimum diperlukan',
+    maxTokensTip:
+      'Ini menetapkan panjang maksimum keluaran model, diukur dalam jumlah token (kata atau potongan kata).',
+    maxTokensInvalidMessage: 'Silakan masukkan angka yang valid untuk Max Tokens.',
+    maxTokensMinMessage: 'Max Tokens tidak boleh kurang dari 0.',
     password: 'Kata Sandi',
     passwordDescription:
       'Silakan masukkan kata sandi Anda saat ini untuk mengubah kata sandi Anda.',
web/src/locales/zh-traditional.ts (CHANGED)

@@ -376,6 +376,8 @@ export default {
     maxTokensMessage: '最大token數是必填項',
     maxTokensTip:
       '這設置了模型輸出的最大長度,以標記(單詞或單詞片段)的數量來衡量。',
+    maxTokensInvalidMessage: '請輸入有效的最大標記數。',
+    maxTokensMinMessage: '最大標記數不能小於 0。',
     quote: '顯示引文',
     quoteTip: '是否應該顯示原文出處?',
     selfRag: 'Self-RAG',
@@ -422,6 +424,12 @@ export default {
   setting: {
     profile: '概述',
     profileDescription: '在此更新您的照片和個人詳細信息。',
+    maxTokens: '最大token數',
+    maxTokensMessage: '最大token數是必填項',
+    maxTokensTip:
+      '這設置了模型輸出的最大長度,以標記(單詞或單詞片段)的數量來衡量。',
+    maxTokensInvalidMessage: '請輸入有效的最大標記數。',
+    maxTokensMinMessage: '最大標記數不能小於 0。',
     password: '密碼',
     passwordDescription: '請輸入您當前的密碼以更改您的密碼。',
     model: '模型提供商',
web/src/locales/zh.ts (CHANGED)

@@ -393,6 +393,8 @@ export default {
     maxTokensMessage: '最大token数是必填项',
     maxTokensTip:
       '这设置了模型输出的最大长度,以标记(单词或单词片段)的数量来衡量。',
+    maxTokensInvalidMessage: '请输入有效的最大令牌数。',
+    maxTokensMinMessage: '最大令牌数不能小于 0。',
     quote: '显示引文',
     quoteTip: '是否应该显示原文出处?',
     selfRag: 'Self-RAG',
@@ -439,6 +441,12 @@ export default {
   setting: {
     profile: '概要',
     profileDescription: '在此更新您的照片和个人详细信息。',
+    maxTokens: '最大token数',
+    maxTokensMessage: '最大token数是必填项',
+    maxTokensTip:
+      '这设置了模型输出的最大长度,以标记(单词或单词片段)的数量来衡量。',
+    maxTokensInvalidMessage: '请输入有效的最大令牌数。',
+    maxTokensMinMessage: '最大令牌数不能小于 0。',
     password: '密码',
     passwordDescription: '请输入您当前的密码以更改您的密码。',
     model: '模型提供商',
web/src/pages/user-setting/setting-model/Tencent-modal/index.tsx (CHANGED)

@@ -1,7 +1,7 @@
 import { useTranslate } from '@/hooks/common-hooks';
 import { IModalProps } from '@/interfaces/common';
 import { IAddLlmRequestBody } from '@/interfaces/request/llm';
-import { Flex, Form, Input, Modal, Select, Space } from 'antd';
+import { Flex, Form, Input, Modal, Select, Space, InputNumber } from 'antd';
 import omit from 'lodash/omit';
 
 type FieldType = IAddLlmRequestBody & {
@@ -30,6 +30,7 @@ const TencentCloudModal = ({
       ...omit(values),
       model_type: modelType,
      llm_factory: llmFactory,
+      max_tokens:16000,
     };
     console.info(data);
web/src/pages/user-setting/setting-model/azure-openai-modal/index.tsx (CHANGED)

@@ -1,7 +1,7 @@
 import { useTranslate } from '@/hooks/common-hooks';
 import { IModalProps } from '@/interfaces/common';
 import { IAddLlmRequestBody } from '@/interfaces/request/llm';
-import { Form, Input, Modal, Select, Switch } from 'antd';
+import { Form, Input, Modal, Select, Switch, InputNumber } from 'antd';
 import omit from 'lodash/omit';
 
 type FieldType = IAddLlmRequestBody & {
@@ -33,6 +33,7 @@ const AzureOpenAIModal = ({
       ...omit(values, ['vision']),
       model_type: modelType,
       llm_factory: llmFactory,
+      max_tokens:values.max_tokens,
     };
     console.info(data);
 
@@ -107,6 +108,31 @@ const AzureOpenAIModal = ({
         >
           <Input placeholder={t('apiVersionMessage')} />
         </Form.Item>
+        <Form.Item<FieldType>
+          label={t('maxTokens')}
+          name="max_tokens"
+          rules={[
+            { required: true, message: t('maxTokensMessage') },
+            {
+              type: 'number',
+              message: t('maxTokensInvalidMessage'),
+            },
+            ({ getFieldValue }) => ({
+              validator(_, value) {
+                if (value < 0) {
+                  return Promise.reject(new Error(t('maxTokensMinMessage')));
+                }
+                return Promise.resolve();
+              },
+            }),
+          ]}
+        >
+          <InputNumber
+            placeholder={t('maxTokensTip')}
+            style={{ width: '100%' }}
+          />
+        </Form.Item>
+
         <Form.Item noStyle dependencies={['model_type']}>
           {({ getFieldValue }) =>
             getFieldValue('model_type') === 'chat' && (
web/src/pages/user-setting/setting-model/bedrock-modal/index.tsx (CHANGED)

@@ -1,7 +1,7 @@
 import { useTranslate } from '@/hooks/common-hooks';
 import { IModalProps } from '@/interfaces/common';
 import { IAddLlmRequestBody } from '@/interfaces/request/llm';
-import { Flex, Form, Input, Modal, Select, Space } from 'antd';
+import { Flex, Form, Input, Modal, Select, Space, InputNumber } from 'antd';
 import { useMemo } from 'react';
 import { BedrockRegionList } from '../constant';
 
@@ -34,6 +34,7 @@ const BedrockModal = ({
     const data = {
       ...values,
       llm_factory: llmFactory,
+      max_tokens:values.max_tokens,
     };
 
     onOk?.(data);
@@ -111,6 +112,31 @@ const BedrockModal = ({
         allowClear
       ></Select>
       </Form.Item>
+      <Form.Item<FieldType>
+        label={t('maxTokens')}
+        name="max_tokens"
+        rules={[
+          { required: true, message: t('maxTokensMessage') },
+          {
+            type: 'number',
+            message: t('maxTokensInvalidMessage'),
+          },
+          ({ getFieldValue }) => ({
+            validator(_, value) {
+              if (value < 0) {
+                return Promise.reject(new Error(t('maxTokensMinMessage')));
+              }
+              return Promise.resolve();
+            },
+          }),
+        ]}
+      >
+        <InputNumber
+          placeholder={t('maxTokensTip')}
+          style={{ width: '100%' }}
+        />
+      </Form.Item>
+
     </Form>
   </Modal>
 );
web/src/pages/user-setting/setting-model/fish-audio-modal/index.tsx (CHANGED)

@@ -1,7 +1,7 @@
 import { useTranslate } from '@/hooks/common-hooks';
 import { IModalProps } from '@/interfaces/common';
 import { IAddLlmRequestBody } from '@/interfaces/request/llm';
-import { Flex, Form, Input, Modal, Select, Space } from 'antd';
+import { Flex, Form, Input, Modal, Select, Space, InputNumber } from 'antd';
 import omit from 'lodash/omit';
 
 type FieldType = IAddLlmRequestBody & {
@@ -30,6 +30,7 @@ const FishAudioModal = ({
       ...omit(values),
       model_type: modelType,
       llm_factory: llmFactory,
+      max_tokens:values.max_tokens,
     };
     console.info(data);
 
@@ -93,6 +94,31 @@ const FishAudioModal = ({
         >
           <Input placeholder={t('FishAudioRefIDMessage')} />
         </Form.Item>
+        <Form.Item<FieldType>
+          label={t('maxTokens')}
+          name="max_tokens"
+          rules={[
+            { required: true, message: t('maxTokensMessage') },
+            {
+              type: 'number',
+              message: t('maxTokensInvalidMessage'),
+            },
+            ({ getFieldValue }) => ({
+              validator(_, value) {
+                if (value < 0) {
+                  return Promise.reject(new Error(t('maxTokensMinMessage')));
+                }
+                return Promise.resolve();
+              },
+            }),
+          ]}
+        >
+          <InputNumber
+            placeholder={t('maxTokensTip')}
+            style={{ width: '100%' }}
+          />
+        </Form.Item>
+
       </Form>
     </Modal>
   );
web/src/pages/user-setting/setting-model/google-modal/index.tsx (CHANGED)

@@ -1,7 +1,7 @@
 import { useTranslate } from '@/hooks/common-hooks';
 import { IModalProps } from '@/interfaces/common';
 import { IAddLlmRequestBody } from '@/interfaces/request/llm';
-import { Form, Input, Modal, Select } from 'antd';
+import { Form, Input, Modal, Select, InputNumber } from 'antd';
 
 type FieldType = IAddLlmRequestBody & {
   google_project_id: string;
@@ -27,6 +27,7 @@ const GoogleModal = ({
     const data = {
       ...values,
       llm_factory: llmFactory,
+      max_tokens:values.max_tokens,
     };
 
     onOk?.(data);
@@ -87,6 +88,31 @@ const GoogleModal = ({
        >
         <Input placeholder={t('GoogleServiceAccountKeyMessage')} />
       </Form.Item>
+      <Form.Item<FieldType>
+        label={t('maxTokens')}
+        name="max_tokens"
+        rules={[
+          { required: true, message: t('maxTokensMessage') },
+          {
+            type: 'number',
+            message: t('maxTokensInvalidMessage'),
+          },
+          ({ getFieldValue }) => ({
+            validator(_, value) {
+              if (value < 0) {
+                return Promise.reject(new Error(t('maxTokensMinMessage')));
+              }
+              return Promise.resolve();
+            },
+          }),
+        ]}
+      >
+        <InputNumber
+          placeholder={t('maxTokensTip')}
+          style={{ width: '100%' }}
+        />
+      </Form.Item>
+
     </Form>
   </Modal>
 );
web/src/pages/user-setting/setting-model/hunyuan-modal/index.tsx (CHANGED)

@@ -1,7 +1,7 @@
 import { useTranslate } from '@/hooks/common-hooks';
 import { IModalProps } from '@/interfaces/common';
 import { IAddLlmRequestBody } from '@/interfaces/request/llm';
-import { Form, Input, Modal, Select …
+import { Form, Input, Modal, Select} from 'antd';
 import omit from 'lodash/omit';
 
 type FieldType = IAddLlmRequestBody & {
web/src/pages/user-setting/setting-model/index.tsx (CHANGED)

@@ -402,7 +402,7 @@ const UserSettingModel = () => {
         hideModal={hideTencentCloudAddingModal}
         onOk={onTencentCloudAddingOk}
         loading={TencentCloudAddingLoading}
-        llmFactory={'Tencent …
+        llmFactory={'Tencent Cloud'}
       ></TencentCloudModal>
       <SparkModal
         visible={SparkAddingVisible}
web/src/pages/user-setting/setting-model/ollama-modal/index.tsx (CHANGED)

@@ -1,7 +1,7 @@
 import { useTranslate } from '@/hooks/common-hooks';
 import { IModalProps } from '@/interfaces/common';
 import { IAddLlmRequestBody } from '@/interfaces/request/llm';
-import { Flex, Form, Input, Modal, Select, Space, Switch } from 'antd';
+import { Flex, Form, Input, Modal, Select, Space, Switch, InputNumber } from 'antd';
 import omit from 'lodash/omit';
 
 type FieldType = IAddLlmRequestBody & { vision: boolean };
@@ -45,6 +45,7 @@ const OllamaModal = ({
       ...omit(values, ['vision']),
       model_type: modelType,
       llm_factory: llmFactory,
+      max_tokens:values.max_tokens,
     };
     console.info(data);
 
@@ -136,6 +137,31 @@ const OllamaModal = ({
         >
           <Input placeholder={t('apiKeyMessage')} />
         </Form.Item>
+        <Form.Item<FieldType>
+          label={t('maxTokens')}
+          name="max_tokens"
+          rules={[
+            { required: true, message: t('maxTokensMessage') },
+            {
+              type: 'number',
+              message: t('maxTokensInvalidMessage'),
+            },
+            ({ getFieldValue }) => ({
+              validator(_, value) {
+                if (value < 0) {
+                  return Promise.reject(new Error(t('maxTokensMinMessage')));
+                }
+                return Promise.resolve();
+              },
+            }),
+          ]}
+        >
+          <InputNumber
+            placeholder={t('maxTokensTip')}
+            style={{ width: '100%' }}
+          />
+        </Form.Item>
+
         <Form.Item noStyle dependencies={['model_type']}>
           {({ getFieldValue }) =>
             getFieldValue('model_type') === 'chat' && (
web/src/pages/user-setting/setting-model/spark-modal/index.tsx (CHANGED)

@@ -1,7 +1,7 @@
 import { useTranslate } from '@/hooks/common-hooks';
 import { IModalProps } from '@/interfaces/common';
 import { IAddLlmRequestBody } from '@/interfaces/request/llm';
-import { Form, Input, Modal, Select } from 'antd';
+import { Form, Input, Modal, Select, InputNumber } from 'antd';
 import omit from 'lodash/omit';
 
 type FieldType = IAddLlmRequestBody & {
@@ -36,6 +36,7 @@ const SparkModal = ({
       ...omit(values, ['vision']),
       model_type: modelType,
       llm_factory: llmFactory,
+      max_tokens:values.max_tokens,
     };
     console.info(data);
 
@@ -128,6 +129,31 @@ const SparkModal = ({
         )
       }
       </Form.Item>
+      <Form.Item<FieldType>
+        label={t('maxTokens')}
+        name="max_tokens"
+        rules={[
+          { required: true, message: t('maxTokensMessage') },
+          {
+            type: 'number',
+            message: t('maxTokensInvalidMessage'),
+          },
+          ({ getFieldValue }) => ({
+            validator(_, value) {
+              if (value < 0) {
+                return Promise.reject(new Error(t('maxTokensMinMessage')));
+              }
+              return Promise.resolve();
+            },
+          }),
+        ]}
+      >
+        <InputNumber
+          placeholder={t('maxTokensTip')}
+          style={{ width: '100%' }}
+        />
+      </Form.Item>
+
     </Form>
   </Modal>
 );
web/src/pages/user-setting/setting-model/volcengine-modal/index.tsx (CHANGED)

@@ -1,7 +1,7 @@
 import { useTranslate } from '@/hooks/common-hooks';
 import { IModalProps } from '@/interfaces/common';
 import { IAddLlmRequestBody } from '@/interfaces/request/llm';
-import { Flex, Form, Input, Modal, Select, Space, Switch } from 'antd';
+import { Flex, Form, Input, Modal, Select, Space, Switch, InputNumber } from 'antd';
 import omit from 'lodash/omit';
 
 type FieldType = IAddLlmRequestBody & {
@@ -36,6 +36,7 @@ const VolcEngineModal = ({
       ...omit(values, ['vision']),
       model_type: modelType,
       llm_factory: llmFactory,
+      max_tokens:values.max_tokens,
     };
     console.info(data);
 
@@ -103,19 +104,31 @@ const VolcEngineModal = ({
        >
         <Input placeholder={t('ArkApiKeyMessage')} />
       </Form.Item>
-      <Form.Item
-        {(…
-        …
+      <Form.Item<FieldType>
+        label={t('maxTokens')}
+        name="max_tokens"
+        rules={[
+          { required: true, message: t('maxTokensMessage') },
+          {
+            type: 'number',
+            message: t('maxTokensInvalidMessage'),
+          },
+          ({ getFieldValue }) => ({
+            validator(_, value) {
+              if (value < 0) {
+                return Promise.reject(new Error(t('maxTokensMinMessage')));
+              }
+              return Promise.resolve();
+            },
+          }),
+        ]}
+      >
+        <InputNumber
+          placeholder={t('maxTokensTip')}
+          style={{ width: '100%' }}
+        />
       </Form.Item>
+
     </Form>
   </Modal>
 );
web/src/pages/user-setting/setting-model/yiyan-modal/index.tsx (CHANGED)

@@ -1,7 +1,7 @@
 import { useTranslate } from '@/hooks/common-hooks';
 import { IModalProps } from '@/interfaces/common';
 import { IAddLlmRequestBody } from '@/interfaces/request/llm';
-import { Form, Input, Modal, Select } from 'antd';
+import { Form, Input, Modal, Select, InputNumber } from 'antd';
 import omit from 'lodash/omit';
 
 type FieldType = IAddLlmRequestBody & {
@@ -34,6 +34,7 @@ const YiyanModal = ({
       ...omit(values, ['vision']),
       model_type: modelType,
       llm_factory: llmFactory,
+      max_tokens:values.max_tokens,
     };
     console.info(data);
 
@@ -89,6 +90,30 @@ const YiyanModal = ({
         >
           <Input placeholder={t('yiyanSKMessage')} />
         </Form.Item>
+        <Form.Item<FieldType>
+          label={t('maxTokens')}
+          name="max_tokens"
+          rules={[
+            { required: true, message: t('maxTokensMessage') },
+            {
+              type: 'number',
+              message: t('maxTokensInvalidMessage'),
+            },
+            ({ getFieldValue }) => ({
+              validator(_, value) {
+                if (value < 0) {
+                  return Promise.reject(new Error(t('maxTokensMinMessage')));
+                }
+                return Promise.resolve();
+              },
+            }),
+          ]}
+        >
+          <InputNumber
+            placeholder={t('maxTokensTip')}
+            style={{ width: '100%' }}
+          />
+        </Form.Item>
       </Form>
     </Modal>
   );