liuhua committed · Commit 95da4bf · Parent(s): 9e1f9a0
Fix some issues in API (#2982)
### What problem does this PR solve?
Fix some issues in API
### Type of change
- [x] Bug Fix (non-breaking change which fixes an issue)
---------
Co-authored-by: liuhua <[email protected]>
- api/apps/sdk/chat.py +32 -27
- api/apps/sdk/dataset.py +28 -14
- api/apps/sdk/doc.py +67 -12
- api/apps/sdk/session.py +1 -1
- api/db/services/knowledgebase_service.py +11 -1
- api/utils/api_utils.py +20 -1
- sdk/python/ragflow/modules/chat.py +1 -1
- sdk/python/ragflow/modules/chunk.py +3 -3
- sdk/python/ragflow/modules/dataset.py +8 -7
- sdk/python/ragflow/modules/document.py +12 -8
- sdk/python/ragflow/modules/session.py +2 -2
- sdk/python/ragflow/ragflow.py +12 -21
- sdk/python/test/t_chat.py +4 -3
- sdk/python/test/t_session.py +5 -5
api/apps/sdk/chat.py
CHANGED
```diff
@@ -18,20 +18,21 @@ from flask import request
 from api.db import StatusEnum
 from api.db.services.dialog_service import DialogService
 from api.db.services.knowledgebase_service import KnowledgebaseService
-from api.db.services.llm_service import
+from api.db.services.llm_service import TenantLLMService
 from api.db.services.user_service import TenantService
 from api.utils import get_uuid
 from api.utils.api_utils import get_error_data_result, token_required
 from api.utils.api_utils import get_result
 
 
+
 @manager.route('/chat', methods=['POST'])
 @token_required
 def create(tenant_id):
     req=request.json
-    ids= req.get("
+    ids= req.get("datasets")
     if not ids:
-        return get_error_data_result(retmsg="`
+        return get_error_data_result(retmsg="`datasets` is required")
     for kb_id in ids:
         kbs = KnowledgebaseService.query(id=kb_id,tenant_id=tenant_id)
         if not kbs:
@@ -45,6 +46,8 @@ def create(tenant_id):
     if llm:
         if "model_name" in llm:
             req["llm_id"] = llm.pop("model_name")
+            if not TenantLLMService.query(tenant_id=tenant_id,llm_name=req["llm_id"],model_type="chat"):
+                return get_error_data_result(f"`model_name` {req.get('llm_id')} doesn't exist")
         req["llm_setting"] = req.pop("llm")
     e, tenant = TenantService.get_by_id(tenant_id)
     if not e:
@@ -73,10 +76,10 @@ def create(tenant_id):
     req["top_n"] = req.get("top_n", 6)
     req["top_k"] = req.get("top_k", 1024)
     req["rerank_id"] = req.get("rerank_id", "")
-    if req.get("
-        if not TenantLLMService.query(llm_name=req
-            return get_error_data_result(
-
+    if req.get("rerank_id"):
+        if not TenantLLMService.query(tenant_id=tenant_id,llm_name=req.get("rerank_id"),model_type="rerank"):
+            return get_error_data_result(f"`rerank_model` {req.get('rerank_id')} doesn't exist")
+    if not req.get("llm_id"):
         req["llm_id"] = tenant.llm_id
     if not req.get("name"):
         return get_error_data_result(retmsg="`name` is required.")
@@ -135,7 +138,7 @@ def create(tenant_id):
     res["llm"] = res.pop("llm_setting")
     res["llm"]["model_name"] = res.pop("llm_id")
     del res["kb_ids"]
-    res["
+    res["datasets"] = req["datasets"]
     res["avatar"] = res.pop("icon")
     return get_result(data=res)
 
@@ -145,27 +148,32 @@ def update(tenant_id,chat_id):
     if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
         return get_error_data_result(retmsg='You do not own the chat')
     req =request.json
-
-
-
-
-
-
-
-
-
-
-
-
-
+    ids = req.get("datasets")
+    if "datasets" in req:
+        if not ids:
+            return get_error_data_result("`datasets` can't be empty")
+    if ids:
+        for kb_id in ids:
+            kbs = KnowledgebaseService.query(id=kb_id, tenant_id=tenant_id)
+            if not kbs:
+                return get_error_data_result(f"You don't own the dataset {kb_id}")
+            kb = kbs[0]
+            if kb.chunk_num == 0:
+                return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
+        req["kb_ids"] = ids
     llm = req.get("llm")
     if llm:
         if "model_name" in llm:
             req["llm_id"] = llm.pop("model_name")
+            if not TenantLLMService.query(tenant_id=tenant_id,llm_name=req["llm_id"],model_type="chat"):
+                return get_error_data_result(f"`model_name` {req.get('llm_id')} doesn't exist")
         req["llm_setting"] = req.pop("llm")
     e, tenant = TenantService.get_by_id(tenant_id)
     if not e:
         return get_error_data_result(retmsg="Tenant not found!")
+    if req.get("rerank_model"):
+        if not TenantLLMService.query(tenant_id=tenant_id,llm_name=req.get("rerank_model"),model_type="rerank"):
+            return get_error_data_result(f"`rerank_model` {req.get('rerank_model')} doesn't exist")
     # prompt
     prompt = req.get("prompt")
     key_mapping = {"parameters": "variables",
@@ -185,9 +193,6 @@ def update(tenant_id,chat_id):
     req["prompt_config"] = req.pop("prompt")
     e, res = DialogService.get_by_id(chat_id)
     res = res.to_json()
-    if "llm_id" in req:
-        if not TenantLLMService.query(llm_name=req["llm_id"]):
-            return get_error_data_result(retmsg="The `model_name` does not exist.")
     if "name" in req:
         if not req.get("name"):
             return get_error_data_result(retmsg="`name` is not empty.")
@@ -209,8 +214,8 @@ def update(tenant_id,chat_id):
     # avatar
     if "avatar" in req:
         req["icon"] = req.pop("avatar")
-    if "
-        req.pop("
+    if "datasets" in req:
+        req.pop("datasets")
     if not DialogService.update_by_id(chat_id, req):
         return get_error_data_result(retmsg="Chat not found!")
     return get_result()
@@ -279,7 +284,7 @@ def list_chat(tenant_id):
             return get_error_data_result(retmsg=f"Don't exist the kb {kb_id}")
         kb_list.append(kb[0].to_json())
     del res["kb_ids"]
-    res["
+    res["datasets"] = kb_list
     res["avatar"] = res.pop("icon")
     list_assts.append(res)
     return get_result(data=list_assts)
```
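Taken together, the chat.py changes tighten request validation: `datasets` is required on create, and both `model_name` and `rerank_model` must resolve to models the tenant actually owns. As a rough illustration (not part of the diff), a request against the updated endpoint might look like the sketch below; the host, token, and URL prefix are placeholders, since the actual mount point depends on the deployment.

```python
import requests  # assumes the `requests` package is installed

# Placeholder address and token; the real URL prefix depends on how the
# Flask blueprint is mounted in your RAGFlow deployment.
BASE_URL = "http://localhost:9380/api/v1"
HEADERS = {"Authorization": "Bearer <API_KEY>"}

payload = {
    "name": "my_assistant",
    "datasets": ["<dataset_id>"],           # now required; each ID must be owned by the caller
    "llm": {"model_name": "<chat_model>"},  # now checked against the tenant's chat models
}
resp = requests.post(f"{BASE_URL}/chat", json=payload, headers=HEADERS)
print(resp.json())  # a non-zero "code" plus a message such as "`datasets` is required" on a bad request
```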
api/apps/sdk/dataset.py
CHANGED
```diff
@@ -15,17 +15,17 @@
 #
 
 from flask import request
-
 from api.db import StatusEnum, FileSource
 from api.db.db_models import File
 from api.db.services.document_service import DocumentService
 from api.db.services.file2document_service import File2DocumentService
 from api.db.services.file_service import FileService
 from api.db.services.knowledgebase_service import KnowledgebaseService
+from api.db.services.llm_service import TenantLLMService
 from api.db.services.user_service import TenantService
 from api.settings import RetCode
 from api.utils import get_uuid
-from api.utils.api_utils import get_result, token_required, get_error_data_result, valid
+from api.utils.api_utils import get_result, token_required, get_error_data_result, valid,get_parser_config
 
 
 @manager.route('/dataset', methods=['POST'])
@@ -36,15 +36,17 @@ def create(tenant_id):
     permission = req.get("permission")
     language = req.get("language")
     chunk_method = req.get("chunk_method")
-
-
-
+    parser_config = req.get("parser_config")
+    valid_permission = {"me", "team"}
+    valid_language ={"Chinese", "English"}
+    valid_chunk_method = {"naive","manual","qa","table","paper","book","laws","presentation","picture","one","knowledge_graph","email"}
     check_validation=valid(permission,valid_permission,language,valid_language,chunk_method,valid_chunk_method)
     if check_validation:
         return check_validation
-
+    req["parser_config"]=get_parser_config(chunk_method,parser_config)
+    if "tenant_id" in req:
         return get_error_data_result(
-            retmsg="`tenant_id`
+            retmsg="`tenant_id` must not be provided")
     chunk_count=req.get("chunk_count")
     document_count=req.get("document_count")
     if chunk_count or document_count:
@@ -59,9 +61,13 @@ def create(tenant_id):
             retmsg="`name` is not empty string!")
     if KnowledgebaseService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value):
         return get_error_data_result(
-            retmsg="Duplicated
+            retmsg="Duplicated dataset name in creating dataset.")
     req["tenant_id"] = req['created_by'] = tenant_id
-
+    if not req.get("embedding_model"):
+        req['embedding_model'] = t.embd_id
+    else:
+        if not TenantLLMService.query(tenant_id=tenant_id,model_type="embedding", llm_name=req.get("embedding_model")):
+            return get_error_data_result(f"`embedding_model` {req.get('embedding_model')} doesn't exist")
     key_mapping = {
         "chunk_num": "chunk_count",
         "doc_num": "document_count",
@@ -116,10 +122,12 @@ def update(tenant_id,dataset_id):
     permission = req.get("permission")
     language = req.get("language")
     chunk_method = req.get("chunk_method")
-
-
-
-
+    parser_config = req.get("parser_config")
+    valid_permission = {"me", "team"}
+    valid_language = {"Chinese", "English"}
+    valid_chunk_method = {"naive", "manual", "qa", "table", "paper", "book", "laws", "presentation", "picture", "one",
+                          "knowledge_graph", "email"}
+    check_validation = valid(permission, valid_permission, language, valid_language, chunk_method, valid_chunk_method)
     if check_validation:
         return check_validation
     if "tenant_id" in req:
@@ -142,10 +150,16 @@ def update(tenant_id,dataset_id):
             return get_error_data_result(
                 retmsg="If `chunk_count` is not 0, `chunk_method` is not changeable.")
         req['parser_id'] = req.pop('chunk_method')
+        if req['parser_id'] != kb.parser_id:
+            req["parser_config"] = get_parser_config(chunk_method, parser_config)
     if "embedding_model" in req:
         if kb.chunk_num != 0 and req['embedding_model'] != kb.embd_id:
             return get_error_data_result(
                 retmsg="If `chunk_count` is not 0, `embedding_method` is not changeable.")
+        if not req.get("embedding_model"):
+            return get_error_data_result("`embedding_model` can't be empty")
+        if not TenantLLMService.query(tenant_id=tenant_id,model_type="embedding", llm_name=req.get("embedding_model")):
+            return get_error_data_result(f"`embedding_model` {req.get('embedding_model')} doesn't exist")
         req['embd_id'] = req.pop('embedding_model')
     if "name" in req:
         req["name"] = req["name"].strip()
@@ -153,7 +167,7 @@ def update(tenant_id,dataset_id):
                 and len(KnowledgebaseService.query(name=req["name"], tenant_id=tenant_id,
                                                    status=StatusEnum.VALID.value)) > 0:
             return get_error_data_result(
-                retmsg="Duplicated
+                retmsg="Duplicated dataset name in updating dataset.")
     if not KnowledgebaseService.update_by_id(kb.id, req):
         return get_error_data_result(retmsg="Update dataset error.(Database error)")
     return get_result(retcode=RetCode.SUCCESS)
```
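The `valid(...)` helper referenced above lives in api/utils/api_utils.py. A minimal standalone paraphrase of the contract it enforces (my sketch, not the repo's code):

```python
# Standalone paraphrase of the parameter validation the dataset endpoints now run.
VALID_PERMISSION = {"me", "team"}
VALID_LANGUAGE = {"Chinese", "English"}
VALID_CHUNK_METHOD = {"naive", "manual", "qa", "table", "paper", "book", "laws",
                      "presentation", "picture", "one", "knowledge_graph", "email"}

def first_invalid(permission=None, language=None, chunk_method=None):
    """Return an error message for the first out-of-range value, else None."""
    for value, allowed in ((permission, VALID_PERMISSION),
                           (language, VALID_LANGUAGE),
                           (chunk_method, VALID_CHUNK_METHOD)):
        if value and value not in allowed:
            return f"{value} not in {allowed}"
    return None

assert first_invalid(permission="team", chunk_method="qa") is None
assert first_invalid(chunk_method="pdf") is not None  # rejected: not a chunk method
```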
api/apps/sdk/doc.py
CHANGED
```diff
@@ -39,7 +39,7 @@ from api.db.services.file2document_service import File2DocumentService
 from api.db.services.file_service import FileService
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.settings import RetCode, retrievaler
-from api.utils.api_utils import construct_json_result
+from api.utils.api_utils import construct_json_result,get_parser_config
 from rag.nlp import search
 from rag.utils import rmSpace
 from rag.utils.es_conn import ELASTICSEARCH
@@ -49,6 +49,10 @@ MAXIMUM_OF_UPLOADING_FILES = 256
 
 MAXIMUM_OF_UPLOADING_FILES = 256
 
+MAXIMUM_OF_UPLOADING_FILES = 256
+
+MAXIMUM_OF_UPLOADING_FILES = 256
+
 
 @manager.route('/dataset/<dataset_id>/document', methods=['POST'])
 @token_required
@@ -61,14 +65,41 @@ def upload(dataset_id, tenant_id):
     if file_obj.filename == '':
         return get_result(
             retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)
+    # total size
+    total_size = 0
+    for file_obj in file_objs:
+        file_obj.seek(0, os.SEEK_END)
+        total_size += file_obj.tell()
+        file_obj.seek(0)
+    MAX_TOTAL_FILE_SIZE=10*1024*1024
+    if total_size > MAX_TOTAL_FILE_SIZE:
+        return get_result(
+            retmsg=f'Total file size exceeds 10MB limit! ({total_size / (1024 * 1024):.2f} MB)',
+            retcode=RetCode.ARGUMENT_ERROR)
     e, kb = KnowledgebaseService.get_by_id(dataset_id)
     if not e:
-        raise LookupError(f"Can't find the
-    err,
+        raise LookupError(f"Can't find the dataset with ID {dataset_id}!")
+    err, files= FileService.upload_document(kb, file_objs, tenant_id)
     if err:
         return get_result(
             retmsg="\n".join(err), retcode=RetCode.SERVER_ERROR)
-
+    # rename key's name
+    renamed_doc_list = []
+    for file in files:
+        doc = file[0]
+        key_mapping = {
+            "chunk_num": "chunk_count",
+            "kb_id": "dataset_id",
+            "token_num": "token_count",
+            "parser_id": "chunk_method"
+        }
+        renamed_doc = {}
+        for key, value in doc.items():
+            new_key = key_mapping.get(key, key)
+            renamed_doc[new_key] = value
+        renamed_doc["run"] = "UNSTART"
+        renamed_doc_list.append(renamed_doc)
+    return get_result(data=renamed_doc_list)
 
 
 @manager.route('/dataset/<dataset_id>/info/<document_id>', methods=['PUT'])
@@ -97,7 +128,7 @@ def update_doc(tenant_id, dataset_id, document_id):
         for d in DocumentService.query(name=req["name"], kb_id=doc.kb_id):
             if d.name == req["name"]:
                 return get_error_data_result(
-                    retmsg="Duplicated document name in the same
+                    retmsg="Duplicated document name in the same dataset.")
         if not DocumentService.update_by_id(
                 document_id, {"name": req["name"]}):
             return get_error_data_result(
@@ -110,6 +141,9 @@ def update_doc(tenant_id, dataset_id, document_id):
     if "parser_config" in req:
         DocumentService.update_parser_config(doc.id, req["parser_config"])
     if "chunk_method" in req:
+        valid_chunk_method = {"naive","manual","qa","table","paper","book","laws","presentation","picture","one","knowledge_graph","email"}
+        if req.get("chunk_method") not in valid_chunk_method:
+            return get_error_data_result(f"`chunk_method` {req['chunk_method']} doesn't exist")
         if doc.parser_id.lower() == req["chunk_method"].lower():
             return get_result()
 
@@ -122,6 +156,7 @@ def update_doc(tenant_id, dataset_id, document_id):
             "run": TaskStatus.UNSTART.value})
         if not e:
             return get_error_data_result(retmsg="Document not found!")
+        req["parser_config"] = get_parser_config(req["chunk_method"], req.get("parser_config"))
         if doc.token_num > 0:
             e = DocumentService.increment_chunk_num(doc.id, doc.kb_id, doc.token_num * -1, doc.chunk_num * -1,
                                                     doc.process_duation * -1)
@@ -182,12 +217,21 @@ def list_docs(dataset_id, tenant_id):
     for doc in docs:
         key_mapping = {
             "chunk_num": "chunk_count",
-            "kb_id": "
+            "kb_id": "dataset_id",
             "token_num": "token_count",
             "parser_id": "chunk_method"
         }
+        run_mapping = {
+            "0" :"UNSTART",
+            "1":"RUNNING",
+            "2":"CANCEL",
+            "3":"DONE",
+            "4":"FAIL"
+        }
         renamed_doc = {}
         for key, value in doc.items():
+            if key =="run":
+                renamed_doc["run"]=run_mapping.get(str(value))
             new_key = key_mapping.get(key, key)
             renamed_doc[new_key] = value
         renamed_doc_list.append(renamed_doc)
@@ -353,9 +397,10 @@ def list_chunks(tenant_id,dataset_id,document_id):
     return get_result(data=res)
 
 
+
 @manager.route('/dataset/<dataset_id>/document/<document_id>/chunk', methods=['POST'])
 @token_required
-def
+def add_chunk(tenant_id,dataset_id,document_id):
     if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
         return get_error_data_result(retmsg=f"You don't own the dataset {dataset_id}.")
     doc = DocumentService.query(id=document_id, kb_id=dataset_id)
@@ -441,6 +486,7 @@ def rm_chunk(tenant_id,dataset_id,document_id):
     return get_result()
 
 
+
 @manager.route('/dataset/<dataset_id>/document/<document_id>/chunk/<chunk_id>', methods=['PUT'])
 @token_required
 def update_chunk(tenant_id,dataset_id,document_id,chunk_id):
@@ -470,12 +516,12 @@ def update_chunk(tenant_id,dataset_id,document_id,chunk_id):
     d["content_ltks"] = rag_tokenizer.tokenize(d["content_with_weight"])
     d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
     if "important_keywords" in req:
-        if
-            return get_error_data_result("`important_keywords`
+        if not isinstance(req["important_keywords"],list):
+            return get_error_data_result("`important_keywords` should be a list")
         d["important_kwd"] = req.get("important_keywords")
         d["important_tks"] = rag_tokenizer.tokenize(" ".join(req["important_keywords"]))
     if "available" in req:
-        d["available_int"] = req["available"]
+        d["available_int"] = int(req["available"])
     embd_id = DocumentService.get_embd_id(document_id)
     embd_mdl = TenantLLMService.model_instance(
         tenant_id, LLMType.EMBEDDING.value, embd_id)
@@ -498,6 +544,7 @@ def update_chunk(tenant_id,dataset_id,document_id,chunk_id):
     return get_result()
 
 
+
 @manager.route('/retrieval', methods=['POST'])
 @token_required
 def retrieval_test(tenant_id):
@@ -505,6 +552,8 @@ def retrieval_test(tenant_id):
     if not req.get("datasets"):
         return get_error_data_result("`datasets` is required.")
     kb_ids = req["datasets"]
+    if not isinstance(kb_ids,list):
+        return get_error_data_result("`datasets` should be a list")
     kbs = KnowledgebaseService.get_by_ids(kb_ids)
     embd_nms = list(set([kb.embd_id for kb in kbs]))
     if len(embd_nms) != 1:
@@ -518,9 +567,15 @@ def retrieval_test(tenant_id):
     if "question" not in req:
         return get_error_data_result("`question` is required.")
     page = int(req.get("offset", 1))
-    size = int(req.get("limit",
+    size = int(req.get("limit", 1024))
     question = req["question"]
     doc_ids = req.get("documents", [])
+    if not isinstance(req.get("documents"),list):
+        return get_error_data_result("`documents` should be a list")
+    doc_ids_list=KnowledgebaseService.list_documents_by_ids(kb_ids)
+    for doc_id in doc_ids:
+        if doc_id not in doc_ids_list:
+            return get_error_data_result(f"You don't own the document {doc_id}")
     similarity_threshold = float(req.get("similarity_threshold", 0.2))
     vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
     top = int(req.get("top_k", 1024))
@@ -531,7 +586,7 @@ def retrieval_test(tenant_id):
     try:
         e, kb = KnowledgebaseService.get_by_id(kb_ids[0])
         if not e:
-            return get_error_data_result(retmsg="
+            return get_error_data_result(retmsg="Dataset not found!")
         embd_mdl = TenantLLMService.model_instance(
             kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)
 
```
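The new 10 MB cap in `upload` measures the combined size of all uploads before anything is written. A self-contained sketch of the same seek/tell pattern, using in-memory files instead of Flask upload objects:

```python
import io
import os

MAX_TOTAL_FILE_SIZE = 10 * 1024 * 1024  # 10 MB, matching the endpoint's limit

def total_upload_size(file_objs):
    """Sum the sizes of file-like objects, leaving each rewound for later reads."""
    total = 0
    for f in file_objs:
        f.seek(0, os.SEEK_END)  # jump to end of stream to measure its length
        total += f.tell()
        f.seek(0)               # rewind so the file can still be read afterwards
    return total

files = [io.BytesIO(b"a" * 1024), io.BytesIO(b"b" * 2048)]
assert total_upload_size(files) == 3072
assert files[0].read(4) == b"aaaa"  # streams remain readable after the check
```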
api/apps/sdk/session.py
CHANGED
```diff
@@ -199,7 +199,7 @@ def list(chat_id,tenant_id):
             "content": chunk["content_with_weight"],
             "document_id": chunk["doc_id"],
             "document_name": chunk["docnm_kwd"],
-            "
+            "dataset_id": chunk["kb_id"],
             "image_id": chunk["img_id"],
             "similarity": chunk["similarity"],
             "vector_similarity": chunk["vector_similarity"],
```
api/db/services/knowledgebase_service.py
CHANGED
```diff
@@ -14,13 +14,23 @@
 # limitations under the License.
 #
 from api.db import StatusEnum, TenantPermission
-from api.db.db_models import Knowledgebase, DB, Tenant, User, UserTenant
+from api.db.db_models import Knowledgebase, DB, Tenant, User, UserTenant,Document
 from api.db.services.common_service import CommonService
 
 
 class KnowledgebaseService(CommonService):
     model = Knowledgebase
 
+    @classmethod
+    @DB.connection_context()
+    def list_documents_by_ids(cls,kb_ids):
+        doc_ids=cls.model.select(Document.id.alias("document_id")).join(Document,on=(cls.model.id == Document.kb_id)).where(
+            cls.model.id.in_(kb_ids)
+        )
+        doc_ids =list(doc_ids.dicts())
+        doc_ids = [doc["document_id"] for doc in doc_ids]
+        return doc_ids
+
     @classmethod
     @DB.connection_context()
     def get_by_tenant_ids(cls, joined_tenant_ids, user_id,
```
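The new classmethod backs the ownership check in the retrieval endpoint: it joins Knowledgebase to Document via peewee and returns every document ID under the given datasets. A toy illustration of how the endpoint consumes the result (the IDs below are made up):

```python
# Stand-in for KnowledgebaseService.list_documents_by_ids(kb_ids); the real call
# runs the peewee join shown in the diff above.
owned_doc_ids = ["doc_a", "doc_b", "doc_c"]

requested = ["doc_a", "doc_x"]
for doc_id in requested:
    if doc_id not in owned_doc_ids:
        print(f"You don't own the document {doc_id}")  # fires for doc_x
```

Since the endpoint does one membership test per requested ID, wrapping the result in a `set()` would make each lookup O(1); the diff keeps a plain list.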
api/utils/api_utils.py
CHANGED
```diff
@@ -337,4 +337,23 @@ def valid(permission,valid_permission,language,valid_language,chunk_method,valid_chunk_method):
 
 def valid_parameter(parameter,valid_values):
     if parameter and parameter not in valid_values:
-        return get_error_data_result(f"{parameter} not in {valid_values}")
+        return get_error_data_result(f"{parameter} not in {valid_values}")
+
+def get_parser_config(chunk_method,parser_config):
+    if parser_config:
+        return parser_config
+    if not chunk_method:
+        chunk_method = "naive"
+    key_mapping={"naive":{"chunk_token_num": 128, "delimiter": "\\n!?;。;!?", "html4excel": False,"layout_recognize": True, "raptor": {"user_raptor": False}},
+                 "qa":{"raptor":{"use_raptor":False}},
+                 "resume":None,
+                 "manual":{"raptor":{"use_raptor":False}},
+                 "table":None,
+                 "paper":{"raptor":{"use_raptor":False}},
+                 "book":{"raptor":{"use_raptor":False}},
+                 "laws":{"raptor":{"use_raptor":False}},
+                 "presentation":{"raptor":{"use_raptor":False}},
+                 "one":None,
+                 "knowledge_graph":{"chunk_token_num":8192,"delimiter":"\\n!?;。;!?","entity_types":["organization","person","location","event","time"]}}
+    parser_config=key_mapping[chunk_method]
+    return parser_config
```
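Assuming the module is importable from the repo, the new helper behaves as follows: an explicit `parser_config` is returned untouched, otherwise a per-method default is looked up (the `\\n` in the delimiter strings is a literal backslash-n, as in the source):

```python
from api.utils.api_utils import get_parser_config

# An explicit config wins:
assert get_parser_config("naive", {"chunk_token_num": 256}) == {"chunk_token_num": 256}

# A missing chunk_method falls back to the "naive" defaults:
print(get_parser_config(None, None))
# {'chunk_token_num': 128, 'delimiter': '\\n!?;。;!?', 'html4excel': False, ...}

# Per-method defaults; "table", "one" and "resume" map to None:
assert get_parser_config("qa", None) == {"raptor": {"use_raptor": False}}
assert get_parser_config("table", None) is None
```

Worth noting: `picture` and `email` pass the endpoints' `valid_chunk_method` check but have no entry in `key_mapping`, so `get_parser_config("picture", None)` would raise `KeyError`.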
sdk/python/ragflow/modules/chat.py
CHANGED
```diff
@@ -9,7 +9,7 @@ class Chat(Base):
         self.id = ""
         self.name = "assistant"
         self.avatar = "path/to/avatar"
-        self.
+        self.datasets = ["kb1"]
         self.llm = Chat.LLM(rag, {})
         self.prompt = Chat.Prompt(rag, {})
         super().__init__(rag, res_dict)
```
sdk/python/ragflow/modules/chunk.py
CHANGED
```diff
@@ -8,10 +8,10 @@ class Chunk(Base):
         self.important_keywords = []
         self.create_time = ""
         self.create_timestamp = 0.0
-        self.
+        self.dataset_id = None
         self.document_name = ""
         self.document_id = ""
-        self.available =
+        self.available = True
         for k in list(res_dict.keys()):
             if k not in self.__dict__:
                 res_dict.pop(k)
@@ -19,7 +19,7 @@ class Chunk(Base):
 
 
     def update(self,update_message:dict):
-        res = self.put(f"/dataset/{self.
+        res = self.put(f"/dataset/{self.dataset_id}/document/{self.document_id}/chunk/{self.id}",update_message)
         res = res.json()
         if res.get("code") != 0 :
             raise Exception(res["message"])
```
sdk/python/ragflow/modules/dataset.py
CHANGED
```diff
@@ -10,10 +10,6 @@ from .base import Base
 class DataSet(Base):
     class ParserConfig(Base):
         def __init__(self, rag, res_dict):
-            self.chunk_token_count = 128
-            self.layout_recognize = True
-            self.delimiter = '\n!?。;!?'
-            self.task_page_size = 12
             super().__init__(rag, res_dict)
 
     def __init__(self, rag, res_dict):
@@ -43,11 +39,16 @@ class DataSet(Base):
 
     def upload_documents(self,document_list: List[dict]):
         url = f"/dataset/{self.id}/document"
-        files = [("file",(ele["
+        files = [("file",(ele["displayed_name"],ele["blob"])) for ele in document_list]
         res = self.post(path=url,json=None,files=files)
         res = res.json()
-        if res.get("code")
-
+        if res.get("code") == 0:
+            doc_list=[]
+            for doc in res["data"]:
+                document = Document(self.rag,doc)
+                doc_list.append(document)
+            return doc_list
+        raise Exception(res.get("message"))
 
     def list_documents(self, id: str = None, keywords: str = None, offset: int =1, limit: int = 1024, orderby: str = "create_time", desc: bool = True):
         res = self.get(f"/dataset/{self.id}/info",params={"id": id,"keywords": keywords,"offset": offset,"limit": limit,"orderby": orderby,"desc": desc})
```
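With the new payload shape, each upload entry carries a `displayed_name` and a raw `blob`, and on success the method now returns `Document` objects instead of nothing. A usage sketch with placeholder credentials and file name:

```python
from ragflow import RAGFlow

rag = RAGFlow("<API_KEY>", "http://localhost:9380")  # placeholder key and address
ds = rag.create_dataset(name="demo_dataset")

with open("manual.pdf", "rb") as f:
    docs = ds.upload_documents([{"displayed_name": "manual.pdf", "blob": f.read()}])

print([doc.id for doc in docs])  # Document objects come back on success
```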
sdk/python/ragflow/modules/document.py
CHANGED
```diff
@@ -5,12 +5,16 @@ from typing import List
 
 
 class Document(Base):
+    class ParserConfig(Base):
+        def __init__(self, rag, res_dict):
+            super().__init__(rag, res_dict)
+
     def __init__(self, rag, res_dict):
         self.id = ""
         self.name = ""
         self.thumbnail = None
-        self.
-        self.chunk_method = ""
+        self.dataset_id = None
+        self.chunk_method = "naive"
         self.parser_config = {"pages": [[1, 1000000]]}
         self.source_type = "local"
         self.type = ""
@@ -31,14 +35,14 @@ class Document(Base):
 
 
     def update(self, update_message: dict):
-        res = self.put(f'/dataset/{self.
+        res = self.put(f'/dataset/{self.dataset_id}/info/{self.id}',
                        update_message)
         res = res.json()
         if res.get("code") != 0:
             raise Exception(res["message"])
 
     def download(self):
-        res = self.get(f"/dataset/{self.
+        res = self.get(f"/dataset/{self.dataset_id}/document/{self.id}")
         try:
             res = res.json()
             raise Exception(res.get("message"))
@@ -48,7 +52,7 @@ class Document(Base):
 
     def list_chunks(self,offset=0, limit=30, keywords="", id:str=None):
         data={"document_id": self.id,"keywords": keywords,"offset":offset,"limit":limit,"id":id}
-        res = self.get(f'/dataset/{self.
+        res = self.get(f'/dataset/{self.dataset_id}/document/{self.id}/chunk', data)
         res = res.json()
         if res.get("code") == 0:
             chunks=[]
@@ -59,15 +63,15 @@ class Document(Base):
         raise Exception(res.get("message"))
 
 
-    def add_chunk(self, content: str):
-        res = self.post(f'/dataset/{self.
+    def add_chunk(self, content: str,important_keywords:List[str]=[]):
+        res = self.post(f'/dataset/{self.dataset_id}/document/{self.id}/chunk', {"content":content,"important_keywords":important_keywords})
         res = res.json()
         if res.get("code") == 0:
             return Chunk(self.rag,res["data"].get("chunk"))
         raise Exception(res.get("message"))
 
     def delete_chunks(self,ids:List[str]):
-        res = self.rm(f"dataset/{self.
+        res = self.rm(f"dataset/{self.dataset_id}/document/{self.id}/chunk",{"ids":ids})
         res = res.json()
         if res.get("code")!=0:
             raise Exception(res.get("message"))
```
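Since every chunk route now includes the dataset ID, the Document helpers resolve their URLs through `self.dataset_id`, and `add_chunk` grows an optional keyword list. Continuing the upload sketch from the dataset module above (`ds` is a DataSet, and `list_documents` is assumed to return `Document` objects as elsewhere in the SDK):

```python
docs = ds.list_documents(keywords="manual")
doc = docs[0]

# important_keywords is new and optional; it defaults to an empty list.
chunk = doc.add_chunk(content="RAGFlow is an open-source RAG engine.",
                      important_keywords=["RAGFlow", "RAG"])
chunk.update({"content": "RAGFlow is an open-source RAG engine.", "available": True})
doc.delete_chunks([chunk.id])  # routes resolve via doc.dataset_id
```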
sdk/python/ragflow/modules/session.py
CHANGED
```diff
@@ -40,7 +40,7 @@ class Session(Base):
             "content": chunk["content_with_weight"],
             "document_id": chunk["doc_id"],
             "document_name": chunk["docnm_kwd"],
-            "
+            "dataset_id": chunk["kb_id"],
             "image_id": chunk["img_id"],
             "similarity": chunk["similarity"],
             "vector_similarity": chunk["vector_similarity"],
@@ -75,7 +75,7 @@ class Chunk(Base):
         self.content = None
         self.document_id = ""
         self.document_name = ""
-        self.
+        self.dataset_id = ""
         self.image_id = ""
         self.similarity = None
         self.vector_similarity = None
```
sdk/python/ragflow/ragflow.py
CHANGED
```diff
@@ -49,17 +49,11 @@ class RAGFlow:
         return res
 
     def create_dataset(self, name: str, avatar: str = "", description: str = "", language: str = "English",
-                       permission: str = "me",
-                       document_count: int = 0, chunk_count: int = 0, chunk_method: str = "naive",
+                       permission: str = "me",chunk_method: str = "naive",
                        parser_config: DataSet.ParserConfig = None) -> DataSet:
-        if parser_config is None:
-            parser_config = DataSet.ParserConfig(self, {"chunk_token_count": 128, "layout_recognize": True,
-                                                        "delimiter": "\n!?。;!?", "task_page_size": 12})
-        parser_config = parser_config.to_json()
         res = self.post("/dataset",
                         {"name": name, "avatar": avatar, "description": description, "language": language,
-                         "permission": permission,
-                         "document_count": document_count, "chunk_count": chunk_count, "chunk_method": chunk_method,
+                         "permission": permission, "chunk_method": chunk_method,
                          "parser_config": parser_config
                          }
                         )
@@ -93,11 +87,11 @@ class RAGFlow:
             return result_list
         raise Exception(res["message"])
 
-    def create_chat(self, name: str, avatar: str = "",
+    def create_chat(self, name: str, avatar: str = "", datasets: List[DataSet] = [],
                     llm: Chat.LLM = None, prompt: Chat.Prompt = None) -> Chat:
-
-        for dataset in
-
+        dataset_list = []
+        for dataset in datasets:
+            dataset_list.append(dataset.to_json())
 
         if llm is None:
             llm = Chat.LLM(self, {"model_name": None,
@@ -130,7 +124,7 @@ class RAGFlow:
 
         temp_dict = {"name": name,
                      "avatar": avatar,
-                     "
+                     "datasets": dataset_list,
                      "llm": llm.to_json(),
                      "prompt": prompt.to_json()}
         res = self.post("/chat", temp_dict)
@@ -158,25 +152,22 @@ class RAGFlow:
         raise Exception(res["message"])
 
 
-    def retrieve(self, question="",
-
+    def retrieve(self, datasets,documents,question="", offset=1, limit=1024, similarity_threshold=0.2,vector_similarity_weight=0.3,top_k=1024,rerank_id:str=None,keyword:bool=False,):
+        data_json ={
             "offset": offset,
             "limit": limit,
             "similarity_threshold": similarity_threshold,
             "vector_similarity_weight": vector_similarity_weight,
             "top_k": top_k,
-            "
-            "
-            "keyword":keyword
-            }
-        data_json ={
+            "rerank_id": rerank_id,
+            "keyword": keyword,
             "question": question,
            "datasets": datasets,
            "documents": documents
        }
 
        # Send a POST request to the backend service (using requests library as an example, actual implementation may vary)
-        res = self.
+        res = self.post(f'/retrieval',json=data_json)
        res = res.json()
        if res.get("code") ==0:
            chunks=[]
```
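`retrieve` now takes `datasets` and `documents` up front and sends `rerank_id` and `keyword` in the request body. A sketch of a call against the reworked signature; the key, address, and IDs are placeholders:

```python
from ragflow import RAGFlow

rag = RAGFlow("<API_KEY>", "http://localhost:9380")

# datasets holds dataset IDs; documents may be an empty list but must be a list,
# since the server now rejects non-list values for both fields.
chunks = rag.retrieve(datasets=["<dataset_id>"], documents=[],
                      question="What is RAGFlow?",
                      offset=1, limit=10,
                      similarity_threshold=0.2,
                      vector_similarity_weight=0.3,
                      top_k=1024, rerank_id=None, keyword=False)
for chunk in chunks:
    print(chunk.document_name, chunk.similarity)
```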
sdk/python/test/t_chat.py
CHANGED
```diff
@@ -1,4 +1,5 @@
 from ragflow import RAGFlow, Chat
+from xgboost.testing import datasets
 
 from common import API_KEY, HOST_ADDRESS
 from test_sdkbase import TestSdk
@@ -11,7 +12,7 @@ class TestChat(TestSdk):
     """
     rag = RAGFlow(API_KEY, HOST_ADDRESS)
     kb = rag.create_dataset(name="test_create_chat")
-    chat = rag.create_chat("test_create",
+    chat = rag.create_chat("test_create", datasets=[kb])
     if isinstance(chat, Chat):
         assert chat.name == "test_create", "Name does not match."
     else:
@@ -23,7 +24,7 @@ class TestChat(TestSdk):
     """
     rag = RAGFlow(API_KEY, HOST_ADDRESS)
     kb = rag.create_dataset(name="test_update_chat")
-    chat = rag.create_chat("test_update",
+    chat = rag.create_chat("test_update", datasets=[kb])
     if isinstance(chat, Chat):
         assert chat.name == "test_update", "Name does not match."
         res=chat.update({"name":"new_chat"})
@@ -37,7 +38,7 @@ class TestChat(TestSdk):
     """
     rag = RAGFlow(API_KEY, HOST_ADDRESS)
     kb = rag.create_dataset(name="test_delete_chat")
-    chat = rag.create_chat("test_delete",
+    chat = rag.create_chat("test_delete", datasets=[kb])
     if isinstance(chat, Chat):
         assert chat.name == "test_delete", "Name does not match."
         res = rag.delete_chats(ids=[chat.id])
```
sdk/python/test/t_session.py
CHANGED
```diff
@@ -7,14 +7,14 @@ class TestSession:
     def test_create_session(self):
         rag = RAGFlow(API_KEY, HOST_ADDRESS)
         kb = rag.create_dataset(name="test_create_session")
-        assistant = rag.create_chat(name="test_create_session",
+        assistant = rag.create_chat(name="test_create_session", datasets=[kb])
         session = assistant.create_session()
         assert isinstance(session,Session), "Failed to create a session."
 
     def test_create_chat_with_success(self):
         rag = RAGFlow(API_KEY, HOST_ADDRESS)
         kb = rag.create_dataset(name="test_create_chat")
-        assistant = rag.create_chat(name="test_create_chat",
+        assistant = rag.create_chat(name="test_create_chat", datasets=[kb])
         session = assistant.create_session()
         question = "What is AI"
         for ans in session.ask(question, stream=True):
@@ -24,7 +24,7 @@ class TestSession:
     def test_delete_sessions_with_success(self):
         rag = RAGFlow(API_KEY, HOST_ADDRESS)
         kb = rag.create_dataset(name="test_delete_session")
-        assistant = rag.create_chat(name="test_delete_session",
+        assistant = rag.create_chat(name="test_delete_session",datasets=[kb])
         session=assistant.create_session()
         res=assistant.delete_sessions(ids=[session.id])
         assert res is None, "Failed to delete the dataset."
@@ -32,7 +32,7 @@ class TestSession:
     def test_update_session_with_success(self):
         rag=RAGFlow(API_KEY,HOST_ADDRESS)
         kb=rag.create_dataset(name="test_update_session")
-        assistant = rag.create_chat(name="test_update_session",
+        assistant = rag.create_chat(name="test_update_session",datasets=[kb])
         session=assistant.create_session(name="old session")
         res=session.update({"name":"new session"})
         assert res is None,"Failed to update the session"
@@ -41,7 +41,7 @@ class TestSession:
     def test_list_sessions_with_success(self):
         rag=RAGFlow(API_KEY,HOST_ADDRESS)
         kb=rag.create_dataset(name="test_list_session")
-        assistant=rag.create_chat(name="test_list_session",
+        assistant=rag.create_chat(name="test_list_session",datasets=[kb])
         assistant.create_session("test_1")
         assistant.create_session("test_2")
         sessions=assistant.list_sessions()
```