KevinHuSh committed on
Commit 2d09c38 · 1 Parent(s): 919b966

refactor code (#583)

### What problem does this PR solve?

Code refactoring only: renames the `Hu*`-prefixed classes to project-branded names (`HuParser` → `RAGFlowPdfParser`, `HuDocxParser` → `RAGFlowDocxParser`, `HuExcelParser` → `RAGFlowExcelParser`, `HuPptParser` → `RAGFlowPptParser`, `HuMinio` → `RAGFlowMinio`, `HuEs` → `ESConnection`, `HuEmbedding` → `DefaultEmbedding`), moves the `MINIO` and `ELASTICSEARCH` singletons out of `rag/utils/__init__.py` into `rag.utils.minio_conn` and `rag.utils.es_conn`, and deletes the unused `rag/nlp/huchunk.py`.

### Type of change

- [x] Refactoring
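
The import-path change is mechanical for callers: the connection singletons now come from dedicated modules instead of being re-exported by `rag.utils`. Roughly, with paths as they appear in the diffs below:

```python
# Before this PR:
# from rag.utils import MINIO, ELASTICSEARCH, rmSpace

# After this PR — singletons come from their own modules;
# plain helpers stay in the package root:
from rag.utils.minio_conn import MINIO
from rag.utils.es_conn import ELASTICSEARCH
from rag.utils import rmSpace, findMaxTm
```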

api/apps/api_app.py CHANGED
@@ -33,7 +33,7 @@ from api.utils.api_utils import server_error_response, get_data_error_result, ge
 from itsdangerous import URLSafeTimedSerializer
 
 from api.utils.file_utils import filename_type, thumbnail
-from rag.utils import MINIO
+from rag.utils.minio_conn import MINIO
 
 
 def generate_confirmation_token(tenent_id):
api/apps/chunk_app.py CHANGED
@@ -21,7 +21,8 @@ from elasticsearch_dsl import Q
 
 from rag.app.qa import rmPrefix, beAdoc
 from rag.nlp import search, huqie
-from rag.utils import ELASTICSEARCH, rmSpace
+from rag.utils.es_conn import ELASTICSEARCH
+from rag.utils import rmSpace
 from api.db import LLMType, ParserType
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.llm_service import TenantLLMService
api/apps/document_app.py CHANGED
@@ -27,7 +27,7 @@ from flask_login import login_required, current_user
 from api.db.services.file2document_service import File2DocumentService
 from api.db.services.file_service import FileService
 from rag.nlp import search
-from rag.utils import ELASTICSEARCH
+from rag.utils.es_conn import ELASTICSEARCH
 from api.db.services import duplicate_name
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
api/apps/file2document_app.py CHANGED
@@ -29,7 +29,7 @@ from api.db.services.document_service import DocumentService
 from api.settings import RetCode
 from api.utils.api_utils import get_json_result
 from rag.nlp import search
-from rag.utils import ELASTICSEARCH
+from rag.utils.es_conn import ELASTICSEARCH
 
 
 @manager.route('/convert', methods=['POST'])
api/apps/file_app.py CHANGED
@@ -33,7 +33,7 @@ from api.settings import RetCode
 from api.utils.api_utils import get_json_result
 from api.utils.file_utils import filename_type
 from rag.nlp import search
-from rag.utils import ELASTICSEARCH
+from rag.utils.es_conn import ELASTICSEARCH
 from rag.utils.minio_conn import MINIO
 
 
api/apps/kb_app.py CHANGED
@@ -28,7 +28,7 @@ from api.db.db_models import Knowledgebase
 from api.settings import stat_logger, RetCode
 from api.utils.api_utils import get_json_result
 from rag.nlp import search
-from rag.utils import ELASTICSEARCH
+from rag.utils.es_conn import ELASTICSEARCH
 
 
 @manager.route('/create', methods=['post'])
api/db/services/document_service.py CHANGED
@@ -16,7 +16,7 @@
 from peewee import Expression
 
 from elasticsearch_dsl import Q
-from rag.utils import ELASTICSEARCH
+from rag.utils.es_conn import ELASTICSEARCH
 from rag.utils.minio_conn import MINIO
 from rag.nlp import search
 
api/settings.py CHANGED
@@ -32,7 +32,7 @@ access_logger = getLogger("access")
 database_logger = getLogger("database")
 chat_logger = getLogger("chat")
 
-from rag.utils import ELASTICSEARCH
+from rag.utils.es_conn import ELASTICSEARCH
 from rag.nlp import search
 from api.utils import get_base_config, decrypt_database_config
 
deepdoc/parser/__init__.py CHANGED
@@ -1,6 +1,6 @@
 
 
-from .pdf_parser import HuParser as PdfParser, PlainParser
-from .docx_parser import HuDocxParser as DocxParser
-from .excel_parser import HuExcelParser as ExcelParser
-from .ppt_parser import HuPptParser as PptParser
+from .pdf_parser import RAGFlowPdfParser as PdfParser, PlainParser
+from .docx_parser import RAGFlowDocxParser as DocxParser
+from .excel_parser import RAGFlowExcelParser as ExcelParser
+from .ppt_parser import RAGFlowPptParser as PptParser
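
Because the package keeps re-exporting the renamed classes under the same public aliases, downstream code that imports `PdfParser`, `DocxParser`, `ExcelParser`, or `PptParser` is unaffected; only code importing the `Hu*` names directly (like `task_broker.py` below) needs updating. A minimal check, assuming nothing beyond what this `__init__.py` shows:

```python
from deepdoc.parser import PdfParser
from deepdoc.parser.pdf_parser import RAGFlowPdfParser

# The public alias still resolves to the (renamed) implementation class.
assert PdfParser is RAGFlowPdfParser
```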
deepdoc/parser/docx_parser.py CHANGED
@@ -7,7 +7,7 @@ from rag.nlp import huqie
 from io import BytesIO
 
 
-class HuDocxParser:
+class RAGFlowDocxParser:
 
     def __extract_table_content(self, tb):
         df = []
deepdoc/parser/excel_parser.py CHANGED
@@ -6,7 +6,7 @@ from io import BytesIO
 from rag.nlp import find_codec
 
 
-class HuExcelParser:
+class RAGFlowExcelParser:
     def html(self, fnm):
         if isinstance(fnm, str):
             wb = load_workbook(fnm)
@@ -74,5 +74,5 @@ class HuExcelParser:
 
 
 if __name__ == "__main__":
-    psr = HuExcelParser()
+    psr = RAGFlowExcelParser()
     psr(sys.argv[1])
deepdoc/parser/pdf_parser.py CHANGED
@@ -23,7 +23,7 @@ from huggingface_hub import snapshot_download
 logging.getLogger("pdfminer").setLevel(logging.WARNING)
 
 
-class HuParser:
+class RAGFlowPdfParser:
     def __init__(self):
        self.ocr = OCR()
        if hasattr(self, "model_speciess"):
deepdoc/parser/ppt_parser.py CHANGED
@@ -14,7 +14,7 @@ from io import BytesIO
 from pptx import Presentation
 
 
-class HuPptParser(object):
+class RAGFlowPptParser(object):
     def __init__(self):
         super().__init__()
 
deepdoc/vision/t_ocr.py CHANGED
@@ -11,10 +11,6 @@
 # limitations under the License.
 #
 
-from deepdoc.vision.seeit import draw_box
-from deepdoc.vision import OCR, init_in_out
-import argparse
-import numpy as np
 import os
 import sys
 sys.path.insert(
@@ -25,6 +21,11 @@ sys.path.insert(
         os.path.abspath(__file__)),
     '../../')))
 
+from deepdoc.vision.seeit import draw_box
+from deepdoc.vision import OCR, init_in_out
+import argparse
+import numpy as np
+
 
 def main(args):
     ocr = OCR()
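
The reordering matters because `t_ocr.py` is run as a standalone script: the `deepdoc.*` imports only resolve after the project root has been pushed onto `sys.path`, so they must come after the `sys.path.insert(...)` call rather than sit at the top of the file. The same fix is applied to `t_recognizer.py` below. A minimal sketch of the idiom (the exact path expression is an assumption, since the hunk elides part of it):

```python
# Stdlib imports are safe at the top.
import os
import sys

# Make the repository root importable before touching any project package.
sys.path.insert(
    0,
    os.path.abspath(
        os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../')))

# Project imports must come after the path fix-up (deliberate E402-style
# late imports so the script works when executed directly).
from deepdoc.vision import OCR, init_in_out  # noqa: E402
```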
deepdoc/vision/t_recognizer.py CHANGED
@@ -10,17 +10,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-
-from deepdoc.vision.seeit import draw_box
-from deepdoc.vision import Recognizer, LayoutRecognizer, TableStructureRecognizer, OCR, init_in_out
-from api.utils.file_utils import get_project_base_directory
-import argparse
-import os
-import sys
-import re
-
-import numpy as np
-
+import os, sys
 sys.path.insert(
     0,
     os.path.abspath(
@@ -29,6 +19,13 @@ sys.path.insert(
         os.path.abspath(__file__)),
     '../../')))
 
+from deepdoc.vision.seeit import draw_box
+from deepdoc.vision import Recognizer, LayoutRecognizer, TableStructureRecognizer, OCR, init_in_out
+from api.utils.file_utils import get_project_base_directory
+import argparse
+import re
+import numpy as np
+
 
 def main(args):
     images, outputs = init_in_out(args)
rag/llm/__init__.py CHANGED
@@ -22,7 +22,7 @@ EmbeddingModel = {
     "Ollama": OllamaEmbed,
     "OpenAI": OpenAIEmbed,
     "Xinference": XinferenceEmbed,
-    "Tongyi-Qianwen": HuEmbedding, #QWenEmbed,
+    "Tongyi-Qianwen": DefaultEmbedding, #QWenEmbed,
     "ZHIPU-AI": ZhipuEmbed,
     "FastEmbed": FastEmbed,
     "Youdao": YoudaoEmbed
rag/llm/embedding_model.py CHANGED
@@ -56,7 +56,7 @@ class Base(ABC):
         raise NotImplementedError("Please implement encode method!")
 
 
-class HuEmbedding(Base):
+class DefaultEmbedding(Base):
     def __init__(self, *args, **kwargs):
         """
         If you have trouble downloading HuggingFace models, -_^ this might help!!
rag/nlp/huchunk.py DELETED
@@ -1,475 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import re
-import os
-import copy
-import base64
-import magic
-from dataclasses import dataclass
-from typing import List
-import numpy as np
-from io import BytesIO
-
-
-class HuChunker:
-
-    @dataclass
-    class Fields:
-        text_chunks: List = None
-        table_chunks: List = None
-
-    def __init__(self):
-        self.MAX_LVL = 12
-        self.proj_patt = [
-            (r"第[零一二三四五六七八九十百]+章", 1),
-            (r"第[零一二三四五六七八九十百]+[条节]", 2),
-            (r"[零一二三四五六七八九十百]+[、  ]", 3),
-            (r"[\((][零一二三四五六七八九十百]+[)\)]", 4),
-            (r"[0-9]+(、|\.[  ]|\.[^0-9])", 5),
-            (r"[0-9]+\.[0-9]+(、|[  ]|[^0-9])", 6),
-            (r"[0-9]+\.[0-9]+\.[0-9]+(、|[  ]|[^0-9])", 7),
-            (r"[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+(、|[  ]|[^0-9])", 8),
-            (r".{,48}[::??]@", 9),
-            (r"[0-9]+)", 10),
-            (r"[\((][0-9]+[)\)]", 11),
-            (r"[零一二三四五六七八九十百]+是", 12),
-            (r"[⚫•➢✓ ]", 12)
-        ]
-        self.lines = []
-
-    def _garbage(self, txt):
-        patt = [
-            r"(在此保证|不得以任何形式翻版|请勿传阅|仅供内部使用|未经事先书面授权)",
-            r"(版权(归本公司)*所有|免责声明|保留一切权力|承担全部责任|特别声明|报告中涉及)",
-            r"(不承担任何责任|投资者的通知事项:|任何机构和个人|本报告仅为|不构成投资)",
-            r"(不构成对任何个人或机构投资建议|联系其所在国家|本报告由从事证券交易)",
-            r"(本研究报告由|「认可投资者」|所有研究报告均以|请发邮件至)",
-            r"(本报告仅供|市场有风险,投资需谨慎|本报告中提及的)",
-            r"(本报告反映|此信息仅供|证券分析师承诺|具备证券投资咨询业务资格)",
-            r"^(时间|签字|签章)[::]",
-            r"(参考文献|目录索引|图表索引)",
-            r"[ ]*年[ ]+月[ ]+日",
-            r"^(中国证券业协会|[0-9]+年[0-9]+月[0-9]+日)$",
-            r"\.{10,}",
-            r"(———————END|帮我转发|欢迎收藏|快来关注我吧)"
-        ]
-        return any([re.search(p, txt) for p in patt])
-
-    def _proj_match(self, line):
-        for p, j in self.proj_patt:
-            if re.match(p, line):
-                return j
-        return
-
-    def _does_proj_match(self):
-        mat = [None for _ in range(len(self.lines))]
-        for i in range(len(self.lines)):
-            mat[i] = self._proj_match(self.lines[i])
-        return mat
-
-    def naive_text_chunk(self, text, ti="", MAX_LEN=612):
-        if text:
-            self.lines = [l.strip().replace(u'\u3000', u' ')
-                          .replace(u'\xa0', u'')
-                          for l in text.split("\n\n")]
-            self.lines = [l for l in self.lines if not self._garbage(l)]
-            self.lines = [re.sub(r"([ ]+| )", " ", l)
-                          for l in self.lines if l]
-        if not self.lines:
-            return []
-        arr = self.lines
-
-        res = [""]
-        i = 0
-        while i < len(arr):
-            a = arr[i]
-            if not a:
-                i += 1
-                continue
-            if len(a) > MAX_LEN:
-                a_ = a.split("\n")
-                if len(a_) >= 2:
-                    arr.pop(i)
-                    for j in range(2, len(a_) + 1):
-                        if len("\n".join(a_[:j])) >= MAX_LEN:
-                            arr.insert(i, "\n".join(a_[:j - 1]))
-                            arr.insert(i + 1, "\n".join(a_[j - 1:]))
-                            break
-                    else:
-                        assert False, f"Can't split: {a}"
-                    continue
-
-            if len(res[-1]) < MAX_LEN / 3:
-                res[-1] += "\n" + a
-            else:
-                res.append(a)
-            i += 1
-
-        if ti:
-            for i in range(len(res)):
-                if res[i].find("——来自") >= 0:
-                    continue
-                res[i] += f"\t——来自“{ti}”"
-
-        return res
-
-    def _merge(self):
-        # merge continuous same level text
-        lines = [self.lines[0]] if self.lines else []
-        for i in range(1, len(self.lines)):
-            if self.mat[i] == self.mat[i - 1] \
-                    and len(lines[-1]) < 256 \
-                    and len(self.lines[i]) < 256:
-                lines[-1] += "\n" + self.lines[i]
-                continue
-            lines.append(self.lines[i])
-        self.lines = lines
-        self.mat = self._does_proj_match()
-        return self.mat
-
-    def text_chunks(self, text):
-        if text:
-            self.lines = [l.strip().replace(u'\u3000', u' ')
-                          .replace(u'\xa0', u'')
-                          for l in re.split(r"[\r\n]", text)]
-            self.lines = [l for l in self.lines if not self._garbage(l)]
-            self.lines = [l for l in self.lines if l]
-        self.mat = self._does_proj_match()
-        mat = self._merge()
-
-        tree = []
-        for i in range(len(self.lines)):
-            tree.append({"proj": mat[i],
-                         "children": [],
-                         "read": False})
-        # find all children
-        for i in range(len(self.lines) - 1):
-            if tree[i]["proj"] is None:
-                continue
-            ed = i + 1
-            while ed < len(tree) and (tree[ed]["proj"] is None or
-                                      tree[ed]["proj"] > tree[i]["proj"]):
-                ed += 1
-
-            nxt = tree[i]["proj"] + 1
-            st = set([p["proj"] for p in tree[i + 1: ed] if p["proj"]])
-            while nxt not in st:
-                nxt += 1
-                if nxt > self.MAX_LVL:
-                    break
-            if nxt <= self.MAX_LVL:
-                for j in range(i + 1, ed):
-                    if tree[j]["proj"] is not None:
-                        break
-                    tree[i]["children"].append(j)
-                for j in range(i + 1, ed):
-                    if tree[j]["proj"] != nxt:
-                        continue
-                    tree[i]["children"].append(j)
-            else:
-                for j in range(i + 1, ed):
-                    tree[i]["children"].append(j)
-
-        # get DFS combinations, find all the paths to leaf
-        paths = []
-
-        def dfs(i, path):
-            nonlocal tree, paths
-            path.append(i)
-            tree[i]["read"] = True
-            if len(self.lines[i]) > 256:
-                paths.append(path)
-                return
-            if not tree[i]["children"]:
-                if len(path) > 1 or len(self.lines[i]) >= 32:
-                    paths.append(path)
-                return
-            for j in tree[i]["children"]:
-                dfs(j, copy.deepcopy(path))
-
-        for i, t in enumerate(tree):
-            if t["read"]:
-                continue
-            dfs(i, [])
-
-        # concat txt on the path for all paths
-        res = []
-        lines = np.array(self.lines)
-        for p in paths:
-            if len(p) < 2:
-                tree[p[0]]["read"] = False
-                continue
-            txt = "\n".join(lines[p[:-1]]) + "\n" + lines[p[-1]]
-            res.append(txt)
-        # concat continuous orphans
-        assert len(tree) == len(lines)
-        ii = 0
-        while ii < len(tree):
-            if tree[ii]["read"]:
-                ii += 1
-                continue
-            txt = lines[ii]
-            e = ii + 1
-            while e < len(tree) and not tree[e]["read"] and len(txt) < 256:
-                txt += "\n" + lines[e]
-                e += 1
-            res.append(txt)
-            ii = e
-
-        # if the node has not been read, find its daddy
-        def find_daddy(st):
-            nonlocal lines, tree
-            proj = tree[st]["proj"]
-            if len(self.lines[st]) > 512:
-                return [st]
-            if proj is None:
-                proj = self.MAX_LVL + 1
-            for i in range(st - 1, -1, -1):
-                if tree[i]["proj"] and tree[i]["proj"] < proj:
-                    a = [st] + find_daddy(i)
-                    return a
-            return []
-
-        return res
-
-
-class PdfChunker(HuChunker):
-
-    def __init__(self, pdf_parser):
-        self.pdf = pdf_parser
-        super().__init__()
-
-    def tableHtmls(self, pdfnm):
-        _, tbls = self.pdf(pdfnm, return_html=True)
-        res = []
-        for img, arr in tbls:
-            if arr[0].find("<table>") < 0:
-                continue
-            buffered = BytesIO()
-            if img:
-                img.save(buffered, format="JPEG")
-            img_str = base64.b64encode(
-                buffered.getvalue()).decode('utf-8') if img else ""
-            res.append({"table": arr[0], "image": img_str})
-        return res
-
-    def html(self, pdfnm):
-        txts, tbls = self.pdf(pdfnm, return_html=True)
-        res = []
-        txt_cks = self.text_chunks(txts)
-        for txt, img in [(self.pdf.remove_tag(c), self.pdf.crop(c))
-                         for c in txt_cks]:
-            buffered = BytesIO()
-            if img:
-                img.save(buffered, format="JPEG")
-            img_str = base64.b64encode(
-                buffered.getvalue()).decode('utf-8') if img else ""
-            res.append({"table": "<p>%s</p>" % txt.replace("\n", "<br/>"),
-                        "image": img_str})
-
-        for img, arr in tbls:
-            if not arr:
-                continue
-            buffered = BytesIO()
-            if img:
-                img.save(buffered, format="JPEG")
-            img_str = base64.b64encode(
-                buffered.getvalue()).decode('utf-8') if img else ""
-            res.append({"table": arr[0], "image": img_str})
-
-        return res
-
-    def __call__(self, pdfnm, return_image=True, naive_chunk=False):
-        flds = self.Fields()
-        text, tbls = self.pdf(pdfnm)
-        fnm = pdfnm
-        txt_cks = self.text_chunks(text) if not naive_chunk else \
-            self.naive_text_chunk(text, ti=fnm if isinstance(fnm, str) else "")
-        flds.text_chunks = [(self.pdf.remove_tag(c),
-                             self.pdf.crop(c) if return_image else None) for c in txt_cks]
-
-        flds.table_chunks = [(arr, img if return_image else None)
-                             for img, arr in tbls]
-        return flds
-
-
-class DocxChunker(HuChunker):
-
-    def __init__(self, doc_parser):
-        self.doc = doc_parser
-        super().__init__()
-
-    def _does_proj_match(self):
-        mat = []
-        for s in self.styles:
-            s = s.split(" ")[-1]
-            try:
-                mat.append(int(s))
-            except Exception as e:
-                mat.append(None)
-        return mat
-
-    def _merge(self):
-        i = 1
-        while i < len(self.lines):
-            if self.mat[i] == self.mat[i - 1] \
-                    and len(self.lines[i - 1]) < 256 \
-                    and len(self.lines[i]) < 256:
-                self.lines[i - 1] += "\n" + self.lines[i]
-                self.styles.pop(i)
-                self.lines.pop(i)
-                self.mat.pop(i)
-                continue
-            i += 1
-        self.mat = self._does_proj_match()
-        return self.mat
-
-    def __call__(self, fnm):
-        flds = self.Fields()
-        flds.title = os.path.splitext(
-            os.path.basename(fnm))[0] if isinstance(
-            fnm, type("")) else ""
-        secs, tbls = self.doc(fnm)
-        self.lines = [l for l, s in secs]
-        self.styles = [s for l, s in secs]
-
-        txt_cks = self.text_chunks("")
-        flds.text_chunks = [(t, None) for t in txt_cks if not self._garbage(t)]
-        flds.table_chunks = [(tb, None) for tb in tbls for t in tb if t]
-        return flds
-
-
-class ExcelChunker(HuChunker):
-
-    def __init__(self, excel_parser):
-        self.excel = excel_parser
-        super().__init__()
-
-    def __call__(self, fnm):
-        flds = self.Fields()
-        flds.text_chunks = [(t, None) for t in self.excel(fnm)]
-        flds.table_chunks = []
-        return flds
-
-
-class PptChunker(HuChunker):
-
-    def __init__(self):
-        super().__init__()
-
-    def __extract(self, shape):
-        if shape.shape_type == 19:
-            tb = shape.table
-            rows = []
-            for i in range(1, len(tb.rows)):
-                rows.append("; ".join([tb.cell(
-                    0, j).text + ": " + tb.cell(i, j).text for j in range(len(tb.columns)) if tb.cell(i, j)]))
-            return "\n".join(rows)
-
-        if shape.has_text_frame:
-            return shape.text_frame.text
-
-        if shape.shape_type == 6:
-            texts = []
-            for p in shape.shapes:
-                t = self.__extract(p)
-                if t:
-                    texts.append(t)
-            return "\n".join(texts)
-
-    def __call__(self, fnm):
-        from pptx import Presentation
-        ppt = Presentation(fnm) if isinstance(
-            fnm, str) else Presentation(
-            BytesIO(fnm))
-        txts = []
-        for slide in ppt.slides:
-            texts = []
-            for shape in slide.shapes:
-                txt = self.__extract(shape)
-                if txt:
-                    texts.append(txt)
-            txts.append("\n".join(texts))
-
-        import aspose.slides as slides
-        import aspose.pydrawing as drawing
-        imgs = []
-        with slides.Presentation(BytesIO(fnm)) as presentation:
-            for slide in presentation.slides:
-                buffered = BytesIO()
-                slide.get_thumbnail(
-                    0.5, 0.5).save(
-                    buffered, drawing.imaging.ImageFormat.jpeg)
-                imgs.append(buffered.getvalue())
-        assert len(imgs) == len(
-            txts), "Slides text and image do not match: {} vs. {}".format(len(imgs), len(txts))
-
-        flds = self.Fields()
-        flds.text_chunks = [(txts[i], imgs[i]) for i in range(len(txts))]
-        flds.table_chunks = []
-
-        return flds
-
-
-class TextChunker(HuChunker):
-
-    @dataclass
-    class Fields:
-        text_chunks: List = None
-        table_chunks: List = None
-
-    def __init__(self):
-        super().__init__()
-
-    @staticmethod
-    def is_binary_file(file_path):
-        mime = magic.Magic(mime=True)
-        if isinstance(file_path, str):
-            file_type = mime.from_file(file_path)
-        else:
-            file_type = mime.from_buffer(file_path)
-        if 'text' in file_type:
-            return False
-        else:
-            return True
-
-    def __call__(self, fnm):
-        flds = self.Fields()
-        if self.is_binary_file(fnm):
-            return flds
-        txt = ""
-        if isinstance(fnm, str):
-            with open(fnm, "r") as f:
-                txt = f.read()
-        else:
-            txt = fnm.decode("utf-8")
-        flds.text_chunks = [(c, None) for c in self.naive_text_chunk(txt)]
-        flds.table_chunks = []
-        return flds
-
-
-if __name__ == "__main__":
-    import sys
-    sys.path.append(os.path.dirname(__file__) + "/../")
-    if sys.argv[1].split(".")[-1].lower() == "pdf":
-        from deepdoc.parser import PdfParser
-        ckr = PdfChunker(PdfParser())
-    if sys.argv[1].split(".")[-1].lower().find("doc") >= 0:
-        from deepdoc.parser import DocxParser
-        ckr = DocxChunker(DocxParser())
-    if sys.argv[1].split(".")[-1].lower().find("xlsx") >= 0:
-        from deepdoc.parser import ExcelParser
-        ckr = ExcelChunker(ExcelParser())
-
-    # ckr.html(sys.argv[1])
-    print(ckr(sys.argv[1]))
rag/nlp/synonym.py CHANGED
@@ -17,12 +17,12 @@ class Dealer:
         try:
             self.dictionary = json.load(open(path, 'r'))
         except Exception as e:
-            logging.warning("Missing synonym.json")
+            logging.warn("Missing synonym.json")
             self.dictionary = {}
 
         if not redis:
             logging.warning(
-                "Real-time synonym is disabled, since no redis connection.")
+                "Realtime synonym is disabled, since no redis connection.")
         if not len(self.dictionary.keys()):
             logging.warning(f"Fail to load synonym")
 
rag/svr/cache_file_svr.py CHANGED
@@ -4,7 +4,7 @@ import traceback
 
 from api.db.db_models import close_connection
 from api.db.services.task_service import TaskService
-from rag.utils import MINIO
+from rag.utils.minio_conn import MINIO
 from rag.utils.redis_conn import REDIS_CONN
 
 
rag/svr/task_broker.py CHANGED
@@ -24,9 +24,9 @@ from api.db.services.file2document_service import File2DocumentService
 from api.db.services.file_service import FileService
 from api.db.services.task_service import TaskService
 from deepdoc.parser import PdfParser
-from deepdoc.parser.excel_parser import HuExcelParser
+from deepdoc.parser.excel_parser import RAGFlowExcelParser
 from rag.settings import cron_logger
-from rag.utils import MINIO
+from rag.utils.minio_conn import MINIO
 from rag.utils import findMaxTm
 import pandas as pd
 from api.db import FileType, TaskStatus
@@ -121,7 +121,7 @@ def dispatch():
             tsks.append(task)
 
         elif r["parser_id"] == "table":
-            rn = HuExcelParser.row_number(
+            rn = RAGFlowExcelParser.row_number(
                 r["name"], file_bin)
             for i in range(0, rn, 3000):
                 task = new_task()
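
The `dispatch()` logic shards "table" documents into one task per 3000 rows, so only the class providing `row_number` changes here. A small illustration of the batching arithmetic, assuming `row_number` returns the total row count as the call site suggests:

```python
# Pretend RAGFlowExcelParser.row_number(...) reported 7500 rows.
rn = 7500
batches = [(i, min(i + 3000, rn)) for i in range(0, rn, 3000)]
print(batches)  # [(0, 3000), (3000, 6000), (6000, 7500)]
```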
rag/svr/task_executor.py CHANGED
@@ -26,7 +26,7 @@ import traceback
 from functools import partial
 
 from api.db.services.file2document_service import File2DocumentService
-from rag.utils import MINIO
+from rag.utils.minio_conn import MINIO
 from api.db.db_models import close_connection
 from rag.settings import database_logger
 from rag.settings import cron_logger, DOC_MAXIMUM_SIZE
@@ -35,7 +35,7 @@ import numpy as np
 from elasticsearch_dsl import Q
 from multiprocessing.context import TimeoutError
 from api.db.services.task_service import TaskService
-from rag.utils import ELASTICSEARCH
+from rag.utils.es_conn import ELASTICSEARCH
 from timeit import default_timer as timer
 from rag.utils import rmSpace, findMaxTm
 
rag/utils/__init__.py CHANGED
@@ -15,9 +15,6 @@ def singleton(cls, *args, **kw):
     return _singleton
 
 
-from .minio_conn import MINIO
-from .es_conn import ELASTICSEARCH
-
 def rmSpace(txt):
     txt = re.sub(r"([^a-z0-9.,]) +([^ ])", r"\1\2", txt, flags=re.IGNORECASE)
     return re.sub(r"([^ ]) +([^a-z0-9.,])", r"\1\2", txt, flags=re.IGNORECASE)
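
Dropping these re-exports removes a circular dependency: `rag/utils/minio_conn.py` and `rag/utils/es_conn.py` both import `singleton` from `rag.utils`, which in turn imported `MINIO` and `ELASTICSEARCH` back from them. Callers now import the singletons from the defining modules directly. For reference, a minimal sketch of what a `singleton` decorator of this shape typically looks like — the real body is not part of this diff, so treat everything but the signature and final `return _singleton` as an assumption:

```python
# Sketch only: rag/utils/__init__.py shows "def singleton(cls, *args, **kw):"
# and "return _singleton"; the cache in between is assumed.
def singleton(cls, *args, **kw):
    instances = {}

    def _singleton():
        # Construct the wrapped class once, then hand back the cached instance.
        if cls not in instances:
            instances[cls] = cls(*args, **kw)
        return instances[cls]
    return _singleton
```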
rag/utils/es_conn.py CHANGED
@@ -15,7 +15,7 @@ es_logger.info("Elasticsearch version: "+str(elasticsearch.__version__))
 
 
 @singleton
-class HuEs:
+class ESConnection:
     def __init__(self):
         self.info = {}
         self.conn()
@@ -454,4 +454,4 @@ class HuEs:
         scroll_size = len(page['hits']['hits'])
 
 
-ELASTICSEARCH = HuEs()
+ELASTICSEARCH = ESConnection()
rag/utils/minio_conn.py CHANGED
@@ -8,7 +8,7 @@ from rag.utils import singleton
 
 
 @singleton
-class HuMinio(object):
+class RAGFlowMinio(object):
     def __init__(self):
         self.conn = None
         self.__open__()
@@ -86,10 +86,12 @@ class HuMinio(object):
             time.sleep(1)
         return
 
-MINIO = HuMinio()
+
+MINIO = RAGFlowMinio()
+
 
 if __name__ == "__main__":
-    conn = HuMinio()
+    conn = RAGFlowMinio()
     fnm = "/opt/home/kevinhu/docgpt/upload/13/11-408.jpg"
     from PIL import Image
     img = Image.open(fnm)
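
With both connection classes decorated by `@singleton` and instantiated at module import time, every importer shares one connection object, and "constructing" the class again should hand back the same instance. A small check, assuming the decorator caches a single instance per class as sketched above:

```python
from rag.utils.minio_conn import MINIO, RAGFlowMinio
from rag.utils.es_conn import ELASTICSEARCH, ESConnection

# Module-level names and fresh constructions refer to the shared objects.
assert RAGFlowMinio() is MINIO
assert ESConnection() is ELASTICSEARCH
```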