KevinHuSh committed on
Commit 0de1478 · 1 Parent(s): 77b7e10

refine code to prevent exception (#1231)


### What problem does this PR solve?

Casts `chunk_token_num` from `parser_config` to `int` wherever it is used in arithmetic or passed to `JsonParser`/`naive_merge`, so a non-integer value (e.g. a string) in the parser config no longer raises an exception during chunking.

### Type of change

- [x] Refactoring

Files changed (1)
  1. rag/app/naive.py +4 -4
rag/app/naive.py CHANGED
@@ -153,7 +153,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
                     txt += l
         sections = []
         for sec in txt.split("\n"):
-            if num_tokens_from_string(sec) > 10 * parser_config.get("chunk_token_num", 128):
+            if num_tokens_from_string(sec) > 10 * int(parser_config.get("chunk_token_num", 128)):
                 sections.append((sec[:int(len(sec)/2)], ""))
                 sections.append((sec[int(len(sec)/2):], ""))
             else:
@@ -169,7 +169,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
 
     elif re.search(r"\.json$", filename, re.IGNORECASE):
         callback(0.1, "Start to parse.")
-        sections = JsonParser(parser_config.get("chunk_token_num", 128))(binary)
+        sections = JsonParser(int(parser_config.get("chunk_token_num", 128)))(binary)
         sections = [(l, "") for l in sections if l]
         callback(0.8, "Finish parsing.")
@@ -187,8 +187,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
 
     st = timer()
     chunks = naive_merge(
-        sections, parser_config.get(
-            "chunk_token_num", 128), parser_config.get(
+        sections, int(parser_config.get(
+            "chunk_token_num", 128)), parser_config.get(
             "delimiter", "\n!?。;!?"))
 
     res.extend(tokenize_chunks(chunks, doc, eng, pdf_parser))
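
For context, a minimal standalone sketch (not part of this PR) of the failure mode the `int(...)` casts guard against: if `chunk_token_num` arrives from user-supplied parser config as a string such as `"256"`, then `10 * "256"` is string repetition rather than multiplication, and comparing the result against an `int` token count raises a `TypeError`. Casting to `int` first keeps the comparison numeric. The config value below is hypothetical.

```python
# Hypothetical parser config where chunk_token_num was stored as a string.
parser_config = {"chunk_token_num": "256"}

token_count = 3000  # stands in for num_tokens_from_string(sec)

try:
    # Without the cast: 10 * "256" == "256256256..." (string repetition),
    # and int > str raises TypeError on Python 3.
    if token_count > 10 * parser_config.get("chunk_token_num", 128):
        pass
except TypeError as e:
    print("unhandled config type:", e)

# With the cast applied in this PR, the comparison stays numeric.
if token_count > 10 * int(parser_config.get("chunk_token_num", 128)):
    print("section is too long; split it in half")
```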