Kevin Hu committed
Commit 4a4d1d6 · 1 Parent(s): 209a044

reduce rerank batch size (#2801)


### What problem does this PR solve?

Reduces the rerank batch size in `YoudaoRerank` from 32 to 8 to lower peak memory usage during reranking, and comments out a startup `print` in `api/db/init_data.py`.

### Type of change

- [x] Performance Improvement

Files changed (2)
  1. api/db/init_data.py +1 -1
  2. rag/llm/rerank_model.py +1 -1
api/db/init_data.py CHANGED
@@ -132,7 +132,7 @@ def init_llm_factory():
     TenantService.filter_update([1 == 1], {
         "parser_ids": "naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One,audio:Audio,knowledge_graph:Knowledge Graph,email:Email"})
     ## insert openai two embedding models to the current openai user.
-    print("Start to insert 2 OpenAI embedding models...")
+    # print("Start to insert 2 OpenAI embedding models...")
     tenant_ids = set([row["tenant_id"] for row in TenantLLMService.get_openai_models()])
     for tid in tenant_ids:
         for row in TenantLLMService.query(llm_factory="OpenAI", tenant_id=tid):
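
A common alternative to commenting out the `print` (not what this PR does) is to route the message through Python's `logging` module, so it can be re-enabled by log level rather than by editing code. A minimal sketch:

```python
import logging

logger = logging.getLogger(__name__)

# At DEBUG level this stays invisible under the default WARNING threshold,
# so the message can be turned back on via logging config instead of a code edit.
logger.debug("Start to insert 2 OpenAI embedding models...")
```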
rag/llm/rerank_model.py CHANGED
@@ -142,7 +142,7 @@ class YoudaoRerank(DefaultRerank):
         token_count = 0
         for _, t in pairs:
             token_count += num_tokens_from_string(t)
-        batch_size = 32
+        batch_size = 8
         res = []
         for i in range(0, len(pairs), batch_size):
             scores = self._model.compute_score(pairs[i:i + batch_size], max_length=self._model.max_length)
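
The only functional change is the smaller chunk fed to `compute_score` per call; the loop itself is untouched, so the resulting scores are identical and only the number of pairs in flight at once changes. Below is a minimal, self-contained sketch of the same fixed-size batching pattern, with a hypothetical `score_fn` standing in for `self._model.compute_score` (it is not part of RAGFlow's API):

```python
from typing import Callable, List, Sequence, Tuple

def rerank_in_batches(
    pairs: Sequence[Tuple[str, str]],
    score_fn: Callable[[Sequence[Tuple[str, str]]], List[float]],
    batch_size: int = 8,  # the value this PR lowers from 32
) -> List[float]:
    # Peak memory of a cross-encoder forward pass grows with
    # batch_size * sequence_length, so a smaller batch trades some
    # throughput for a lower activation footprint.
    res: List[float] = []
    for i in range(0, len(pairs), batch_size):
        res.extend(score_fn(pairs[i:i + batch_size]))
    return res

# Usage with a dummy scorer standing in for the real model:
pairs = [("what is rag", f"candidate passage {i}") for i in range(20)]
dummy_score = lambda batch: [float(len(t)) for _, t in batch]
print(rerank_in_batches(pairs, dummy_score))  # 20 scores, computed 8 at a time
```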