lidp committed · commit 9c1ae34 · 1 parent: 2083caf

Add RAGFlow benchmark (#2387)

### What problem does this PR solve?

Adds a retrieval benchmark script, `rag/benchmark.py`, that indexes the ms_marco_v1.1, trivia_qa, and miracl datasets into Elasticsearch using a knowledge base's embedding model, then scores retrieval with ranx (ndcg@10, map@5, mrr).
### Type of change
- [x] New Feature (non-breaking change which adds functionality)
rag/benchmark.py · CHANGED · +166 −26
```diff
@@ -13,10 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-
-import …
+import json
+import os
 from collections import defaultdict
-from api.db import …
+from api.db import LLMType
 from api.db.services.llm_service import LLMBundle
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.settings import retrievaler
@@ -24,25 +24,27 @@ from api.utils import get_uuid
 from rag.nlp import tokenize, search
 from rag.utils.es_conn import ELASTICSEARCH
 from ranx import evaluate
+import pandas as pd
+from tqdm import tqdm
 
 
-class benchmark_ndcg10:
+class Benchmark:
     def __init__(self, kb_id):
         e, kb = KnowledgebaseService.get_by_id(kb_id)
         self.similarity_threshold = kb.similarity_threshold
         self.vector_similarity_weight = kb.vector_similarity_weight
         self.embd_mdl = LLMBundle(kb.tenant_id, LLMType.EMBEDDING, llm_name=kb.embd_id, lang=kb.language)
 
-    def _get_benchmarks(self, query, count=16):
+    def _get_benchmarks(self, query, dataset_idxnm, count=16):
         req = {"question": query, "size": count, "vector": True, "similarity": self.similarity_threshold}
-        sres = retrievaler.search(req, search.index_name(…
+        sres = retrievaler.search(req, search.index_name(dataset_idxnm), self.embd_mdl)
         return sres
 
-    def _get_retrieval(self, qrels):
+    def _get_retrieval(self, qrels, dataset_idxnm):
         run = defaultdict(dict)
         query_list = list(qrels.keys())
         for query in query_list:
-            sres = self._get_benchmarks(query)
+            sres = self._get_benchmarks(query, dataset_idxnm)
             sim, _, _ = retrievaler.rerank(sres, query, 1 - self.vector_similarity_weight,
                                            self.vector_similarity_weight)
             for index, id in enumerate(sres.ids):
```
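`_get_retrieval` above builds the run side of a ranx evaluation: both qrels and run map each query to a `{doc_id: value}` dict, with ground-truth grades in qrels and retrieval scores in run. A minimal toy sketch of how `evaluate` consumes that pair (invented values, matching the dict-of-dicts shape the script passes):

```python
from ranx import evaluate

# qrels: ground-truth relevance grades; run: retrieval scores per query.
qrels = {"what is ragflow": {"doc_a": 1, "doc_b": 0}}
run = {"what is ragflow": {"doc_a": 0.92, "doc_b": 0.41}}

print(evaluate(qrels, run, "ndcg@10"))                    # single metric -> float
print(evaluate(qrels, run, ["ndcg@10", "map@5", "mrr"]))  # list -> {metric: score}
```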
```diff
@@ -61,34 +63,172 @@ class benchmark_ndcg10:
             d["q_%d_vec" % len(v)] = v
         return docs
 
-    def …
+    def ms_marco_index(self, file_path, index_name):
         qrels = defaultdict(dict)
+        texts = defaultdict(dict)
+        docs = []
-        …
-        …
-        …
+        filelist = os.listdir(file_path)
+        for dir in filelist:
+            data = pd.read_parquet(os.path.join(file_path, dir))
+            for i in tqdm(range(len(data)), colour="green", desc="Indexing:" + dir):
+
+                query = data.iloc[i]['query']
+                for rel, text in zip(data.iloc[i]['passages']['is_selected'], data.iloc[i]['passages']['passage_text']):
+                    d = {
+                        "id": get_uuid()
+                    }
-                tokenize(d, text)
+                    tokenize(d, text, "english")
+                    docs.append(d)
+                    texts[d["id"]] = text
-                qrels[query][d["id"]] = float(rel)
+                    qrels[query][d["id"]] = int(rel)
+                if len(docs) >= 32:
-                    …
+                    docs = self.embedding(docs)
+                    ELASTICSEARCH.bulk(docs, search.index_name(index_name))
+                    docs = []
-        docs = self.embedding(docs)
-        ELASTICSEARCH.bulk(docs, search.index_name("benchmark"))
-        …
+
+        docs = self.embedding(docs)
+        ELASTICSEARCH.bulk(docs, search.index_name(index_name))
+        return qrels, texts
```
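`ms_marco_index` assumes the Hugging Face ms_marco v1.1 parquet layout: each row carries a `query` string plus a `passages` struct of parallel arrays, with `is_selected` flagging relevant passages. A fabricated single-row frame showing only the fields the code touches:

```python
import pandas as pd

# Fabricated ms_marco_v1.1-style row; field names from the code above, values invented.
row = {
    "query": "what is a benchmark",
    "passages": {
        "is_selected": [1, 0],  # used as the relevance grade in qrels
        "passage_text": ["A benchmark measures retrieval quality ...",
                         "An unrelated passage ..."],
    },
}
data = pd.DataFrame([row])
print(data.iloc[0]["query"], data.iloc[0]["passages"]["is_selected"])
```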
```diff
+    def trivia_qa_index(self, file_path, index_name):
+        qrels = defaultdict(dict)
+        texts = defaultdict(dict)
         docs = []
+        filelist = os.listdir(file_path)
+        for dir in filelist:
+            data = pd.read_parquet(os.path.join(file_path, dir))
+            for i in tqdm(range(len(data)), colour="green", desc="Indexing:" + dir):
+                query = data.iloc[i]['question']
+                for rel, text in zip(data.iloc[i]["search_results"]['rank'],
+                                     data.iloc[i]["search_results"]['search_context']):
+                    d = {
+                        "id": get_uuid()
+                    }
+                    tokenize(d, text, "english")
+                    docs.append(d)
+                    texts[d["id"]] = text
+                    qrels[query][d["id"]] = int(rel)
+                if len(docs) >= 32:
+                    docs = self.embedding(docs)
+                    ELASTICSEARCH.bulk(docs, search.index_name(index_name))
+                    docs = []
+
+        docs = self.embedding(docs)
+        ELASTICSEARCH.bulk(docs, search.index_name(index_name))
+        return qrels, texts
+
```
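trivia_qa rows are read the same way, except the query field is `question` and relevance comes from `search_results['rank']`, so the grade stored in qrels is the search-engine rank rather than a binary label. A fabricated row with just those fields:

```python
import pandas as pd

# Fabricated trivia_qa-style row; `rank` doubles as the qrels grade.
row = {
    "question": "who wrote the iliad",
    "search_results": {
        "rank": [1, 2],
        "search_context": ["The Iliad is attributed to Homer ...",
                           "Greek epic poetry predates ..."],
    },
}
data = pd.DataFrame([row])
print(data.iloc[0]["question"], list(data.iloc[0]["search_results"]["rank"]))
```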
```diff
+    def miracl_index(self, file_path, corpus_path, index_name):
+
+        corpus_total = {}
+        for corpus_file in os.listdir(corpus_path):
+            tmp_data = pd.read_json(os.path.join(corpus_path, corpus_file), lines=True)
+            for index, i in tmp_data.iterrows():
+                corpus_total[i['docid']] = i['text']
+
+        topics_total = {}
+        for topics_file in os.listdir(os.path.join(file_path, 'topics')):
+            if 'test' in topics_file:
+                continue
+            tmp_data = pd.read_csv(os.path.join(file_path, 'topics', topics_file), sep='\t', names=['qid', 'query'])
+            for index, i in tmp_data.iterrows():
+                topics_total[i['qid']] = i['query']
+
+        qrels = defaultdict(dict)
+        texts = defaultdict(dict)
+        docs = []
+        for qrels_file in os.listdir(os.path.join(file_path, 'qrels')):
+            if 'test' in qrels_file:
+                continue
+
+            tmp_data = pd.read_csv(os.path.join(file_path, 'qrels', qrels_file), sep='\t',
+                                   names=['qid', 'Q0', 'docid', 'relevance'])
+            for i in tqdm(range(len(tmp_data)), colour="green", desc="Indexing:" + qrels_file):
+                query = topics_total[tmp_data.iloc[i]['qid']]
+                text = corpus_total[tmp_data.iloc[i]['docid']]
+                rel = tmp_data.iloc[i]['relevance']
                 d = {
                     "id": get_uuid()
                 }
+                tokenize(d, text, 'english')
                 docs.append(d)
+                texts[d["id"]] = text
+                qrels[query][d["id"]] = int(rel)
                 if len(docs) >= 32:
+                    docs = self.embedding(docs)
+                    ELASTICSEARCH.bulk(docs, search.index_name(index_name))
                     docs = []
 
+        docs = self.embedding(docs)
+        ELASTICSEARCH.bulk(docs, search.index_name(index_name))
 
+        return qrels, texts
 
```
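`miracl_index` expects the miracl-v1.0-<lang> layout: a `topics/` directory of qid-to-query TSVs, a `qrels/` directory of TREC-style TSVs, and a separate JSONL corpus keyed by `docid`. A fabricated sample of the two TSV shapes the code parses (the column names come from the `names=` arguments above, not from headers in the files):

```python
import pandas as pd
from io import StringIO

# Fabricated rows mirroring the columns miracl_index assigns via `names=`.
topics = pd.read_csv(StringIO("q1\twhat is a benchmark\n"),
                     sep='\t', names=['qid', 'query'])
qrels = pd.read_csv(StringIO("q1\tQ0\tdoc1\t1\n"),
                    sep='\t', names=['qid', 'Q0', 'docid', 'relevance'])
print(topics.iloc[0]['query'], int(qrels.iloc[0]['relevance']))
```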
```diff
+    def save_results(self, qrels, run, texts, dataset, file_path):
+        keep_result = []
+        run_keys = list(run.keys())
+        for run_i in tqdm(range(len(run_keys)), desc="Calculating ndcg@10 for single query"):
+            key = run_keys[run_i]
+            keep_result.append({'query': key, 'qrel': qrels[key], 'run': run[key],
+                                'ndcg@10': evaluate({key: qrels[key]}, {key: run[key]}, "ndcg@10")})
+        keep_result = sorted(keep_result, key=lambda kk: kk['ndcg@10'])
+        with open(os.path.join(file_path, dataset + '_result.md'), 'w', encoding='utf-8') as f:
+            f.write('## Score For Every Query\n')
+            for keep_result_i in keep_result:
+                f.write('### query: ' + keep_result_i['query'] + ' ndcg@10:' + str(keep_result_i['ndcg@10']) + '\n')
+                scores = [[i[0], i[1]] for i in keep_result_i['run'].items()]
+                scores = sorted(scores, key=lambda kk: kk[1])
+                for score in scores[:10]:
+                    f.write('- text: ' + str(texts[score[0]]) + '\t qrel: ' + str(score[1]) + '\n')
+        print(os.path.join(file_path, dataset + '_result.md'), 'Saved!')
+
```
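`save_results` sorts queries by their individual ndcg@10 ascending, so the weakest queries lead the report, and lists up to ten retrieved passages per query with the run score each received (also ascending). A rough sketch of the emitted markdown, with invented values:

```python
# Shape of the report save_results writes (scores invented). Note the report
# labels the run score "qrel" because score[1] is taken from the run items.
sample = """## Score For Every Query
### query: what is a benchmark ndcg@10:0.6131
- text: An unrelated passage ...\t qrel: 0.41
- text: A benchmark measures retrieval quality ...\t qrel: 0.92
"""
print(sample)
```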
```diff
+    def __call__(self, dataset, file_path, miracl_corpus=''):
+        if dataset == "ms_marco_v1.1":
+            qrels, texts = self.ms_marco_index(file_path, "benchmark_ms_marco_v1.1")
+            run = self._get_retrieval(qrels, "benchmark_ms_marco_v1.1")
+            print(dataset, evaluate(qrels, run, ["ndcg@10", "map@5", "mrr"]))
+            self.save_results(qrels, run, texts, dataset, file_path)
+        if dataset == "trivia_qa":
+            qrels, texts = self.trivia_qa_index(file_path, "benchmark_trivia_qa")
+            run = self._get_retrieval(qrels, "benchmark_trivia_qa")
+            print(dataset, evaluate(qrels, run, ["ndcg@10", "map@5", "mrr"]))
+            self.save_results(qrels, run, texts, dataset, file_path)
+        if dataset == "miracl":
+            for lang in ['ar', 'bn', 'de', 'en', 'es', 'fa', 'fi', 'fr', 'hi', 'id', 'ja', 'ko', 'ru', 'sw', 'te', 'th',
+                         'yo', 'zh']:
+                if not os.path.isdir(os.path.join(file_path, 'miracl-v1.0-' + lang)):
+                    print('Directory: ' + os.path.join(file_path, 'miracl-v1.0-' + lang) + ' not found!')
+                    continue
+                if not os.path.isdir(os.path.join(file_path, 'miracl-v1.0-' + lang, 'qrels')):
+                    print('Directory: ' + os.path.join(file_path, 'miracl-v1.0-' + lang, 'qrels') + ' not found!')
+                    continue
+                if not os.path.isdir(os.path.join(file_path, 'miracl-v1.0-' + lang, 'topics')):
+                    print('Directory: ' + os.path.join(file_path, 'miracl-v1.0-' + lang, 'topics') + ' not found!')
+                    continue
+                if not os.path.isdir(os.path.join(miracl_corpus, 'miracl-corpus-v1.0-' + lang)):
+                    print('Directory: ' + os.path.join(miracl_corpus, 'miracl-corpus-v1.0-' + lang) + ' not found!')
+                    continue
+                qrels, texts = self.miracl_index(os.path.join(file_path, 'miracl-v1.0-' + lang),
+                                                 os.path.join(miracl_corpus, 'miracl-corpus-v1.0-' + lang),
+                                                 "benchmark_miracl_" + lang)
+                run = self._get_retrieval(qrels, "benchmark_miracl_" + lang)
+                print(dataset, evaluate(qrels, run, ["ndcg@10", "map@5", "mrr"]))
+                self.save_results(qrels, run, texts, dataset, file_path)
 
```
```diff
+
+if __name__ == '__main__':
+    print('*****************RAGFlow Benchmark*****************')
+    kb_id = input('Please input kb_id:\n')
+    ex = Benchmark(kb_id)
+    dataset = input(
+        'RAGFlow Benchmark Support:\n\tms_marco_v1.1:<https://huggingface.co/datasets/microsoft/ms_marco>\n\ttrivia_qa:<https://huggingface.co/datasets/mandarjoshi/trivia_qa>\n\tmiracl:<https://huggingface.co/datasets/miracl/miracl>\nPlease input dataset choice:\n')
+    if dataset in ['ms_marco_v1.1', 'trivia_qa']:
+        if dataset == "ms_marco_v1.1":
+            print("Notice: Please provide the ms_marco_v1.1 dataset only. ms_marco_v2.1 is not supported!")
+        dataset_path = input('Please input ' + dataset + ' dataset path:\n')
+        ex(dataset, dataset_path)
+    elif dataset == 'miracl':
+        dataset_path = input('Please input ' + dataset + ' dataset path:\n')
+        corpus_path = input('Please input ' + dataset + '-corpus dataset path:\n')
+        ex(dataset, dataset_path, miracl_corpus=corpus_path)
+    else:
+        print("Dataset: ", dataset, "not supported!")
```
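Beyond the interactive prompt, the same class can be driven directly; a sketch with a hypothetical kb_id and dataset path, assuming the repository root is on PYTHONPATH so `rag.benchmark` imports cleanly:

```python
from rag.benchmark import Benchmark

ex = Benchmark("your_kb_id")                   # id of an existing knowledge base
ex("ms_marco_v1.1", "/path/to/ms_marco_v1.1")  # indexes, retrieves, prints metrics,
                                               # and writes ms_marco_v1.1_result.md
```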