import logging
import json
import re

from rag.utils.doc_store_conn import MatchTextExpr
from rag.nlp import rag_tokenizer, term_weight, synonym
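
# FulltextQueryer turns a user question into a weighted full-text query
# (a MatchTextExpr over query_fields) and provides token-overlap and hybrid
# (token + embedding) similarity helpers for scoring candidate texts against a query.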


class FulltextQueryer:
    def __init__(self):
        self.tw = term_weight.Dealer()
        self.syn = synonym.Dealer()
        self.query_fields = [
            "title_tks^10",
            "title_sm_tks^5",
            "important_kwd^30",
            "important_tks^20",
            "question_tks^20",
            "content_ltks^2",
            "content_sm_ltks",
        ]
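
    # The "field^boost" suffixes in query_fields above use query-string boost
    # syntax (Elasticsearch/Lucene-style weighting is assumed for the underlying
    # document store): a larger number makes matches in that field count more
    # toward the relevance score.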

    @staticmethod
    def subSpecialChar(line):
        return re.sub(r"([:\{\}/\[\]\-\*\"\(\)\|\+~\^])", r"\\\1", line).strip()

    @staticmethod
    def isChinese(line):
        # Heuristic: treat very short queries (<= 3 whitespace-separated tokens) or
        # queries where at least 70% of the tokens are not purely ASCII letters as
        # Chinese/mixed text.
        arr = re.split(r"[ \t]+", line)
        if len(arr) <= 3:
            return True
        e = 0
        for t in arr:
            if not re.match(r"[a-zA-Z]+$", t):
                e += 1
        return e * 1.0 / len(arr) >= 0.7

    @staticmethod
    def rmWWW(txt):
        # Strip interrogative words (Chinese) and leading question/stop words
        # (English) so they do not dominate the full-text match.
        patts = [
            (
                r"是*(什么样的|哪家|一下|那家|请问|啥样|咋样了|什么时候|何时|何地|何人|是否|是不是|多少|哪里|怎么|哪儿|怎么样|如何|哪些|是啥|啥是|啊|吗|呢|吧|咋|什么|有没有|呀|谁|哪位|哪个)是*",
                "",
            ),
            (r"(^| )(what|who|how|which|where|why)('re|'s)? ", " "),
            (
                r"(^| )('s|'re|is|are|were|was|do|does|did|don't|doesn't|didn't|has|have|be|there|you|me|your|my|mine|just|please|may|i|should|would|wouldn't|will|won't|done|go|for|with|so|the|a|an|by|i'm|it's|he's|she's|they|they're|you're|as|by|on|in|at|up|out|down|of|to|or|and|if) ",
                " ",
            ),
        ]
        for r, p in patts:
            txt = re.sub(r, p, txt, flags=re.IGNORECASE)
        return txt

    def question(self, txt, tbl="qa", min_match: float = 0.6):
        txt = re.sub(
            r"[ :|\r\n\t,,。??/`!!&^%%()\[\]{}<>]+",
            " ",
            rag_tokenizer.tradi2simp(rag_tokenizer.strQ2B(txt.lower())),
        ).strip()
        txt = FulltextQueryer.rmWWW(txt)

        if not self.isChinese(txt):
            txt = FulltextQueryer.rmWWW(txt)
            tks = rag_tokenizer.tokenize(txt).split()
            keywords = [t for t in tks if t]
            tks_w = self.tw.weights(tks, preprocess=False)
            tks_w = [(re.sub(r"[ \\\"'^]", "", tk), w) for tk, w in tks_w]
            tks_w = [(re.sub(r"^[a-z0-9]$", "", tk), w) for tk, w in tks_w if tk]
            tks_w = [(re.sub(r"^[\+-]", "", tk), w) for tk, w in tks_w if tk]
            tks_w = [(tk.strip(), w) for tk, w in tks_w if tk.strip()]
            syns = []
            for tk, w in tks_w:
                syn = self.syn.lookup(tk)
                syn = rag_tokenizer.tokenize(" ".join(syn)).split()
                keywords.extend(syn)
                syn = ['"{}"^{:.4f}'.format(s, w / 4.) for s in syn if s.strip()]
                syns.append(" ".join(syn))

            q = [
                "({}^{:.4f}".format(tk, w) + " {})".format(syn)
                for (tk, w), syn in zip(tks_w, syns)
                if tk and not re.match(r"[.^+\(\)-]", tk)
            ]
            for i in range(1, len(tks_w)):
                left, right = tks_w[i - 1][0].strip(), tks_w[i][0].strip()
                if not left or not right:
                    continue
                q.append(
                    '"%s %s"^%.4f'
                    % (
                        tks_w[i - 1][0],
                        tks_w[i][0],
                        max(tks_w[i - 1][1], tks_w[i][1]) * 2,
                    )
                )
            if not q:
                q.append(txt)
            query = " ".join(q)
            return MatchTextExpr(self.query_fields, query, 100), keywords
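
        # Everything below handles queries judged to be Chinese/mixed text: each
        # phrase produced by self.tw.split() is expanded with fine-grained tokens
        # and synonyms, then the phrases are OR-combined into a weighted boolean
        # query with a minimum_should_match constraint.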

        def need_fine_grained_tokenize(tk):
            if len(tk) < 3:
                return False
            if re.match(r"[0-9a-z\.\+#_\*-]+$", tk):
                return False
            return True

        txt = FulltextQueryer.rmWWW(txt)
        qs, keywords = [], []
        for tt in self.tw.split(txt)[:256]:
            if not tt:
                continue
            keywords.append(tt)
            twts = self.tw.weights([tt])
            syns = self.syn.lookup(tt)
            if syns and len(keywords) < 32:
                keywords.extend(syns)
            logging.debug(json.dumps(twts, ensure_ascii=False))
            tms = []
            for tk, w in sorted(twts, key=lambda x: x[1] * -1):
                sm = (
                    rag_tokenizer.fine_grained_tokenize(tk).split()
                    if need_fine_grained_tokenize(tk)
                    else []
                )
                sm = [
                    re.sub(
                        r"[ ,\./;'\[\]\\`~!@#$%\^&\*\(\)=\+_<>\?:\"\{\}\|,。;‘’【】、!¥……()——《》?:“”-]+",
                        "",
                        m,
                    )
                    for m in sm
                ]
                sm = [FulltextQueryer.subSpecialChar(m) for m in sm if len(m) > 1]
                sm = [m for m in sm if len(m) > 1]

                if len(keywords) < 32:
                    keywords.append(re.sub(r"[ \\\"']+", "", tk))
                    keywords.extend(sm)

                tk_syns = self.syn.lookup(tk)
                tk_syns = [FulltextQueryer.subSpecialChar(s) for s in tk_syns]
                if len(keywords) < 32:
                    keywords.extend([s for s in tk_syns if s])
                tk_syns = [rag_tokenizer.fine_grained_tokenize(s) for s in tk_syns if s]
                tk_syns = [f'"{s}"' if s.find(" ") > 0 else s for s in tk_syns]

                if len(keywords) >= 32:
                    break

                tk = FulltextQueryer.subSpecialChar(tk)
                if tk.find(" ") > 0:
                    tk = '"%s"' % tk
                if tk_syns:
                    tk = f"({tk} OR (%s)^0.2)" % " ".join(tk_syns)
                if sm:
                    tk = f'{tk} OR "%s" OR ("%s"~2)^0.5' % (" ".join(sm), " ".join(sm))
                if tk.strip():
                    tms.append((tk, w))

            tms = " ".join([f"({t})^{w}" for t, w in tms])

            if len(twts) > 1:
                tms += ' ("%s"~2)^1.5' % rag_tokenizer.tokenize(tt)

            syns = " OR ".join(
                [
                    '"%s"'
                    % rag_tokenizer.tokenize(FulltextQueryer.subSpecialChar(s))
                    for s in syns
                ]
            )
            if syns and tms:  # only wrap when some terms survived; avoids emitting an empty "()" group
                tms = f"({tms})^5 OR ({syns})^0.7"

            qs.append(tms)

        if qs:
            query = " OR ".join([f"({t})" for t in qs if t])
            return MatchTextExpr(
                self.query_fields, query, 100, {"minimum_should_match": min_match}
            ), keywords
        return None, keywords

    def hybrid_similarity(self, avec, bvecs, atks, btkss, tkweight=0.3, vtweight=0.7):
        from sklearn.metrics.pairwise import cosine_similarity as CosineSimilarity
        import numpy as np

        sims = CosineSimilarity([avec], bvecs)
        tksim = self.token_similarity(atks, btkss)
        return np.array(sims[0]) * vtweight + np.array(tksim) * tkweight, tksim, sims[0]
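
    # hybrid_similarity above blends a dense score (cosine similarity between the
    # query vector and each candidate vector, weighted by vtweight) with a sparse
    # score (the term-weight overlap computed by token_similarity below, weighted
    # by tkweight).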

    def token_similarity(self, atks, btkss):
        def toDict(tks):
            d = {}
            if isinstance(tks, str):
                tks = tks.split()
            for t, c in self.tw.weights(tks, preprocess=False):
                if t not in d:
                    d[t] = 0
                d[t] += c
            return d

        atks = toDict(atks)
        btkss = [toDict(tks) for tks in btkss]
        return [self.similarity(atks, btks) for btks in btkss]

    def similarity(self, qtwt, dtwt):
        if isinstance(dtwt, str):
            dtwt = {t: w for t, w in self.tw.weights(self.tw.split(dtwt), preprocess=False)}
        if isinstance(qtwt, str):
            qtwt = {t: w for t, w in self.tw.weights(self.tw.split(qtwt), preprocess=False)}
        s = 1e-9
        for k, v in qtwt.items():
            if k in dtwt:
                s += v
        q = 1e-9
        for k, v in qtwt.items():
            q += v
        return s / q
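
    # Worked example for similarity() with illustrative numbers (not taken from
    # real data): qtwt = {"llm": 0.6, "finetune": 0.4}, dtwt = {"llm": 0.9}
    # -> matched query weight 0.6, total query weight 1.0, score ~= 0.6.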

    def paragraph(self, content_tks: str, keywords: list = [], keywords_topn=30):
        if isinstance(content_tks, str):
            # content_tks is a whitespace-separated token string; split it into
            # tokens instead of iterating over individual characters.
            content_tks = [c.strip() for c in content_tks.strip().split() if c.strip()]
        tks_w = self.tw.weights(content_tks, preprocess=False)

        keywords = [f'"{k.strip()}"' for k in keywords]
        for tk, w in sorted(tks_w, key=lambda x: x[1] * -1)[:keywords_topn]:
            tk_syns = self.syn.lookup(tk)
            tk_syns = [FulltextQueryer.subSpecialChar(s) for s in tk_syns]
            tk_syns = [rag_tokenizer.fine_grained_tokenize(s) for s in tk_syns if s]
            tk_syns = [f'"{s}"' if s.find(" ") > 0 else s for s in tk_syns]
            tk = FulltextQueryer.subSpecialChar(tk)
            if tk.find(" ") > 0:
                tk = '"%s"' % tk
            if tk_syns:
                tk = f"({tk} OR (%s)^0.2)" % " ".join(tk_syns)
            if tk:
                keywords.append(f"{tk}^{w}")

        return MatchTextExpr(
            self.query_fields,
            " ".join(keywords),
            100,
            {"minimum_should_match": min(3, len(keywords) / 10)},
        )
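

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the library API). It
# assumes the rag package's term-weight and synonym resources are available in
# the running environment; the sample question text is made up.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    queryer = FulltextQueryer()
    # question() returns a (MatchTextExpr, keywords) pair: keywords is a plain
    # list of expanded query terms, and the MatchTextExpr carries the weighted
    # query string plus the boosted fields it should be matched against.
    match_expr, kws = queryer.question("how to deploy the service on kubernetes")
    print("keywords:", kws)
    print("match expression:", match_expr)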