import logging
import os
import requests
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings


class RAG:
    # English: "Sorry, I could not answer your question."
    NO_ANSWER_MESSAGE: str = "Ho sento, no he pogut respondre la teva pregunta."

    #vectorstore = "index-intfloat_multilingual-e5-small-500-100-CA-ES"  # mixed
    #vectorstore = "vectorestore"  # CA only
    vectorstore = "index-BAAI_bge-m3-1500-200-recursive_splitter-CA_ES_UE"
    def __init__(self, hf_token, embeddings_model, model_name):
        self.model_name = model_name
        self.hf_token = hf_token

        # Load the FAISS vector store with the same embeddings model used to build it.
        embeddings = HuggingFaceEmbeddings(model_name=embeddings_model, model_kwargs={'device': 'cpu'})
        self.vectore_store = FAISS.load_local(self.vectorstore, embeddings, allow_dangerous_deserialization=True)
        logging.info("RAG loaded!")
    def similarity_search(self, instruction, number_of_contexts=2):
        # Retrieve the top-k most similar chunks (with scores) from the vector store.
        documents = self.vectore_store.similarity_search_with_score(instruction, k=number_of_contexts)
        return documents
    def beautiful_context(self, docs):
        # Build a plain-text context, a context enriched with title/URL metadata,
        # and the list of source URLs.
        text_context = ""
        full_context = ""
        source_context = []
        for doc in docs:
            text_context += doc[0].page_content
            full_context += doc[0].metadata["Títol de la norma"] + "\n\n"
            full_context += doc[0].metadata["url"] + "\n\n"
            full_context += doc[0].page_content + "\n"
            source_context.append(doc[0].metadata["url"])

        return text_context, full_context, source_context
    def get_context(self, prompt: str, model_parameters: dict):
        # Public entry point: retrieve the top chunks and format them for the prompt.
        try:
            docs = self.similarity_search(prompt, model_parameters["NUM_CHUNKS"])
            return self.beautiful_context(docs)
        except Exception as err:
            logging.error(err)
            return None, None, None
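

# --- Usage sketch (illustrative only) ---
# A minimal example of wiring the class together. The values below are assumptions:
# the embeddings model is inferred from the index name (BAAI/bge-m3), the token is
# assumed to come from the environment, and model_name is a placeholder.
if __name__ == "__main__":
    rag = RAG(
        hf_token=os.environ.get("HF_TOKEN"),   # assumed environment variable
        embeddings_model="BAAI/bge-m3",        # assumption inferred from the index name
        model_name="generation-model",         # placeholder, not taken from this file
    )
    text, full, sources = rag.get_context(
        "Quina normativa regula els contractes públics?",  # example query (Catalan)
        {"NUM_CHUNKS": 2},
    )
    print(full)
    print(sources)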