from typing import List, Tuple

from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.docstore.document import Document as LangchainDocument
from langchain_community.vectorstores import FAISS
from langchain_core.prompts import ChatPromptTemplate
from transformers import pipeline
|
def prompt_template():
    """Build the chat prompt for the RAG chain.

    The system message instructs the model to answer only from the retrieved
    context (injected into the {context} placeholder); the human message
    carries the user's question.
    """
    prompt_in_chat_format = """Using the information contained in the given context, give a comprehensive answer to the question.
Respond only to the question asked; the response should be concise and relevant to the question.
Provide the number of the source document when relevant. If the answer cannot be deduced from the context, do not give an answer. Please answer in French.

{context}
"""

    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", prompt_in_chat_format),
            ("human", "{input}"),
        ]
    )
    return prompt
|
def answer_with_rag(
    query: str, retriever, llm
) -> Tuple[str, List[LangchainDocument]]:
    """Answer `query` using retrieval-augmented generation.

    The retriever fetches the most relevant documents, which are "stuffed"
    into the prompt's {context} slot before `llm` generates the answer.

    Returns the generated answer and the retrieved source documents.
    """
    rag_prompt = prompt_template()
    # Chain that stuffs the retrieved documents into the prompt and calls the LLM.
    document_chain = create_stuff_documents_chain(llm, rag_prompt)
    # Chain that runs the retriever first, then feeds its output to document_chain.
    retrieval_chain = create_retrieval_chain(retriever, document_chain)

    print("=> Generating answer...")
    response = retrieval_chain.invoke({"input": query})
    return response["answer"], response["context"]
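

if __name__ == "__main__":
    # Hedged usage sketch, not part of the module's API: it shows one way to
    # wire a retriever and an LLM into answer_with_rag. The embedding and
    # generation model names below are illustrative assumptions, as is the
    # in-memory toy corpus; swap in the project's own components (e.g. the
    # helpers in src.retriever) where appropriate.
    from langchain_community.embeddings import HuggingFaceEmbeddings
    from langchain_community.llms import HuggingFacePipeline

    # Toy corpus standing in for real source documents.
    docs = [
        LangchainDocument(page_content="Paris est la capitale de la France.", metadata={"source": 0}),
        LangchainDocument(page_content="Berlin est la capitale de l'Allemagne.", metadata={"source": 1}),
    ]

    # Build a FAISS index over the documents and expose it as a retriever.
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    retriever = FAISS.from_documents(docs, embeddings).as_retriever(search_kwargs={"k": 2})

    # Wrap a local text-generation pipeline as a LangChain LLM.
    generator = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-beta", max_new_tokens=256)
    llm = HuggingFacePipeline(pipeline=generator)

    answer, sources = answer_with_rag("Quelle est la capitale de la France ?", retriever, llm)
    print(answer)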