|
import os |
|
|
|
import gradio as gr |
|
from dotenv import load_dotenv |
|
from PyPDF2 import PdfReader |
|
from langchain.vectorstores import Chroma |
|
from langchain.vectorstores import FAISS |
|
from langchain.document_loaders import PyPDFLoader |
|
from langchain.text_splitter import CharacterTextSplitter |
|
from langchain.embeddings import HuggingFaceInferenceAPIEmbeddings |
|
from langchain.embeddings import HuggingFaceBgeEmbeddings |
|
from langchain.memory import ConversationBufferMemory |
|
from langchain.chains import ConversationalRetrievalChain |
|
from htmlTemplates import css, bot_template, user_template |
|
from langchain.llms import HuggingFaceHub |
|
|
|
|
|
|
|
# HuggingFace Inference API token; read with [] so a missing 'HF' variable
# fails fast with a KeyError at startup rather than later at request time.
inference_api_key = os.environ['HF']

# Remote (API-hosted) sentence-transformers embeddings.
# NOTE(review): this object is never used below — the FAISS index is built
# with the local BGE embeddings instead; confirm whether this can be removed.
api_hf_embeddings = HuggingFaceInferenceAPIEmbeddings(

    api_key=inference_api_key,

    model_name="sentence-transformers/all-MiniLM-l6-v2"

)
|
|
|
|
|
# --- Document ingestion --------------------------------------------------
# Load the ALiBi paper page-by-page, then cut it into small chunks for
# embedding and indexing.
pdf_loader = PyPDFLoader("./new_papers/ALiBi.pdf")
documents = pdf_loader.load()

# Quick sanity check: show the first loaded page between separators.
separator = "-----------"
print(separator)
print(documents[0])
print(separator)

# Non-overlapping 100-character chunks (chunk_overlap=0).
splitter = CharacterTextSplitter(chunk_size=100, chunk_overlap=0)
vdocuments = splitter.split_documents(documents)
|
|
|
|
|
|
|
|
|
# --- Vector index --------------------------------------------------------
# Local BGE embedding model on CPU; normalized embeddings make inner-product
# and cosine similarity equivalent for FAISS scoring.
model = "BAAI/bge-base-en-v1.5"
encode_kwargs = {
    "normalize_embeddings": True
}
embeddings = HuggingFaceBgeEmbeddings(
    model_name=model, encode_kwargs=encode_kwargs, model_kwargs={"device": "cpu"}
)

# BUG FIX: `vdocuments` is a list of Document objects produced by
# split_documents(), so the index must be built with from_documents();
# from_texts() expects a list of raw strings and would break here.
api_db = FAISS.from_documents(vdocuments, embedding=embeddings)

# BUG FIX: the original `api_db.as_retriever.similarity(...)` accessed
# as_retriever (a method) as an attribute and called a nonexistent
# `.similarity`, raising AttributeError at runtime. Use similarity_search()
# as a startup smoke test of the index.
api_db.similarity_search("What is ICD?")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def pdf_retrieval(query):
    """Return the FAISS similarity-search hits for *query* against the indexed PDF."""
    return api_db.similarity_search(query)
|
|
|
|
|
|
|
# --- Web UI --------------------------------------------------------------
# Single text box wired to the retrieval function; live=True re-runs the
# search on every keystroke.
# NOTE(review): the description mentions "HF Inference API Embeddings", but
# the index above is built with local BGE embeddings — confirm which is meant.
api_tool = gr.Interface(
    fn=pdf_retrieval,
    inputs=[gr.Textbox()],
    outputs=gr.Textbox(),
    live=True,
    title="API PDF Retrieval Tool",
    description="This tool indexes PDF documents and retrieves relevant answers based on a given query (HF Inference API Embeddings).",
)

# Start the Gradio server (blocks until shut down).
api_tool.launch()