#!pip install -q gradio langchain pypdf chromadb
import os
import gradio as gr
from langchain.vectorstores import Chroma
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import HuggingFaceInferenceAPIEmbeddings
# Use Hugging Face Inference API embeddings
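# NOTE: this assumes a Hugging Face API token with Inference API access is stored
# in the "HF" environment variable; adjust the variable name to your setup.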
inference_api_key = os.environ['HF']
api_hf_embeddings = HuggingFaceInferenceAPIEmbeddings(
    api_key=inference_api_key,
    model_name="sentence-transformers/all-MiniLM-L6-v2"
)
# Load and process the PDF files
loader = PyPDFLoader("./new_papers/ALiBi.pdf")
documents = loader.load()
print("-----------")
print(documents[0])
print("-----------")
# Split the documents into chunks before indexing
# (chunk_size/chunk_overlap values below are illustrative)
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
docs = text_splitter.split_documents(documents)
# Create Chroma vector store; embeddings are computed via the HF Inference API
api_db = Chroma.from_documents(docs, api_hf_embeddings, collection_name="api-collection")
# Define the PDF retrieval function
def pdf_retrieval(query):
    # Run the query against the vector store and return the most similar chunks
    response = api_db.similarity_search(query)
    return response
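# Example usage (illustrative; the query string is hypothetical):
# results = pdf_retrieval("What positional encoding does ALiBi replace?")
# print(results[0].page_content)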
# Create Gradio interface for the API retriever
api_tool = gr.Interface(
    fn=pdf_retrieval,
    inputs=[gr.Textbox()],
    outputs=gr.Textbox(),
    live=True,
    title="API PDF Retrieval Tool",
    description="This tool indexes PDF documents and retrieves relevant answers based on a given query (HF Inference API Embeddings).",
)
# Launch the Gradio interface
#api_tool.launch()
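# launch() is left commented out; when enabled, Gradio serves the app locally
# (by default at http://127.0.0.1:7860). Pass share=True for a temporary public link.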