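"""ChatPDF - a Gradio Space for conversational question answering over PDFs.

Pipeline: PDF pages are split into overlapping chunks, embedded with a
multilingual MiniLM sentence-transformer into an in-memory Chroma collection,
and queried through a LangChain ConversationalRetrievalChain backed by
Mixtral-8x7B-Instruct served via the Hugging Face Hub inference endpoints.
"""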
import gradio as gr
import os
from pathlib import Path

import chromadb
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import HuggingFaceHub
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
default_persist_directory = '/ChromaDB'
llm_name0 = "mistralai/Mixtral-8x7B-Instruct-v0.1"
list_llm = [llm_name0]
list_llm_simple = [os.path.basename(llm) for llm in list_llm]
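# NOTE: HuggingFaceHub authenticates via the HUGGINGFACEHUB_API_TOKEN
# environment variable; on a Space this is typically set as a repository secret.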
# Load PDF documents and create doc splits
def load_doc(list_file_path, chunk_size, chunk_overlap):
    loaders = [PyPDFLoader(x) for x in list_file_path]
    pages = []
    for loader in loaders:
        pages.extend(loader.load())
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap)
    doc_splits = text_splitter.split_documents(pages)
    return doc_splits
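# Example (hypothetical file names), matching the UI slider defaults below:
#   splits = load_doc(["report.pdf"], chunk_size=600, chunk_overlap=40)
#   -> a list of LangChain Documents, each carrying "source" and "page" metadata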
# Create in-memory vector database
def create_db(splits, collection_name):
    embedding = HuggingFaceEmbeddings(model_name="sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2")
    # EphemeralClient keeps the collection in memory; nothing is persisted to disk
    new_client = chromadb.EphemeralClient()
    vectordb = Chroma.from_documents(
        documents=splits,
        embedding=embedding,
        client=new_client,
        collection_name=collection_name,
    )
    return vectordb
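# The embedding model is interchangeable: any sentence-transformers model name
# accepted by HuggingFaceEmbeddings (e.g. "sentence-transformers/all-MiniLM-L6-v2")
# should work here, at the cost of re-embedding the documents.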
# Load vector database
def load_db():
    embedding = HuggingFaceEmbeddings(model_name="sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2")
    vectordb = Chroma(
        # persist_directory=default_persist_directory,
        embedding_function=embedding)
    return vectordb
# Initialize LangChain LLM chain
def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
    # HuggingFaceHub uses HF inference endpoints, so no local model weights are loaded
    progress(0.5, desc="Initializing HF Hub...")
    if llm_model == "mistralai/Mixtral-8x7B-Instruct-v0.1":
        llm = HuggingFaceHub(
            repo_id=llm_model,
            model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "load_in_8bit": True}
        )
    else:
        raise ValueError(f"Unsupported LLM model: {llm_model}")
    progress(0.75, desc="Defining buffer memory...")
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        output_key='answer',
        return_messages=True
    )
    # retriever = vector_db.as_retriever(search_type="similarity", search_kwargs={'k': 3})
    retriever = vector_db.as_retriever()
    progress(0.8, desc="Defining retrieval chain...")
    qa_chain = ConversationalRetrievalChain.from_llm(
        llm,
        retriever=retriever,
        chain_type="stuff",
        memory=memory,
        return_source_documents=True,
        # return_generated_question=True,
        # verbose=True,
    )
    progress(0.9, desc="Done!")
    return qa_chain
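# On each turn the chain condenses the question and buffered history into a
# standalone query, retrieves the most similar chunks from vector_db, and
# "stuffs" them into a single prompt for the LLM.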
# Initialize database
def initialize_database(list_file_obj, chunk_size, chunk_overlap, progress=gr.Progress()):
    # Create list of documents (when valid)
    list_file_path = [x.name for x in list_file_obj if x is not None]
    # Use the first file's stem as the Chroma collection name
    collection_name = Path(list_file_path[0]).stem
    print('list_file_path: ', list_file_path)
    print('Collection name: ', collection_name)
    progress(0.25, desc="Loading document...")
    # Load documents and create splits
    doc_splits = load_doc(list_file_path, chunk_size, chunk_overlap)
    # Create vector database
    progress(0.5, desc="Generating vector database...")
    vector_db = create_db(doc_splits, collection_name)
    progress(0.9, desc="Done!")
    return vector_db, collection_name, "Complete!"
def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
    # llm_option is the index of the radio selection into list_llm
    llm_name = list_llm[llm_option]
    print("llm_name: ", llm_name)
    qa_chain = initialize_llmchain(llm_name, llm_temperature, max_tokens, top_k, vector_db, progress)
    return qa_chain, "Complete!"
def format_chat_history(message, chat_history):
    formatted_chat_history = []
    for user_message, bot_message in chat_history:
        formatted_chat_history.append(f"User: {user_message}")
        formatted_chat_history.append(f"Assistant: {bot_message}")
    return formatted_chat_history
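# format_chat_history is kept for manual history formatting; in practice the
# chain's buffer memory manages the running history (see conversation below).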
def conversation(qa_chain, message, history):
    # The chain's ConversationBufferMemory tracks history internally,
    # so an empty chat_history is passed here to avoid feeding it twice
    formatted_chat_history = ""
    # formatted_chat_history = format_chat_history(message, history)
    # Generate response using QA chain
    response = qa_chain({"question": message, "chat_history": formatted_chat_history})
    response_answer = response["answer"]
    response_sources = response["source_documents"]
    response_source1 = response_sources[0].page_content.strip()
    response_source2 = response_sources[1].page_content.strip()
    # LangChain page metadata is zero-based; convert to one-based page numbers
    response_source1_page = response_sources[0].metadata["page"] + 1
    response_source2_page = response_sources[1].metadata["page"] + 1
    print('Response: ', response)
    # Append user message and response to chat history
    new_history = history + [(message, response_answer)]
    return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page
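# NOTE: the two reference panels assume the retriever returns at least two
# source documents; with very short PDFs, slicing response["source_documents"][:2]
# and padding missing entries would be a safer sketch.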
# Collect local file paths from the Gradio file objects
def upload_file(file_obj):
    list_file_path = []
    for file in file_obj:
        list_file_path.append(file.name)
    return list_file_path
def demo():
    with gr.Blocks(theme="base") as demo:
        vector_db = gr.State()
        qa_chain = gr.State()
        collection_name = gr.State()

        gr.Markdown("""<center><h2>ChatPDF</h2></center>""")
        with gr.Tab("Step 1 - Document pre-processing"):
            with gr.Row():
                document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload your PDF documents (single or multiple)")
            with gr.Row():
                db_choice = gr.Radio(["ChromaDB"], label="Vector database type", value="ChromaDB", type="index", info="Choose your vector database")
            with gr.Accordion("Advanced options - Document text splitter", open=False):
                with gr.Row():
                    slider_chunk_size = gr.Slider(minimum=100, maximum=1000, value=600, step=20, label="Chunk size", info="Chunk size", interactive=True)
                with gr.Row():
                    slider_chunk_overlap = gr.Slider(minimum=10, maximum=200, value=40, step=10, label="Chunk overlap", info="Chunk overlap", interactive=True)
            with gr.Row():
                db_progress = gr.Textbox(label="Vector database initialization", value="None")
            with gr.Row():
                db_btn = gr.Button("Generate vector database...")

        with gr.Tab("Step 2 - QA chain initialization"):
            with gr.Row():
                llm_btn = gr.Radio(list_llm_simple,
                    label="LLM models", value=list_llm_simple[0], type="index", info="Choose your LLM model")
            with gr.Accordion("Advanced options - LLM model", open=False):
                with gr.Row():
                    slider_temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.7, step=0.1, label="Temperature", info="Model temperature", interactive=True)
                with gr.Row():
                    slider_maxtokens = gr.Slider(minimum=224, maximum=4096, value=1024, step=32, label="Max Tokens", info="Model max tokens", interactive=True)
                with gr.Row():
                    slider_topk = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="top-k samples", info="Model top-k samples", interactive=True)
            with gr.Row():
                llm_progress = gr.Textbox(value="None", label="QA chain initialization")
            with gr.Row():
                qachain_btn = gr.Button("Initialize question-answering chain...")

        with gr.Tab("Step 3 - Conversation with chatbot"):
            chatbot = gr.Chatbot(height=300)
            with gr.Accordion("Advanced - Document references", open=False):
                with gr.Row():
                    doc_source1 = gr.Textbox(label="Reference 1", lines=2, container=True, scale=20)
                    source1_page = gr.Number(label="Page", scale=1)
                with gr.Row():
                    doc_source2 = gr.Textbox(label="Reference 2", lines=2, container=True, scale=20)
                    source2_page = gr.Number(label="Page", scale=1)
            with gr.Row():
                msg = gr.Textbox(placeholder="Type message", container=True)
            with gr.Row():
                submit_btn = gr.Button("Submit")
                clear_btn = gr.ClearButton([msg, chatbot])

        # Preprocessing events
        db_btn.click(initialize_database,
            inputs=[document, slider_chunk_size, slider_chunk_overlap],
            outputs=[vector_db, collection_name, db_progress])
        qachain_btn.click(initialize_LLM,
            inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db],
            outputs=[qa_chain, llm_progress]).then(lambda: [None, "", 0, "", 0],
            inputs=None,
            outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page],
            queue=False)

        # Chatbot events
        msg.submit(conversation,
            inputs=[qa_chain, msg, chatbot],
            outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page],
            queue=False)
        submit_btn.click(conversation,
            inputs=[qa_chain, msg, chatbot],
            outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page],
            queue=False)
        clear_btn.click(lambda: [None, "", 0, "", 0],
            inputs=None,
            outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page],
            queue=False)
    demo.queue().launch(debug=True)


if __name__ == "__main__":
    demo()
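# When hosted as a Space this file is typically named app.py and launched
# automatically; locally it can be run the same way: python app.py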