import os
import gradio as gr
from langchain_huggingface import HuggingFaceEndpoint, HuggingFaceEmbeddings
from langchain_core.callbacks import StreamingStdOutCallbackHandler

from langchain_chroma import Chroma
from langchain_text_splitters import CharacterTextSplitter
from pypdf import PdfReader
import random

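# Minimal RAG chat demo: upload .txt / .pdf files, chunk and embed them into a
# persistent Chroma vector store, then answer questions with a Hugging Face
# hosted model grounded on the most similar chunks.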
# Read the Hugging Face API token from the environment instead of hard-coding it.
token = os.environ.get("HF_TOKEN", "")
#repo_id = "mistralai/Mistral-7B-Instruct-v0.3"
repo_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
emb = "sentence-transformers/all-mpnet-base-v2"
hf = HuggingFaceEmbeddings(model_name=emb)
# Open (or create) the persistent Chroma collection. langchain_chroma persists
# automatically, so the old explicit persist() call is no longer needed.
db = Chroma(persist_directory="./chroma_langchain_db", embedding_function=hf)
# Load the document, split it into chunks, embed each chunk and load it into the vector store.
#raw_documents = TextLoader('state_of_the_union.txt').load()
def embed_fn(inp):
    # Split the raw text into small overlapping chunks so each chunk stays
    # well within the embedding model's input size.
    text_splitter = CharacterTextSplitter(chunk_size=200, chunk_overlap=10)
    documents = text_splitter.split_text(inp)
    print(f"Split input into {len(documents)} chunks")
    # Embed the chunks and append them to the persistent store. Chroma.from_texts
    # would create a brand-new collection and leave `db` untouched; add_texts is
    # the right call for an existing store.
    db.add_texts(documents)
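# Dispatch each uploaded file to the matching reader and index its text,
# yielding status messages for the UI along the way.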
def proc_doc(doc_in):
    # doc_in is the list of file paths provided by gr.Files; the yields go to
    # a gr.HTML component, so they must be plain strings.
    for doc in doc_in:
        if doc.endswith(".txt"):
            yield f"Loading document: {doc}"
            embed_fn(read_txt(doc))
            yield "Loaded"
        elif doc.endswith(".pdf"):
            yield f"Loading document: {doc}"
            embed_fn(read_pdf(doc))
            yield "Loaded"


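# Helpers that pull the raw text out of uploaded files.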
def read_txt(txt_path):
    # The with-block closes the file automatically; no explicit close() needed.
    with open(txt_path, "r") as f:
        return f.read()

def read_pdf(pdf_path):
    reader = PdfReader(pdf_path)
    # Concatenate the extracted text of every page.
    return "\n".join(page.extract_text() for page in reader.pages)
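# ChatInterface callback: retrieve the most relevant chunks for the question
# and stream back an answer grounded on them.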
def run_llm(input_text, history):
    # Embed the question and take the three nearest chunks as grounding context.
    qur = hf.embed_query(input_text)
    docs = db.similarity_search_by_vector(qur, k=3)
    # TODO: if the retrieved context ever exceeds the model's context window,
    # split it across multiple calls instead of sending it all at once.
    print(docs)

    callbacks = [StreamingStdOutCallbackHandler()]
    llm = HuggingFaceEndpoint(
        repo_id=repo_id,  # a hub model id belongs in repo_id; endpoint_url is for dedicated endpoints
        max_new_tokens=2056,
        seed=random.randint(1, 99999999999),
        top_k=10,
        top_p=0.95,
        typical_p=0.95,
        temperature=0.01,  # near-greedy decoding keeps answers close to the retrieved context
        repetition_penalty=1.03,
        #callbacks=callbacks,
        streaming=True,
        huggingfacehub_api_token=token,
    )


    print(input_text)
    print(history)
    # Mixtral-style instruct prompt: the retrieved chunks go in the system turn
    # and the question in the user turn. BaseLLM accepts this role-dict message
    # format and flattens it to a single prompt string before the request.
    prompt = [
        {"role": "system", "content": f"[INST] Use this data to help answer users questions: {str(docs)} [/INST]"},
        {"role": "user", "content": f"[INST]{input_text}[/INST]"},
    ]
    
    out = ""
    # llm.invoke would block until the whole reply is generated; llm.stream
    # yields chunks as they arrive so the chat window updates incrementally.
    for chunk in llm.stream(prompt):
        out += chunk
        yield out
    
css="""
#component-0 {
    height:400px;
}
"""

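# UI: a streaming chat pane on top; below it, an HTML status line and a
# multi-file upload box that feeds new documents into the vector store.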
with gr.Blocks(css=css) as app:
    data=gr.State()
    with gr.Column():
        #input_text = gr.Textbox(label="You: ")
        chat = gr.ChatInterface(
            fn=run_llm,
            type="tuples",
            concurrency_limit=20,
        )
    with gr.Row():
        msg=gr.HTML()
        file_in=gr.Files(file_count="multiple")
    # Re-index whenever the uploaded file list changes; proc_doc streams
    # status messages into the HTML component.
    file_in.change(proc_doc, file_in, msg)
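# queue() configures request queuing, which the streaming generator callbacks rely on.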
app.queue().launch()