from datasets import load_dataset
from datasets import Dataset
from sentence_transformers import SentenceTransformer
import faiss
import time
import json
#import torch
import pandas as pd
from llama_cpp import Llama
#from langchain_community.llms import LlamaCpp
from threading import Thread
from huggingface_hub import Repository, upload_file
import os


HF_TOKEN = os.getenv('HF_Token')
#Log_Path="./Logfolder"
logfile = 'DiabetesChatLog.txt'
historylog = [{
        "Prompt": '',
        "Output": ''
}]

data = load_dataset("Namitg02/Test", split='train', streaming=False)
# Returns a datasets.Dataset object; each row behaves like a dictionary of column values.
length = len(data)

embedding_model = SentenceTransformer("all-MiniLM-L6-v2")
embedding_dim = embedding_model.get_sentence_embedding_dimension()
# Returns the dimensionality of the embedding vectors (384 for all-MiniLM-L6-v2)


index = faiss.IndexFlatL2(embedding_dim)
data.add_faiss_index("embeddings", custom_index=index)
# builds a FAISS index over the dataset's existing "embeddings" column (used by get_nearest_examples below)
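# Note: this assumes the dataset already stores precomputed vectors in an "embeddings" column.
# If it does not, the column could be added first (sketch; the text column name "0" is an
# assumption that matches the key used in format_prompt below):
# data = data.map(lambda row: {"embeddings": embedding_model.encode(row["0"]).tolist()})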

#question = "How can I reverse Diabetes?"

SYS_PROMPT = """You are an assistant for answering questions.
You are given the extracted parts of documents and a question. Provide a conversational answer.
If you don't know the answer, just say "I do not know." Don't make up an answer. Don't repeat the SYS_PROMPT."""
# Provides context of how to answer the question

#llm_model = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF", tinyllama-1.1b-chat-v1.0.Q5_K_M.gguf
# TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF , TinyLlama/TinyLlama-1.1B-Chat-v0.6, andrijdavid/TinyLlama-1.1B-Chat-v1.0-GGUF"
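# The GGUF file referenced below is expected to sit next to this script. If it is missing,
# it could be downloaded first (sketch; repo id and filename are assumptions based on the
# TheBloke GGUF conversions mentioned above):
# from huggingface_hub import hf_hub_download
# model_file = hf_hub_download("TheBloke/Llama-2-7B-Chat-GGUF", "llama-2-7b-chat.Q4_K_M.gguf")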

model = Llama(
    model_path="./llama-2-7b-chat.Q4_K_M.gguf",
 #   chat_format="llama-2",
    n_gpu_layers = 0,  # CPU only; increase to offload layers to a GPU
    n_ctx = 4096,
 #   eos_tokens=terminators
    # callback_manager=callback_manager,
    # verbose=True,  # Verbose is required to pass to the callback manager
    )
# temperature, top_p and max_tokens are sampling parameters, not constructor arguments;
# they are passed to create_chat_completion() inside talk() below
# initialize the model; llama.cpp loads the tokenizer from the GGUF file itself
                        
def search(query: str, k: int = 2):
    """a function that embeds a new query and returns the most probable results"""
    embedded_query = embedding_model.encode(query) # create embedding of a new query
    scores, retrieved_examples = data.get_nearest_examples( # retrieve results
        "embeddings", embedded_query, # compare our new embedded query with the dataset embeddings
        k=k # get only top k results
    )
    return scores, retrieved_examples
# returns scores (List[float]): the FAISS retrieval scores (L2 distances for IndexFlatL2, lower is closer)
# and retrieved_examples (dict): column name -> list of the top k matching values
# called by the talk function, which passes in the user prompt
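# Example usage (hypothetical query):
#   scores, docs = search("What causes type 2 diabetes?", k=2)
#   docs is keyed by column name, e.g. docs['0'] -> list of 2 document texts (see format_prompt below)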

def format_prompt(prompt,retrieved_documents,k,history,memory_limit=3):
    """using the retrieved documents we will prompt the model to generate our responses"""   
    PROMPT = f"Question:{prompt}\nContext:"
    for idx in range(k) :
        PROMPT+= f"{retrieved_documents['0'][idx]}\n"
    print("historyinfo")
    print(history)
    if len(history) == 0:
        return PROMPT
    
    if len(history) > memory_limit:
        history = history[-memory_limit:]
    
    print("checkwohist")
#    PROMPT = PROMPT + f"{history[0][0]} [/INST] {history[0][1]} </s>"
#    print("checkwthhist")
#    print(PROMPT)
    # Handle conversation history
    for user_message, bot_message in history:
        PROMPT += f"<s>[INST] {user_message} [/INST] {bot_message} </s>"
    print("checkwthhist2")
    print(PROMPT)
    return PROMPT


# Called by the talk function to build the final prompt: appends the text of each retrieved document, then the most recent turns of conversation history in Llama-2 chat format
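# The resulting string looks roughly like this (illustrative content only):
#   Question:<user prompt>
#   Context:<retrieved document 1>
#   <retrieved document 2>
#   <s>[INST] earlier user turn [/INST] earlier bot reply </s>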

def talk(prompt, history):
    k = 2 # number of retrieved documents
    scores , retrieved_documents = search(prompt, k) # get retrieval scores and examples in dictionary format based on the prompt passed
    print(retrieved_documents.keys())
#    print("check4")
    formatted_prompt = format_prompt(prompt,retrieved_documents,k,history,memory_limit=3) # create a new prompt using the retrieved documents
    print("check5")
    pd.options.display.max_colwidth = 4000
#    print(retrieved_documents['0'])
#    print(formatted_prompt)
  #  formatted_prompt_with_history = add_history(formatted_prompt, history)

   # formatted_prompt_with_history = formatted_prompt_with_history[:600] # to avoid memory issue
#    print(formatted_prompt_with_history)
    messages = [{"role":"system","content":SYS_PROMPT},{"role":"user","content":formatted_prompt}]  
    print(messages)
    # binding the system context and new prompt for LLM
    # the chat template structure should be based on text generation model format
    print("check 6")

# indicates the end of a sequence
#    stream = model.create_chat_completion(messages = messages, max_tokens=1000, stop=["</s>"], stream=False)
    stream = model.create_chat_completion(messages = messages, temperature=0.75, top_p=0.95, max_tokens=500, stream=False)
#    print(f"{stream}")
    print("check 7")
    print(stream['choices'][0]['message']['content'])
    return stream['choices'][0]['message']['content']
#    text = ""
#    for output in stream:
#       text += output['choices'][0]['message']['content']  
#       print(f"{output}")
#       print("check3H")
#       print(text)
#       yield text 
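# A working streaming variant (sketch, assuming the OpenAI-style chunks that llama-cpp-python
# emits when stream=True, where new text arrives under choices[0]['delta']['content'])
# could replace the return above with:
#    stream = model.create_chat_completion(messages=messages, temperature=0.75, top_p=0.95, max_tokens=500, stream=True)
#    text = ""
#    for chunk in stream:
#        delta = chunk['choices'][0]['delta']
#        if 'content' in delta:
#            text += delta['content']
#            yield text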
 
    
    
    # calling the model to generate a response based on the messages
    # temperature controls randomness: higher temperature means more random output
    # only the tokens comprising the top_p probability mass are considered when sampling
    # create_chat_completion returns a dict in the OpenAI chat-completion format;
    # the generated text is under choices[0]['message']['content']
 

    
TITLE = "AI Copilot for Diabetes Patients"

DESCRIPTION = "I provide answers to concerns related to Diabetes"

import gradio as gr
# Design chatbot
demo = gr.ChatInterface(
    fn=talk,
    chatbot=gr.Chatbot(
        show_label=True,
        show_share_button=True,
        show_copy_button=True,
        likeable=True,
        layout="bubble",
        bubble_full_width=False,
    ),
    theme="Soft",
    examples=[["what is Diabetes?"]],
    title=TITLE,
    description=DESCRIPTION,
)
# launches the chatbot UI; each user message calls the talk function, which in turn calls the other functions above
print("check14")
#print(historylog)
#memory_panda = pd.DataFrame(historylog)
#Logfile = Dataset.from_pandas(memory_panda)
#Logfile.push_to_hub("Namitg02/Logfile",token = HF_TOKEN)
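# A possible way to enable the logging above (sketch, not wired in): inside talk(), append each
# exchange before returning, e.g.
#     historylog.append({"Prompt": prompt, "Output": stream['choices'][0]['message']['content']})
# then uncomment the three lines above; note that push_to_hub uses the HF_TOKEN read at the top of the file.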
demo.launch()