import os

import openai
import streamlit as st
from transformers import pipeline

from helpers.foundation_models import (
    call_chatgpt,
    call_langchain,
    llama2_7b_brk_letters,
    llama2_7b_ysa,
)
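
# helpers.foundation_models is assumed to expose call_chatgpt, call_langchain,
# llama2_7b_ysa, and llama2_7b_brk_letters. For orientation, call_chatgpt is
# presumably a thin wrapper over the OpenAI chat completions API, roughly:
#
#     def call_chatgpt(query: str) -> str:
#         response = openai_client.chat.completions.create(
#             model="gpt-3.5-turbo",
#             messages=[{"role": "user", "content": query}],
#         )
#         return response.choices[0].message.content
#
# (Illustrative sketch only; see helpers/foundation_models.py for the actual
# implementations.)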


# API Keys
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
SERPAPI_API_KEY = os.environ["SERPAPI_API_KEY"]
openai_client = openai.OpenAI(api_key=OPENAI_API_KEY)
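

# Loading a transformers pipeline is expensive, and Streamlit reruns this
# script on every interaction. Caching the loaders keeps each model in memory
# after the first use; a sketch using st.cache_resource (the helper names
# below are our own, not part of any library).
@st.cache_resource
def get_sentiment_pipeline():
    """Load and cache the default sentiment-analysis pipeline."""
    return pipeline("sentiment-analysis")


@st.cache_resource
def get_medical_summarizer():
    """Load and cache the Falconsai/medical_summarization model."""
    return pipeline("summarization", model="Falconsai/medical_summarization")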


# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []


# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])


# Sidebar
st.sidebar.markdown(
    r"""
    # 🌟 Streamlit + Hugging Face Demo 🤖

    ## Introduction 📖

    This demo showcases how to interact with Large Language Models (LLMs) on Hugging Face using Streamlit. 
    """
)


option = st.sidebar.selectbox(
    "Which task do you want to do?",
    ("Sentiment Analysis", "Medical Summarization", "Llama2 on YSA", "Llama2 on BRK Letters", "ChatGPT", "ChatGPT (with Google)"),
)


clear_button = st.sidebar.button("Clear Conversation", key="clear")
st.sidebar.write("---")
st.sidebar.markdown("Yiqiao Yin: [Site](https://www.y-yin.io/) | [LinkedIn](https://www.linkedin.com/in/yiqiaoyin/)")


st.sidebar.markdown(
    r"""
    To fine-tune an LLM such as Llama2 on custom data, use the following tutorials as resources. The options above, `Llama2 on YSA` and `Llama2 on BRK Letters`, were developed based on the content of the following videos.

    ## Video Series Overview

    ### Video 1: Process Your Own PDF Doc into LLM Finetune-Ready Format

    Learn how to transform PDF documents into a fine-tuning-ready format for LLMs. This video takes you through the steps to make your PDF data AI-ready.

    - [Watch Video](https://youtu.be/hr2kSC1evQM)
    - [Tutorial Notebook](https://github.com/yiqiao-yin/WYNAssociates/blob/main/docs/ref-deeplearning/ex24f%20-%20process%20custom%20data%20from%20pdf%20and%20push%20to%20huggingface%20to%20prep%20for%20fine%20tune%20task%20of%20llama%202%20using%20lora.ipynb)

    ### Video 2: Fine-tune Llama2-7b LLM Using Custom Data

    Dive into customizing the Llama-2 model with your unique dataset. This installment turns your data into a bespoke AI model.

    - [Watch Video](https://youtu.be/tDkY2gpvylE)
    - [Guide to Fine-Tuning](https://github.com/yiqiao-yin/WYNAssociates/blob/main/docs/ref-deeplearning/ex24f%20-%20fine%20tune%20Llama%202%20using%20ysa%20data%20in%20colab.ipynb)

    ### Video 3: Deploy Inference Endpoint on HuggingFace!

    Discover how to make your AI model accessible to the world by deploying it as an inference endpoint on HuggingFace.

    - [Watch Video](https://youtu.be/382yy-mCeCA)
    - [Deployment Guide](https://github.com/yiqiao-yin/WYNAssociates/blob/main/docs/ref-deeplearning/ex24f%20-%20inference%20endpoint%20interaction%20from%20huggingface.ipynb)
    - [HuggingFace Space](https://huggingface.co/spaces/eagle0504/streamlit-demo)

    """
)


# Reset everything
if clear_button:
    st.session_state.messages = []


# React to user input
if prompt := st.chat_input("What is up?"):

    # Display user message in chat message container
    st.chat_message("user").markdown(prompt)

    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Execute options
    with st.spinner("Wait for it..."):
        if option == "Sentiment Analysis":
            # `prompt` is guaranteed non-empty by the walrus check above.
            pipe_sentiment_analysis = get_sentiment_pipeline()
            out = pipe_sentiment_analysis(prompt)
            final_response = f"""
                Prompt: {prompt}
                Sentiment: {out[0]["label"]}
                Score: {out[0]["score"]}
            """
        elif option == "Medical Summarization":
            pipe_summarization = pipeline(
                "summarization", model="Falconsai/medical_summarization"
            )
            if prompt:
                out = pipe_summarization(prompt)
                final_response = out[0]["summary_text"]
        elif option == "Llama2 on YSA":
            if prompt:
                try:
                    out = llama2_7b_ysa(prompt)
                    engineered_prompt = f"""
                        The user asked the question: {prompt}
    
                        We have found relevant content: {out}
    
                        Answer the user question based on the above content in paragraphs.
                    """
                    final_response = call_chatgpt(query=engineered_prompt)
                except:
                    final_response = "Sorry, the inference endpoint is temporarily down. 😔"
        elif option == "Llama2 on BRK Letters":
            if prompt:
                try:
                    out = llama2_7b_brk_letters(prompt)
                    engineered_prompt = f"""
                        The user asked the question: {prompt}
    
                        We have found relevant content: {out}
    
                        Answer the user question based on the above content in paragraphs.
                    """
                    final_response = call_chatgpt(query=engineered_prompt)
                except:
                    final_response = "Sorry, the inference endpoint is temporarily down. 😔"
        elif option == "ChatGPT":
            if prompt:
                out = call_chatgpt(query=prompt)
                final_response = out
        elif option == "ChatGPT (with Google)":
            if prompt:
                ans_langchain = call_langchain(prompt)
                prompt = f"""
                    Based on the internet search results: {ans_langchain};

                    Answer the user question: {prompt}
                """
                out = call_chatgpt(query=prompt)
                final_response = out
        else:
            final_response = ""

    response = final_response
    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        st.markdown(response)
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})
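

# To launch the app locally (assuming this file is saved as app.py):
#   streamlit run app.py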