import spaces
import gradio as gr
from tagger.utils import gradio_copy_text, COPY_ACTION_JS
from tagger.tagger import convert_danbooru_to_e621_prompt, insert_recom_prompt
from genimage import generate_image
from llmdolphin import (get_llm_formats, get_dolphin_model_format,
    get_dolphin_models, get_dolphin_model_info, select_dolphin_model, get_dolphin_loras, select_dolphin_lora,
    add_dolphin_loras, select_dolphin_format, add_dolphin_models, get_dolphin_sysprompt,
    get_dolphin_sysprompt_mode, select_dolphin_sysprompt, get_dolphin_languages,
    select_dolphin_language, dolphin_respond, dolphin_parse, respond_playground)

# Extra CSS injected into the Blocks app below; `.title` centers the
# markdown headings that are tagged with elem_classes="title".
css = """
.title { text-align: center; }
"""

# Main UI. Components are rendered in declaration order; all event wiring is
# done at the bottom of this context, after every referenced component exists.
# NOTE(review): delete_cache=(60, 3600) presumably means "scan every 60 s and
# delete cached files older than 3600 s" — confirm against the gr.Blocks docs.
with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=css, delete_cache=(60, 3600)) as app:
    # ------------------------------------------------------------------
    # Tab 1: translate natural-language text into an SD prompt via an LLM.
    # ------------------------------------------------------------------
    with gr.Tab("Prompt Translator"):
        with gr.Column():
            gr.Markdown("""# Natural Text to SD Prompt Translator With LLM alpha
                            Text in natural language (English, Japanese, ...) => Prompt
                        """, elem_classes="title")
            # Per-session dict shared by BOTH tabs' handlers (model/lora/format
            # selections are stored here by the llmdolphin helpers).
            state = gr.State(value={})
            with gr.Group():
                # Chat area: message box + Send/Clear buttons.
                chatbot = gr.Chatbot(show_copy_button=True, show_share_button=False, layout="bubble", container=True)
                with gr.Row(equal_height=True):
                    chat_msg = gr.Textbox(show_label=False, placeholder="Input text in English, Japanese, or any other languages and press Enter or click Send.", scale=4)
                    chat_submit = gr.Button("Send", scale=1, variant="primary")
                    chat_clear = gr.Button("Clear", scale=1, variant="secondary")
                # LLM sampling parameters passed straight into dolphin_respond.
                with gr.Accordion("Additional inputs", open=False):
                    chat_format = gr.Dropdown(choices=get_llm_formats(), value=get_dolphin_model_format(get_dolphin_models()[0]), label="Message format")
                    chat_sysmsg = gr.Textbox(value=get_dolphin_sysprompt(), label="System message")
                    with gr.Row():
                        chat_tokens = gr.Slider(minimum=1, maximum=4096, value=512, step=1, label="Max tokens")
                        chat_temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
                        chat_topp = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p")
                        chat_topk = gr.Slider(minimum=0, maximum=100, value=40, step=1, label="Top-k")
                        chat_rp = gr.Slider(minimum=0.0, maximum=2.0, value=1.1, step=0.1, label="Repetition penalty")
                    # LoRA selection UI — currently hidden (visible=False) but
                    # still wired up below, so the components must exist.
                    with gr.Accordion("Loras", open=True, visible=False):
                        chat_lora = gr.Dropdown(choices=get_dolphin_loras(), value=get_dolphin_loras()[0], allow_custom_value=True, label="Lora")
                        chat_lora_scale = gr.Slider(minimum=0.0, maximum=1.0, value=1.0, step=0.01, label="Lora scale")
                        chat_add_lora_text = gr.Textbox(label="URL or Repo ID", placeholder="https://huggingface.co/ggml-org/LoRA-Qwen2.5-14B-Instruct-abliterated-v2-F16-GGUF/blob/main/LoRA-Qwen2.5-14B-Instruct-abliterated-v2-f16.gguf", lines=1)
                        chat_add_lora_submit = gr.Button("Update lists of loras")
                # Register additional GGUF models by URL / HF repo ID.
                with gr.Accordion("Add models", open=False):
                    chat_add_text = gr.Textbox(label="URL or Repo ID", placeholder="https://huggingface.co/mradermacher/MagnumChronos-i1-GGUF/blob/main/MagnumChronos.i1-Q4_K_M.gguf", lines=1)
                    chat_add_format = gr.Dropdown(choices=get_llm_formats(), value=get_llm_formats()[0], label="Message format")
                    chat_add_submit = gr.Button("Update lists of models")
                # Model / system-prompt-mode / output-language selection.
                with gr.Accordion("Modes", open=True):
                    chat_model = gr.Dropdown(choices=get_dolphin_models(), value=get_dolphin_models()[0], allow_custom_value=True, label="Model")
                    chat_model_info = gr.Markdown(value=get_dolphin_model_info(get_dolphin_models()[0]), label="Model info")
                    with gr.Row():
                        chat_mode = gr.Dropdown(choices=get_dolphin_sysprompt_mode(), value=get_dolphin_sysprompt_mode()[0], allow_custom_value=False, label="Mode")
                        chat_lang = gr.Dropdown(choices=get_dolphin_languages(), value="English", allow_custom_value=True, label="Output language")
            # Output panes: plain tags (danbooru style) and a Pony/e621
            # conversion of the same tags, each with a copy button that is
            # enabled by dolphin_parse once output exists.
            with gr.Row():
                with gr.Group():
                    output_text = gr.TextArea(label="Output tags", interactive=False, show_copy_button=True)
                    copy_btn = gr.Button(value="Copy to clipboard", size="sm", interactive=False)
                with gr.Group():
                    output_text_pony = gr.TextArea(label="Output tags (Pony e621 style)", interactive=False, show_copy_button=True)
                    copy_btn_pony = gr.Button(value="Copy to clipboard", size="sm", interactive=False)
                # Hidden components used only as fixed inputs to the tagger
                # helpers in the event chain below (not user-facing).
                # NOTE(review): "reccomended" in the two labels below is a typo
                # for "recommended" — labels are invisible, fix at leisure.
                with gr.Accordion(label="Advanced options", open=False, visible=False):
                    tag_type = gr.Radio(label="Output tag conversion", info="danbooru for Animagine, e621 for Pony.", choices=["danbooru", "e621"], value="e621", visible=False)
                    dummy_np = gr.Textbox(label="Negative prompt", value="", visible=False)
                    dummy_np_pony = gr.Textbox(label="Negative prompt", value="", visible=False)
                    recom_animagine = gr.Textbox(label="Animagine reccomended prompt", value="Animagine", visible=False)
                    recom_pony = gr.Textbox(label="Pony reccomended prompt", value="Pony", visible=False)
            # Generate an image from the translated prompt.
            generate_image_btn = gr.Button(value="GENERATE IMAGE", size="lg", variant="primary")
            with gr.Row():
                result_image = gr.Gallery(label="Generated images", columns=1, object_fit="contain", container=True, preview=True, height=512,
                                          show_label=False, show_share_button=False, show_download_button=True, interactive=False, visible=True, format="png")
    # ------------------------------------------------------------------
    # Tab 2: generic chat playground over the same GGUF model pool.
    # ------------------------------------------------------------------
    with gr.Tab("GGUF-Playground"):
        gr.Markdown("""# Chat with lots of Models and LLMs using llama.cpp
                    This tab is copy of [CaioXapelaum/GGUF-Playground](https://huggingface.co/spaces/CaioXapelaum/GGUF-Playground).<br>
                    Don't worry about the strange appearance, **it's just a bug of Gradio!**""", elem_classes="title")
        pg_chatbot = gr.Chatbot(scale=1, show_copy_button=True, show_share_button=False)
        # Mirrors Tab 1's model/sampling controls (pg_-prefixed components),
        # fed to ChatInterface below as additional_inputs.
        with gr.Accordion("Additional inputs", open=False):
            pg_chat_model = gr.Dropdown(choices=get_dolphin_models(), value=get_dolphin_models()[0], allow_custom_value=True, label="Model")
            pg_chat_model_info = gr.Markdown(value=get_dolphin_model_info(get_dolphin_models()[0]), label="Model info")
            pg_chat_format = gr.Dropdown(choices=get_llm_formats(), value=get_dolphin_model_format(get_dolphin_models()[0]), label="Message format")
            pg_chat_sysmsg = gr.Textbox(value="You are a helpful assistant.", label="System message")
            with gr.Row():
                pg_chat_tokens = gr.Slider(minimum=1, maximum=4096, value=2048, step=1, label="Max tokens")
                pg_chat_temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
                pg_chat_topp = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p")
                pg_chat_topk = gr.Slider(minimum=0, maximum=100, value=40, step=1, label="Top-k")
                pg_chat_rp = gr.Slider(minimum=0.0, maximum=2.0, value=1.1, step=0.1, label="Repetition penalty")
                # Hidden LoRA controls, same as Tab 1.
                with gr.Accordion("Loras", open=True, visible=False):
                    pg_chat_lora = gr.Dropdown(choices=get_dolphin_loras(), value=get_dolphin_loras()[0], allow_custom_value=True, label="Lora")
                    pg_chat_lora_scale = gr.Slider(minimum=0.0, maximum=1.0, value=1.0, step=0.01, label="Lora scale")
                    pg_chat_add_lora_text = gr.Textbox(label="URL or Repo ID", placeholder="https://huggingface.co/ggml-org/LoRA-Qwen2.5-14B-Instruct-abliterated-v2-F16-GGUF/blob/main/LoRA-Qwen2.5-14B-Instruct-abliterated-v2-f16.gguf", lines=1)
                    pg_chat_add_lora_submit = gr.Button("Update lists of loras")
            with gr.Accordion("Add models", open=True):
                pg_chat_add_text = gr.Textbox(label="URL or Repo ID", placeholder="https://huggingface.co/mradermacher/MagnumChronos-i1-GGUF/blob/main/MagnumChronos.i1-Q4_K_M.gguf", lines=1)
                pg_chat_add_format = gr.Dropdown(choices=get_llm_formats(), value=get_llm_formats()[0], label="Message format")
                pg_chat_add_submit = gr.Button("Update lists of models")
        # The commented-out kwargs below were dropped/renamed across Gradio
        # versions; kept for reference.
        gr.ChatInterface(
            fn=respond_playground,
            #title="Chat with lots of Models and LLMs using llama.cpp",
            #retry_btn="Retry",
            #undo_btn="Undo",
            #clear_btn="Clear",
            submit_btn="Send",
            #additional_inputs_accordion='gr.Accordion(label="Additional Inputs", open=False)',
            additional_inputs=[pg_chat_model, pg_chat_sysmsg, pg_chat_tokens, pg_chat_temperature, pg_chat_topp, pg_chat_topk, pg_chat_rp,
                               pg_chat_lora, pg_chat_lora_scale, state],
            chatbot=pg_chatbot
        )
    gr.LoginButton()
    gr.DuplicateButton(value="Duplicate Space for private use (This demo does not work on CPU. Requires GPU Space)")

    # ------------------------------------------------------------------
    # Event wiring — Tab 1.
    # Pipeline: LLM responds into the chatbot, then on success the reply is
    # parsed into tags, converted to e621 style, and both outputs get the
    # recommended prompt fragments prepended/appended.
    # ------------------------------------------------------------------
    gr.on(
        triggers=[chat_msg.submit, chat_submit.click],
        fn=dolphin_respond,
        inputs=[chat_msg, chatbot, chat_model, chat_sysmsg, chat_tokens, chat_temperature, chat_topp, chat_topk, chat_rp, chat_lora, chat_lora_scale, state],
        outputs=[chatbot],
        queue=True,
        show_progress="full",
        trigger_mode="once",  # ignore re-triggers while a run is in flight
    ).success(dolphin_parse, [chatbot, state], [output_text, copy_btn, copy_btn_pony]
    ).success(convert_danbooru_to_e621_prompt, [output_text, tag_type], [output_text_pony], queue=False,
    ).success(insert_recom_prompt, [output_text, dummy_np, recom_animagine], [output_text, dummy_np], queue=False,
    ).success(insert_recom_prompt, [output_text_pony, dummy_np_pony, recom_pony], [output_text_pony, dummy_np_pony], queue=False)
    chat_clear.click(lambda: None, None, chatbot, queue=False)
    # Changing model/format/lora also clears the chat history (the lambda
    # returning None resets the Chatbot component).
    chat_model.change(select_dolphin_model, [chat_model, state], [chat_model, chat_format, chat_model_info, state], queue=True, show_progress="full")\
    .success(lambda: None, None, chatbot, queue=False)
    chat_format.change(select_dolphin_format, [chat_format, state], [chat_format, state], queue=False)\
    .success(lambda: None, None, chatbot, queue=False)
    chat_lora.change(select_dolphin_lora, [chat_lora, state], [chat_lora, state], queue=True, show_progress="full")\
    .success(lambda: None, None, chatbot, queue=False)
    chat_mode.change(select_dolphin_sysprompt, [chat_mode, state], [chat_sysmsg, state], queue=False)
    chat_lang.change(select_dolphin_language, [chat_lang, state], [chat_sysmsg, state], queue=False)
    gr.on(
        triggers=[chat_add_text.submit, chat_add_submit.click],
        fn=add_dolphin_models,
        inputs=[chat_add_text, chat_add_format],
        outputs=[chat_model],
        queue=True,
        trigger_mode="once",
    )
    gr.on(
        triggers=[chat_add_lora_text.submit, chat_add_lora_submit.click],
        fn=add_dolphin_loras,
        inputs=[chat_add_lora_text],
        outputs=[chat_lora],
        queue=True,
        trigger_mode="once",
    )

    # Copy buttons use a JS snippet for the actual clipboard write.
    copy_btn.click(gradio_copy_text, [output_text], js=COPY_ACTION_JS)
    copy_btn_pony.click(gradio_copy_text, [output_text_pony], js=COPY_ACTION_JS)

    generate_image_btn.click(generate_image, [output_text, dummy_np], [result_image], show_progress="full")

    # ------------------------------------------------------------------
    # Event wiring — Tab 2 (same helpers, pg_-prefixed components; note these
    # do NOT clear the playground chat on model change, unlike Tab 1).
    # ------------------------------------------------------------------
    pg_chat_model.change(select_dolphin_model, [pg_chat_model, state], [pg_chat_model, pg_chat_format, pg_chat_model_info, state], queue=True, show_progress="full")
    pg_chat_format.change(select_dolphin_format, [pg_chat_format, state], [pg_chat_format, state], queue=False)
    pg_chat_lora.change(select_dolphin_lora, [pg_chat_lora, state], [pg_chat_lora, state], queue=True, show_progress="full")
    gr.on(
        triggers=[pg_chat_add_text.submit, pg_chat_add_submit.click],
        fn=add_dolphin_models,
        inputs=[pg_chat_add_text, pg_chat_add_format],
        outputs=[pg_chat_model],
        queue=True,
        trigger_mode="once",
    )
    gr.on(
        triggers=[pg_chat_add_lora_text.submit, pg_chat_add_lora_submit.click],
        fn=add_dolphin_loras,
        inputs=[pg_chat_add_lora_text],
        outputs=[pg_chat_lora],
        queue=True,
        trigger_mode="once",
    )


if __name__ == "__main__":
    # Enable the request queue (required for the streaming/generator handlers
    # wired above), then start the server.
    # NOTE(review): ssr_mode=False disables Gradio's server-side rendering —
    # presumably a workaround for SSR issues on the host; confirm.
    app.queue()
    app.launch(ssr_mode=False)