# -*-coding:utf-8-*-
import streamlit as st
# code from https://huggingface.co/kakaobrain/kogpt
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM 


# Load the tokenizer and model once per server process. Streamlit re-executes
# this whole script on every widget interaction; without caching, the
# 6B-parameter checkpoint would be re-loaded from disk on each button click.
@st.cache_resource
def _load_kogpt():
  """Return (tokenizer, model, device) for kakaobrain/KoGPT 6B (ryan1.5b).

  The model is downloaded/read from ./model_dir/, cast to float16, moved to
  GPU when available, and switched to eval mode for inference only.
  """
  tok = AutoTokenizer.from_pretrained(
    'kakaobrain/kogpt', revision='KoGPT6B-ryan1.5b', cache_dir='./model_dir/',
    bos_token='[BOS]', eos_token='[EOS]', unk_token='[UNK]',
    pad_token='[PAD]', mask_token='[MASK]'
  )
  dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
  mdl = AutoModelForCausalLM.from_pretrained(
    'kakaobrain/kogpt', revision='KoGPT6B-ryan1.5b', cache_dir='./model_dir/',
    # Stop generation at the custom [EOS] token instead of an unset pad id.
    pad_token_id=tok.eos_token_id,
    torch_dtype=torch.float16, low_cpu_mem_usage=True
  ).to(device=dev, non_blocking=True)
  mdl.eval()  # inference only — disable dropout etc.
  return tok, mdl, dev


# Keep the original module-level names so gpt() below works unchanged.
tokenizer, model, device = _load_kogpt()

print("Model loading done!")

def gpt(prompt, temperature=0.8, max_length=256):
  """Generate a Korean continuation of *prompt* with KoGPT.

  Args:
    prompt: seed text to continue.
    temperature: sampling temperature passed to ``model.generate``
      (default 0.8, the original hard-coded value).
    max_length: total token budget — prompt tokens plus generated tokens
      (default 256, the original hard-coded value).

  Returns:
    The decoded text (prompt included) with special tokens stripped.
  """
  with torch.no_grad():  # inference only — no gradient bookkeeping
    tokens = tokenizer.encode(prompt, return_tensors='pt').to(device=device, non_blocking=True)
    gen_tokens = model.generate(
      tokens, do_sample=True, temperature=temperature, max_length=max_length
    )
    # skip_special_tokens=True: the custom [EOS]/[PAD] markers configured on
    # the tokenizer would otherwise appear verbatim in the user-facing output.
    generated = tokenizer.batch_decode(gen_tokens, skip_special_tokens=True)[0]

  return generated
  
    
# --- page layout and input widgets ---
st.title("여러분들의 문장을 완성해줍니다. 🤖")
st.markdown("카카오 gpt 사용합니다.")
st.subheader("몇가지 예제: ")
example_1_str = "오늘의 날씨는 너무 눈부시다. 내일은 "
example_2_str = "우리는 행복을 언제나 갈망하지만 항상 "
example_1 = st.button(example_1_str)
example_2 = st.button(example_2_str)
textbox = st.text_area('오늘은 아름다움을 향해 달리고 ', '', height=100, max_chars=500)
button = st.button('생성:')

# --- output ---
st.subheader("결과값: ")


def _show_generation(prompt):
  """Run gpt() on *prompt* behind a spinner and render the result.

  An empty prompt (blank textbox) renders a single space rather than
  invoking the model, matching the original branch-per-button behavior.
  """
  with st.spinner('In progress.......'):
    output_text = gpt(prompt) if prompt else " "
  st.markdown("\n" + output_text)


# Exactly one of these can be true per Streamlit rerun (one click at a time).
if example_1:
  _show_generation(example_1_str)
if example_2:
  _show_generation(example_2_str)
if button:
  _show_generation(textbox)