import streamlit as st
from langchain.prompts import PromptTemplate
from langchain_google_genai import ChatGoogleGenerativeAI
import os, spaces

# The Gemini key is supplied via the `geminiapi` env var (e.g. a Spaces secret).
# Guard against a missing secret: `os.environ[...] = None` raises TypeError,
# so only export GOOGLE_API_KEY when the secret is actually present.
_gemini_key = os.getenv('geminiapi')
if _gemini_key:
    os.environ['GOOGLE_API_KEY'] = _gemini_key

# Function for LLM response
def llm_response(user_text, number_of_words, blog_audience, model="gemini-pro"):
    """Generate a blog post with Gemini and render it in the Streamlit page.

    Args:
        user_text: Blog topic/title entered by the user.
        number_of_words: Target length of the blog (string or int; inserted
            verbatim into the prompt).
        blog_audience: Target audience label (e.g. "Researchers").
        model: Gemini model name; defaults to "gemini-pro" for backward
            compatibility with existing callers.

    Side effects:
        Writes a subheader and the generated text (or an error message) to
        the Streamlit app; returns nothing.
    """
    # Guard against an empty topic — the prompt would be meaningless and the
    # API call would be wasted.
    if not str(user_text).strip():
        st.warning("Please provide a blog title before submitting.")
        return

    llm = ChatGoogleGenerativeAI(model=model)

    ptemplate = '''
    You are an Expert Blog Writer. For the topic {user_text},
    write a Blog in {number_of_words} words for an audience of {blog_audience}.
    '''
    prompt = PromptTemplate(
        template=ptemplate,
        input_variables=['user_text', 'number_of_words', 'blog_audience'],
    )
    final_prompt = prompt.format(
        user_text=user_text,
        number_of_words=number_of_words,
        blog_audience=blog_audience,
    )

    # Surface API/auth failures as a readable error in the UI instead of a
    # Streamlit traceback.
    try:
        result = llm.invoke(final_prompt)
    except Exception as exc:
        st.error(f"Blog generation failed: {exc}")
        return

    st.subheader("Result:")
    st.write(result.content)

# define page config
# --- Page chrome: centered layout, sidebar starts collapsed. ---
st.set_page_config(
    page_title="Blog Generation",
    page_icon="🧊",
    layout="centered",
    initial_sidebar_state="collapsed",
)

st.header("Blog Generation App🧊")
user_text = st.text_input("Enter title for blog")
col1, col2 = st.columns([6, 6])

with col1:
    number_of_words = st.text_input("Number of words in Blog")

with col2:
    blog_audience = st.selectbox("Select target audience",
                                  ['Data Scientists', 'Researchers', 'Common People'],
                                  index=2)  # default to the broadest audience

submit_btn = st.button("Submit")

if submit_btn:
    # Validate inputs before spending an LLM call: the title must be
    # non-empty and the word count must be a positive whole number
    # (text_input returns a string, so check it explicitly).
    if not user_text.strip():
        st.warning("Please enter a blog title.")
    elif not number_of_words.strip().isdigit() or int(number_of_words) <= 0:
        st.warning("Please enter a positive whole number of words.")
    else:
        llm_response(user_text, number_of_words, blog_audience)