import os

import streamlit as st
from dotenv import load_dotenv
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

load_dotenv()

## LangSmith tracking
os.environ['LANGCHAIN_API_KEY'] = os.getenv('LANGCHAIN_API_KEY', '')
os.environ['LANGCHAIN_TRACING_V2'] = 'true'
os.environ['LANGCHAIN_PROJECT'] = "Simple Q&A Chatbot With OpenAI"

## Prompt template
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant. Please answer the user's questions to the best of your ability."),
        ("user", "Question:{question}"),
    ]
)


def generate_response(question, api_key, llm, temperature, max_tokens):
    ## Pass the user's API key directly to the model instead of mutating the global openai module
    llm = ChatOpenAI(model=llm, api_key=api_key, temperature=temperature, max_tokens=max_tokens)
    output_parser = StrOutputParser()
    ## Compose the prompt, model, and parser into a single runnable chain
    chain = prompt | llm | output_parser
    answer = chain.invoke({'question': question})
    return answer


## Title of the chat app
st.title('Enhanced Q&A Chatbot With OpenAI')

## Sidebar for settings
st.sidebar.title('Settings')
api_key = st.sidebar.text_input("Enter your OpenAI API Key:", type="password")

## Dropdown to select from several OpenAI models
llm = st.sidebar.selectbox('Select OpenAI Model', ['gpt-4o-mini', 'gpt-4-turbo', 'gpt-4o'])

## Sliders to adjust response parameters
temperature = st.sidebar.slider('Temperature', min_value=0.0, max_value=1.0, value=0.7)
max_tokens = st.sidebar.slider('Max Tokens', min_value=50, max_value=3000, value=150)

## Main interface for user input
st.write('Ask me anything!')
user_input = st.text_input('Enter your question here:')

if user_input and api_key:
    response = generate_response(user_input, api_key, llm, temperature, max_tokens)
    st.write(response)
elif user_input:
    ## Guard against calling the model without a key, which would otherwise raise an authentication error
    st.warning('Please enter your OpenAI API key in the sidebar.')
else:
    st.write('Please enter a question to get started!')