# QandA Chatbot - attempt 1
import os

import streamlit as st
from langchain.llms import OpenAI

# from dotenv import load_dotenv
# load_dotenv()  # take environment variables from .env
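# The API key is read from an environment variable named OPEN_API_KEY (note the
# spelling: this app does not use the conventional OPENAI_API_KEY name). If the
# dotenv lines above are enabled, a .env file next to app.py could look like:
#   OPEN_API_KEY=sk-...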
# load OpenAI model and get a response
def get_openai_response(question):
    llm = OpenAI(
        openai_api_key=os.getenv("OPEN_API_KEY"),
        model_name="gpt-3.5-turbo-instruct",
        temperature=0.6,
    )
    response = llm(question)
    return response
# TODO: extend with a prompt template / chain and other components (see the sketch below)
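# A hedged sketch of what the TODO above might look like (not part of the
# original app): wrap the same model in a PromptTemplate + LLMChain so the
# prompt wording lives in one reusable place.
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

def get_chain_response(question):
    # Same model and OPEN_API_KEY environment variable as get_openai_response.
    llm = OpenAI(
        openai_api_key=os.getenv("OPEN_API_KEY"),
        model_name="gpt-3.5-turbo-instruct",
        temperature=0.6,
    )
    prompt = PromptTemplate(
        input_variables=["question"],
        template="Answer the following question concisely:\n{question}",
    )
    chain = LLMChain(llm=llm, prompt=prompt)
    return chain.run(question=question)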
## streamlit app
st.set_page_config(page_title="QandA Demo")
st.header("QandA - Langchain Application")
st.write("UPDATE: This app uses the 'gpt-3.5-turbo-instruct' model through Langchain to answer queries")
user_question = st.text_input("Enter your query: ", key="input")
submit = st.button("Generate")

# Only call the model once the button is pressed and a question was entered.
if submit and user_question:
    response = get_openai_response(user_question)
    st.subheader("The response is")
    st.write(response)
# Requirements: pip install langchain openai streamlit
# Earlier PDF-upload variant, kept commented out for reference. extract_data is
# an undefined placeholder for a PDF-parsing helper (see the sketch below).
# st.set_page_config(page_title="Trail Demo")
# st.header("Sample")
# st.write("UPDATE: This app uses the 'gpt-3.5-turbo-instruct' model through Langchain")
# # input = st.text_input("Enter your query: ", key=input)
# uploaded_file = st.file_uploader('Choose your .pdf file', type="pdf")
# if uploaded_file is not None:
#     df = extract_data(uploaded_file)
#     response = get_openai_response(df)
# submit = st.button("Generate")
# if submit:
#     st.subheader("The response is")
#     st.write(response)
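# Hedged sketch (not from the original file) of how the extract_data placeholder
# above might be implemented, assuming the pypdf package is installed:
# from pypdf import PdfReader
#
# def extract_data(uploaded_file):
#     # Concatenate the text of every page in the uploaded PDF.
#     reader = PdfReader(uploaded_file)
#     return "\n".join(page.extract_text() or "" for page in reader.pages)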