# LaptopGPT — Streamlit chat front-end for a locally hosted LLM server.
# (Header reconstructed; original lines were Hugging Face Spaces page residue.)
import json
import random
import time

import mysql.connector
import openai
import streamlit as st

st.title("LaptopGPT v1.0")

# Point the legacy (pre-1.0) OpenAI client at the local GPT4All-compatible
# server; the key is unused by the local backend but must be non-empty.
openai.api_base = "https://master-naturally-bluegill.ngrok-free.app/v1"
openai.api_key = "not needed for a local LLM"

if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = "GPT4All Falcon"

# Initialize message history with a greeting so the first render is not empty.
if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "assistant", "content": "I'm ready for a prompt!"}]

# Replay the stored history into the chat window on every Streamlit rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Sidebar controls for generation parameters.
token_number = st.sidebar.slider("Maximum Tokens", min_value=10, max_value=400, value=50, step=1)
temp_number = st.sidebar.slider("Temperature", min_value=0.00, max_value=1.00, value=0.28, step=0.01)
model_selector = st.sidebar.selectbox("Available Models", ("GPT4All Falcon", "Llama-2-7B Chat", "Mini Orca (Small)"))
display_stats = st.sidebar.checkbox("Display stats")

# NOTE(review): random per-run id, renamed from `iter`/`siter` because `iter`
# shadowed the builtin. It appears unused in the rest of this file — confirm
# before deleting outright.
request_id = random.randint(0, 999999999)
request_id_str = str(request_id)
# User input | |
# Main chat handler: runs once per user submission.
if prompt := st.chat_input("Type some text here!"):
    # Record and echo the user's message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        # Legacy (pre-1.0) OpenAI completion call; non-streaming, single choice.
        response = openai.Completion.create(
            model=model_selector,
            prompt=prompt,
            max_tokens=token_number,
            temperature=temp_number,
            top_p=0.95,
            n=1,
            echo=False,
            stream=False,
        )

        # Extract the response fields once (the original duplicated this).
        choice = response["choices"][0]
        response_text = choice["text"]
        usage = response["usage"]
        prompt_tokens = usage["prompt_tokens"]
        completion_tokens = usage["completion_tokens"]
        total_tokens = usage["total_tokens"]
        # What this exchange would have cost at GPT-4 list prices
        # ($0.03 / 1K prompt tokens, $0.06 / 1K completion tokens).
        cost = (prompt_tokens * 0.03 + completion_tokens * 0.06) / 1000

        st.markdown(response_text)
        st.session_state.messages.append({"role": "assistant", "content": response_text})

        if display_stats:
            st.markdown(''':red[Tokens in prompt: ]''' + " " + str(prompt_tokens))
            st.markdown(''':orange[Tokens in response: ]''' + " " + str(completion_tokens))
            st.markdown(''':green[Total Tokens: ]''' + " " + str(total_tokens))
            st.markdown(''':blue[GPT4 Cost: ]''' + " $" + str(round(cost, 5)))
            st.markdown(''':rainbow[Our Cost: ]''' + " $0.00")

    # Log the full exchange to MySQL.
    # SECURITY NOTE(review): database credentials are hard-coded in source;
    # move them to st.secrets or environment variables.
    conn = mysql.connector.connect(
        host='34.70.75.35',
        database='chatbotdb',
        user='remote3',
        password='password'
    )
    try:
        cursor = conn.cursor()

        # Row id: timestamp-based so web-originated rows are identifiable.
        timestr = time.strftime("%Y%m%d-%H%M%S")
        id_value = "web_" + timestr
        # logprobs is stored as JSON text, or NULL when absent.
        logprobs = json.dumps(choice["logprobs"]) if choice["logprobs"] is not None else None

        # Parameterized insert — values are bound, never string-formatted.
        insert_data_query = """
        INSERT INTO chatbot3 (id, finish_reason, index_value, logprobs, text_line1, text_line2, created, model, object, completion_tokens, prompt_tokens, total_tokens)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);
        """
        cursor.execute(insert_data_query, (
            id_value,
            choice["finish_reason"],
            choice["index"],
            logprobs,
            prompt,                 # text_line1: the user's prompt
            response_text,          # text_line2: the model's reply
            response["created"],
            response["model"],
            response["object"],
            completion_tokens,
            prompt_tokens,
            total_tokens,
        ))
        conn.commit()
    finally:
        # Close the connection even if the insert fails mid-way.
        conn.close()
def clear_chat_history():
    """Reset the conversation to the initial assistant greeting."""
    greeting = {"role": "assistant", "content": "I'm ready for a prompt!"}
    st.session_state.messages = [greeting]


# Sidebar control to wipe the conversation history.
st.sidebar.button('Clear Chat History', on_click=clear_chat_history)