import openai
import streamlit as st
import json
import mysql.connector
import random
import time
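# LaptopGPT v1.0: a Streamlit chat front end for locally hosted GPT4All models
# served through an OpenAI-compatible endpoint, with each exchange logged to MySQL.
# Typical launch (the filename is an assumption): streamlit run laptopgpt.py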

st.title("LaptopGPT v1.0")

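# Point the OpenAI client at the local model server exposed through an ngrok tunnel;
# the key is only a placeholder because the local endpoint does not require one.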
openai.api_base = "https://master-naturally-bluegill.ngrok-free.app/v1"
openai.api_key = "not needed for a local LLM"

if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = "GPT4All Falcon"
# Initialize message history list
if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "assistant", "content": "I'm ready for a prompt!"}]
# display message history in chat window
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
        
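# Sidebar controls: generation parameters, model choice, and an optional stats readout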
token_number = st.sidebar.slider("Maximum Tokens", min_value=10, max_value=400, value=50, step=1)
temp_number = st.sidebar.slider("Temperature", min_value=0.00, max_value=1.00, value=0.28, step=0.01)
model_selector = st.sidebar.selectbox("Available Models", ("GPT4All Falcon", "Llama-2-7B Chat", "Mini Orca (Small)"))

display_stats = st.sidebar.checkbox("Display stats")

# Random identifier (currently unused in the rest of the script)
rand_id = random.randint(0, 999999999)
srand_id = str(rand_id)

# User input
if prompt := st.chat_input("Type some text here!"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)
    
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
    
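        # Send the prompt to the local server's completion endpoint (legacy pre-1.0 openai client style)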
        response = openai.Completion.create(
            model=model_selector,
            prompt=prompt,
            max_tokens=token_number,
            temperature=temp_number,
            top_p=0.95,
            n=1,
            echo=False,
            stream=False
        )

        
        response_text = response["choices"][0]["text"]
        # Debug output: echo the response text and token usage to the server console
        print(response_text)
        print(response["usage"]["prompt_tokens"])
        print(response["usage"]["completion_tokens"])
        print(response["usage"]["total_tokens"])

        ##actual text from the json
        #response_text = getSentToDatabase()
        ##entire json
        #response = getSentToDatabase()

        print(prompt)
        print(response)

        
        prompt_tokens = response["usage"]["prompt_tokens"]
        completion_tokens = response["usage"]["completion_tokens"]
        total_tokens = response["usage"]["total_tokens"]

        # Hypothetical cost if this had run on GPT-4 (USD per 1K tokens: $0.03 prompt, $0.06 completion)
        cost = (prompt_tokens * 0.03 + completion_tokens * 0.06) / 1000

            
        st.markdown(response_text)
        st.session_state.messages.append({"role": "assistant", "content": response_text})

        if display_stats:
            st.markdown(''':red[Tokens in prompt: ]''' + " " + str(prompt_tokens))
            st.markdown(''':orange[Tokens in response: ]''' + " " + str(completion_tokens))
            st.markdown(''':green[Total Tokens: ]''' + " " + str(total_tokens))
            st.markdown(''':blue[GPT4 Cost: ]''' + " $" + str(round(cost, 5)))
            st.markdown(''':rainbow[Our Cost: ]''' + " $0.00")

    

        # Extracting fields from the response
        response_id = response["id"]
        model = response["model"]
        created = response["created"]
        finish_reason = response["choices"][0]["finish_reason"]
        text_content = response["choices"][0]["text"]
        
        # Splitting the 'text' field into different rows based on '\n'
        #text_line = text_content.split('\n')
        
        # Log the exchange to the remote MySQL database
        conn = mysql.connector.connect(
            host='34.70.75.35',
            database='chatbotdb',
            user='remote3',
            password='password'
        )
        
        cursor = conn.cursor()

        timestr = time.strftime("%Y%m%d-%H%M%S")
        
        # Extracting data from JSON
        id_value = "web_" + timestr  # timestamped id marking rows created from the web UI
        finish_reason = response["choices"][0]["finish_reason"]
        index_value = response["choices"][0]["index"]
        #logprobs = "web"
        logprobs = json.dumps(response["choices"][0]["logprobs"]) if response["choices"][0]["logprobs"] is not None else None
        #text_lines = response["choices"][0]["text"].split('\n')
        text_line1 = prompt
        text_line2 = response["choices"][0]["text"]
        
        created = response["created"]
        model = response["model"]
        object_value = response["object"]
        completion_tokens = response["usage"]["completion_tokens"]
        prompt_tokens = response["usage"]["prompt_tokens"]
        total_tokens = response["usage"]["total_tokens"]

        

        # Insert data into the table
        insert_data_query = """
        INSERT INTO chatbot3 (id, finish_reason, index_value, logprobs, text_line1, text_line2, created, model, object, completion_tokens, prompt_tokens, total_tokens)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);
        """
        cursor.execute(insert_data_query, (id_value, finish_reason, index_value, logprobs, text_line1, text_line2, created, model, object_value, completion_tokens, prompt_tokens, total_tokens))
        
        # Commit the changes and close the connection
        conn.commit()
                
        
        conn.close()

def clear_chat_history():
    st.session_state.messages = [{"role": "assistant", "content": "I'm ready for a prompt!"}]
    #model.current_chat_session = empty_chat_session(model.config["systemPrompt"])
st.sidebar.button('Clear Chat History', on_click=clear_chat_history)

#token_number = st.sidebar.slider("Maximum Tokens", min_value=10, max_value=500, value=75, step=1)
#st.sidebar.write(token_number)
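
# The chatbot3 table is not defined in this file; based on the INSERT above it is
# assumed to look roughly like the following (column types are guesses):
#
#   CREATE TABLE chatbot3 (
#       id VARCHAR(64),
#       finish_reason VARCHAR(32),
#       index_value INT,
#       logprobs TEXT,
#       text_line1 TEXT,
#       text_line2 TEXT,
#       created BIGINT,
#       model VARCHAR(64),
#       object VARCHAR(32),
#       completion_tokens INT,
#       prompt_tokens INT,
#       total_tokens INT
#   );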