Spaces:
Runtime error
Runtime error
File size: 6,781 Bytes
0697d96 9984f89 0697d96 1689c6c 0697d96 fd8f0db 0697d96 d91c8b8 0697d96 c91db31 a480e0c 2b77fb7 0697d96 874f5be |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 |
import gradio as gr
import openai
import requests
import csv
import os
#openai.api_key = os.environ['gptapikey']
prompt_templates = {"ChatGPT 4 API CoverWhale": ""}
def get_empty_state():
    """Build a brand-new per-session conversation state.

    Returns a dict with an empty message history and a zeroed token counter;
    a fresh dict is created on every call so sessions never share state.
    """
    return {"messages": [], "total_tokens": 0}
def download_prompt_templates():
    """Fetch the community prompt-template CSV and merge it into ``prompt_templates``.

    Returns a ``gr.update`` for the template dropdown.  On any network or HTTP
    failure the update is still returned — built from whatever templates are
    already loaded — so the dropdown component is never handed ``None``.
    """
    url = "https://raw.githubusercontent.com/f/awesome-chatgpt-prompts/main/prompts.csv"
    try:
        # Timeout so a hung CDN cannot stall app startup indefinitely.
        response = requests.get(url, timeout=10)
        # Without this, a 404/500 error page would be parsed as CSV silently.
        response.raise_for_status()
        reader = csv.reader(response.text.splitlines())
        next(reader, None)  # skip the header row (None default: tolerate empty file)
        for row in reader:
            if len(row) >= 2:
                act = row[0].strip('"')
                prompt = row[1].strip('"')
                prompt_templates[act] = prompt
    except requests.exceptions.RequestException as e:
        # Best-effort download: log and fall through to a valid dropdown update.
        print(f"An error occurred while downloading prompt templates: {e}")

    # Keep the first (default) entry in place and sort the rest alphabetically.
    choices = list(prompt_templates.keys())
    choices = choices[:1] + sorted(choices[1:])
    return gr.update(value=choices[0], choices=choices)
def on_token_change(user_token):
    """Install the user-supplied token as the module-wide OpenAI API key."""
    openai.api_key = user_token
def on_prompt_template_change(prompt_template):
    """Return the prompt text for the selected template name.

    Gradio can fire the change event with a non-string payload while the
    dropdown is being (re)populated; those events are ignored.  An unknown
    template name maps to the empty string instead of raising ``KeyError``
    (the dropdown value can be stale if the template download failed).
    """
    if not isinstance(prompt_template, str):
        return
    return prompt_templates.get(prompt_template, "")
def submit_message(user_token, prompt, prompt_template, temperature, max_tokens, context_length, state):
    """Handle one chat turn: send the prompt to GPT-4 and refresh the UI.

    Parameters mirror the Gradio inputs wired up below; ``state`` is the
    per-session dict from ``get_empty_state()``.  Returns a 4-tuple for the
    (input_message, chatbot, total_tokens_str, state) outputs.  History is
    kept as alternating user/reply messages, so the chatbot pairs are built
    by zipping even- with odd-indexed entries.
    """
    history = state['messages']
    # Empty prompt: re-render the existing history unchanged.
    if not prompt:
        return gr.update(value=''), [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)], f"Total tokens used: {state['total_tokens']}", state
    prompt_template = prompt_templates[prompt_template]
    system_prompt = []
    if prompt_template:
        system_prompt = [{ "role": "system", "content": prompt_template }]
    prompt_msg = { "role": "user", "content": prompt }
    # Missing API key: record the user message plus an inline error reply so
    # the (user, reply) pairing below stays aligned instead of crashing.
    if not user_token:
        history.append(prompt_msg)
        history.append({
            "role": "system",
            "content": "Error: OpenAI API Key is not set."
        })
        return '', [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)], f"Total tokens used: 0", state
    try:
        # Only the last ``context_length`` exchanges (2 messages each) are sent,
        # bounding the token budget of every request.
        completion = openai.ChatCompletion.create(model="gpt-4", messages=system_prompt + history[-context_length*2:] + [prompt_msg], temperature=temperature, max_tokens=max_tokens)
        history.append(prompt_msg)
        history.append(completion.choices[0].message.to_dict())
        state['total_tokens'] += completion['usage']['total_tokens']
    except Exception as e:
        # Surface API errors in the chat itself rather than crashing the app;
        # token count is left unchanged for a failed call.
        history.append(prompt_msg)
        history.append({
            "role": "system",
            "content": f"Error: {e}"
        })
    # NOTE(review): $0.00005/token looks like a blended GPT-4 rate — confirm
    # against current OpenAI pricing.
    total_tokens_used_msg = f"Total tokens used: {state['total_tokens']} ---- Total Query Cost: ${state['total_tokens']*.00005} "
    chat_messages = [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)]
    #with open('chatlog.txt', 'a') as f:
    # f.write(str(chat_messages))
    return '', chat_messages, total_tokens_used_msg, state
def clear_conversation():
    """Reset the UI for a new conversation.

    Clears the input box (keeping it visible), empties the chatbot, blanks
    the token counter, and swaps in a fresh session state.
    """
    fresh_state = get_empty_state()
    cleared_input = gr.update(value=None, visible=True)
    return cleared_input, None, "", fresh_state
# Page styling: centered column layout, purple header, compact labels.
css = """
#body {background-color: #f5f2ee;}
#col-container {max-width: 80%; margin-left: auto; margin-right: auto;}
#chatbox {min-height: 600px;}
#header {text-align: center; color: #ba55d3;}
#prompt_template_preview {padding: 1em; border-width: 1px; border-style: solid; border-color: #e0e0e0; border-radius: 4px;}
#total_tokens_str {text-align: right; font-size: 0.8em; color: #666;}
#label {font-size: 0.8em; padding: 0.5em; margin: 0;}
.message { font-size: 1.2em; }
"""

with gr.Blocks(css=css) as demo:
    # Per-session conversation state (history + token counter).
    state = gr.State(get_empty_state())

    with gr.Column(elem_id="col-container"):
        gr.Markdown("""## ChatGPT4 Paid API Portal - CoverWhale
                    Queries are covered by [OpenAI's API data usage policy](https://openai.com/policies/api-data-usage-policies).
                    OpenAI will not use API data to train their models and purge queries after 30 days.
                    [Cover Whale](https://www.coverwhale.com) """,
                    elem_id="header")
        with gr.Row():
            with gr.Column():
                chatbot = gr.Chatbot(elem_id="chatbox")
                input_message = gr.Textbox(show_label=False, placeholder="Enter text and press enter", visible=True).style(container=False)
                btn_submit = gr.Button("Submit")
                total_tokens_str = gr.Markdown(elem_id="total_tokens_str")
                btn_clear_conversation = gr.Button("🔃 Start New Conversation")
                # BUG FIX: os.environ['gptapikey'] raised KeyError when the
                # secret was unset, preventing startup.  Fall back to "" so the
                # app starts and submit_message reports the missing key in-chat.
                user_token = gr.Textbox(value=os.environ.get('gptapikey', ''), visible=False)
                prompt_template = gr.Dropdown(label="Set a custom instruction for the chatbot:", choices=list(prompt_templates.keys()))
                prompt_template_preview = gr.Markdown(elem_id="prompt_template_preview")
                with gr.Accordion("Advanced parameters", open=False):
                    temperature = gr.Slider(minimum=0, maximum=2.0, value=0.7, step=0.1, label="Temperature", info="Higher = more creative/chaotic")
                    max_tokens = gr.Slider(minimum=100, maximum=8101, value=5100, step=1, label="Max tokens per response")
                    context_length = gr.Slider(minimum=1, maximum=10, value=2, step=1, label="Context length", info="Number of previous messages to send to the chatbot. Be careful with high values, it can blow up the token budget quickly.")
        gr.HTML('''<br><br><br><center><img src="https://www.coverwhale.com/wp-content/uploads/2022/06/coverwhale_logo_purple.2560760b-768x136.png"></center>''')

    # Wire both the button and Enter-in-textbox to the same handler/outputs.
    btn_submit.click(submit_message, [user_token, input_message, prompt_template, temperature, max_tokens, context_length, state], [input_message, chatbot, total_tokens_str, state])
    input_message.submit(submit_message, [user_token, input_message, prompt_template, temperature, max_tokens, context_length, state], [input_message, chatbot, total_tokens_str, state])
    btn_clear_conversation.click(clear_conversation, [], [input_message, chatbot, total_tokens_str, state])
    prompt_template.change(on_prompt_template_change, inputs=[prompt_template], outputs=[prompt_template_preview])
    user_token.change(on_token_change, inputs=[user_token], outputs=[])
    # BUG FIX: was ``queur=False`` — an invalid keyword argument that raises
    # TypeError at startup; the intended argument is ``queue=False``.
    demo.load(download_prompt_templates, inputs=None, outputs=[prompt_template], queue=False)

demo.queue(concurrency_count=10)
demo.launch(height='1000px')
|