Update app.py
app.py
CHANGED
@@ -922,7 +922,7 @@ def read_file_content(file,max_length):
 
 # 11. Chat with GPT - Caution on quota - now favoring fastest AI pipeline STT Whisper->LLM Llama->TTS
 @st.cache_resource
-def chat_with_model(prompt, document_section='', model_choice='gpt-3.5-turbo'):
+def chat_with_model(prompt, document_section='', model_choice='gpt-4-0125-preview'): # gpt-4-0125-preview gpt-3.5-turbo
     model = model_choice
     conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
     conversation.append({'role': 'user', 'content': prompt})
@@ -933,9 +933,7 @@ def chat_with_model(prompt, document_section='', model_choice='gpt-3.5-turbo'):
     res_box = st.empty()
     collected_chunks = []
     collected_messages = []
-
-    st.write('LLM stream ' + 'gpt-3.5-turbo')
-    for chunk in openai.ChatCompletion.create(model='gpt-3.5-turbo', messages=conversation, temperature=0.5, stream=True):
+    for chunk in openai.ChatCompletion.create(model='gpt-4-0125-preview', messages=conversation, temperature=0.5, stream=True): # gpt-4-0125-preview gpt-3.5-turbo
         collected_chunks.append(chunk)
         chunk_message = chunk['choices'][0]['delta']
         collected_messages.append(chunk_message)
@@ -953,7 +951,7 @@ def chat_with_model(prompt, document_section='', model_choice='gpt-3.5-turbo'):
     return full_reply_content
 
 @st.cache_resource
-def chat_with_file_contents(prompt, file_content, model_choice='gpt-3.5-turbo'):
+def chat_with_file_contents(prompt, file_content, model_choice='gpt-4-0125-preview'): # gpt-4-0125-preview gpt-3.5-turbo
     conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
     conversation.append({'role': 'user', 'content': prompt})
     if len(file_content)>0:
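
Between the second and third hunks (old lines 942-952, unchanged by this commit) the streamed deltas are joined into full_reply_content and written into res_box. For orientation, here is a minimal self-contained sketch of that streaming pattern against the legacy pre-1.0 openai SDK this code targets; the join-and-repaint logic and the sample prompt are assumptions based on the common cookbook pattern, not code taken from app.py:

import openai
import streamlit as st

conversation = [
    {'role': 'system', 'content': 'You are a helpful assistant.'},
    {'role': 'user', 'content': 'Hello!'},  # illustrative prompt, not from the diff
]

res_box = st.empty()      # placeholder that is repainted on every chunk
collected_chunks = []     # raw streamed chunks
collected_messages = []   # per-chunk delta dicts

# Legacy (openai<1.0) streaming call, matching the diff above.
for chunk in openai.ChatCompletion.create(model='gpt-4-0125-preview',
                                          messages=conversation,
                                          temperature=0.5, stream=True):
    collected_chunks.append(chunk)
    chunk_message = chunk['choices'][0]['delta']
    collected_messages.append(chunk_message)
    # Assumed assembly step: join all content received so far and repaint.
    full_reply_content = ''.join(m.get('content', '') for m in collected_messages)
    res_box.markdown(full_reply_content)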
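
A related caveat: openai.ChatCompletion is the legacy interface of the openai Python package and was removed in openai>=1.0, so this code only runs against a pinned openai<1.0 dependency. For comparison only (not part of this commit), the equivalent streaming call on the current SDK looks roughly like this:

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Streaming chat completion on openai>=1.0.
stream = client.chat.completions.create(
    model='gpt-4-0125-preview',
    messages=[
        {'role': 'system', 'content': 'You are a helpful assistant.'},
        {'role': 'user', 'content': 'Hello!'},  # illustrative prompt
    ],
    temperature=0.5,
    stream=True,
)

full_reply_content = ''
for chunk in stream:
    delta = chunk.choices[0].delta.content  # None on role/finish chunks
    if delta:
        full_reply_content += delta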