Commit 57c2a16 · Parent: 7e727b6
try and except for solving 'while a run run is active'
app.py CHANGED
@@ -1,6 +1,7 @@
 import os
 import re
 import streamlit as st
+import openai
 from dotenv import load_dotenv
 from langchain.agents.openai_assistant import OpenAIAssistantRunnable

@@ -20,33 +21,57 @@ def remove_citation(text: str) -> str:
     pattern = r"【\d+†\w+】"
     return re.sub(pattern, "📚", text)

-# Initialize session state for messages
+# Initialize session state for messages and thread_id
 if "messages" not in st.session_state:
     st.session_state["messages"] = []
 if "thread_id" not in st.session_state:
     st.session_state["thread_id"] = None
-if "is_processing" not in st.session_state:
-    st.session_state["is_processing"] = False

 st.title("Solution Specifier A")

 def predict(user_input: str) -> str:
     """
     This function calls our OpenAIAssistantRunnable to get a response.
-    [two deleted docstring lines; content not shown in the diff view]
+    We either create a new thread (no thread_id yet) or continue the existing thread.
+    If the server complains that a run is still active, we reset the thread_id and retry once.
     """
-    [nine deleted body lines; content not shown in the diff view]
+    try:
+        if st.session_state["thread_id"] is None:
+            # Start a new thread
+            response = extractor_llm.invoke({"content": user_input})
+            st.session_state["thread_id"] = response.thread_id
+        else:
+            # Continue the existing thread
+            response = extractor_llm.invoke(
+                {"content": user_input, "thread_id": st.session_state["thread_id"]}
+            )
+        output = response.return_values["output"]
+        return remove_citation(output)
+
+    except openai.error.BadRequestError as e:
+        # If the error says a run is still active, reset to a new thread and re-invoke once
+        err_msg = str(e)
+        if "while a run" in err_msg:
+            st.session_state["thread_id"] = None
+            # Re-invoke once to get a fresh thread
+            try:
+                response = extractor_llm.invoke({"content": user_input})
+                st.session_state["thread_id"] = response.thread_id
+                output = response.return_values["output"]
+                return remove_citation(output)
+            except Exception as e2:
+                st.error(f"Error after resetting thread: {e2}")
+                return ""
+        else:
+            # Some other bad request
+            st.error(err_msg)
+            return ""
+    except Exception as e:
+        # Catch-all for any other error
+        st.error(str(e))
+        return ""

-# Display any existing messages
+# Display any existing messages
 for msg in st.session_state["messages"]:
     if msg["role"] == "user":
         with st.chat_message("user"):
@@ -56,35 +81,23 @@ for msg in st.session_state["messages"]:
         st.write(msg["content"])

 # Create the chat input widget at the bottom of the page
-user_input = st.chat_input(
-    "Type your message here...",
-    disabled=st.session_state["is_processing"]
-)
+user_input = st.chat_input("Type your message here...")

 # When the user hits ENTER on st.chat_input
-if user_input:
-    [seventeen deleted handler lines; content not fully shown in the diff view]
-    # Display the assistant's reply
-    with st.chat_message("assistant"):
-        st.write(response_text)
-
-    finally:
-        # Reset processing state when done
-        st.session_state["is_processing"] = False
+if user_input:
+    # Add the user message to session state
+    st.session_state["messages"].append({"role": "user", "content": user_input})
+
+    # Display the user's message
+    with st.chat_message("user"):
+        st.write(user_input)
+
+    # Get the assistant's response
+    response_text = predict(user_input)
+
+    # Add the assistant response to session state
+    st.session_state["messages"].append({"role": "assistant", "content": response_text})
+
+    # Display the assistant's reply
+    with st.chat_message("assistant"):
+        st.write(response_text)
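One caveat on the new except clause. The 400 the Assistants API returns here is the message the commit title quotes ("while a run run_… is active", i.e. the word "run" followed by a run ID), but openai.error.BadRequestError is not a name either SDK generation provides: openai>=1.0 exposes the error as openai.BadRequestError and drops the openai.error module, while openai<1.0 exposed it as openai.error.InvalidRequestError. Resolving the class once at import time, as in this sketch, would avoid an AttributeError being raised from inside the except clause itself:

import openai

# Pick whichever 400-error class the installed SDK actually defines.
try:
    BadRequest = openai.BadRequestError            # openai >= 1.0
except AttributeError:
    BadRequest = openai.error.InvalidRequestError  # openai < 1.0

# predict() could then use `except BadRequest as e:` regardless of SDK version.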