# Streamlit chatbot: tries a prioritized list of OpenAI models and streams the reply.
import streamlit as st | |
import openai | |
import os | |
def get_api_key():
    """Fetch the OpenAI API key from Streamlit secrets.

    Returns:
        The key string, or None (after rendering an error banner) when the
        secret has not been configured.
    """
    try:
        api_key = st.secrets["API_KEY"]
    except KeyError:
        st.error("API_KEY not found in Streamlit secrets. Please add it.")
        return None
    return api_key
# Function to interact with the OpenAI API with streaming.
def generate_response(messages, model_name, api_key):
    """Request a streaming chat completion from OpenAI.

    Args:
        messages: Full conversation history as a list of
            {"role": ..., "content": ...} dicts.
        model_name: Identifier of the model to try.
        api_key: OpenAI API key.

    Returns:
        The streaming response iterator on success, or None on any API
        failure so the caller can fall back to another model.
    """
    try:
        client = openai.OpenAI(api_key=api_key)
        return client.chat.completions.create(
            model=model_name,
            messages=messages,  # send the entire conversation history
            stream=True,
        )
    # NOTE: RateLimitError and AuthenticationError are subclasses of
    # APIError in the openai v1 SDK, so they must be caught BEFORE it —
    # the previous ordering made these two handlers unreachable.
    except openai.RateLimitError as e:
        # Log the error for debugging, but don't display it in the UI.
        print(f"OpenAI Rate Limit Error with {model_name}: {e}")
        return None
    except openai.AuthenticationError as e:
        # Log the error for debugging, but don't display it in the UI.
        print(f"OpenAI Authentication Error with {model_name}: {e}")
        return None
    except openai.APIError as e:
        # Log the error for debugging, but don't display it in the UI.
        print(f"OpenAI API Error with {model_name}: {e}")
        return None
    except Exception as e:
        # Log the error for debugging, but don't display it in the UI.
        print(f"An unexpected error occurred with {model_name}: {e}")
        return None
# Main Streamlit app.
def main():
    """Run the chat UI: replay history, take input, and stream a reply.

    Tries each model in priority order; on any failure (including
    mid-stream errors) it falls through to the next model.
    """
    st.title("Chatbot with Model Switching and Streaming")

    # Initialize conversation history in session state.
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # Replay the conversation so far.
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # Get user input; nothing to do until the user submits something.
    prompt = st.chat_input("Say something")
    if not prompt:
        return

    # Record and echo the user message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Model priority: first one that succeeds wins. Add more as needed.
    models = ["o1-preview", "o1-preview-2024-09-12", "o1-mini", "gpt-4o-mini", "gpt-3.5-turbo"]

    api_key = get_api_key()
    if not api_key:
        return

    full_response = ""
    # Pass the whole history so the model has conversational context.
    openai_messages = st.session_state.messages
    for model in models:
        # Discard any partial text left over from a failed earlier attempt,
        # so it can't leak into this attempt's output or the saved history.
        full_response = ""
        stream = generate_response(openai_messages, model, api_key)
        if not stream:
            continue  # try the next model in the priority list
        try:
            with st.chat_message("assistant"):
                message_placeholder = st.empty()
                for chunk in stream:
                    if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
                        full_response += chunk.choices[0].delta.content
                        message_placeholder.markdown(full_response + "β")
                message_placeholder.markdown(full_response)
        except Exception as e:
            # With stream=True, API errors can surface while iterating the
            # stream rather than at create(); log and fall back instead of
            # crashing the app.
            print(f"Streaming failed for {model}: {e}")
            continue
        print(f"Using {model} for generation")
        break  # break after a successful response

    if full_response:
        # Persist the assistant's reply in the conversation history.
        st.session_state.messages.append({"role": "assistant", "content": full_response})
# Script entry point: launch the Streamlit app.
if __name__ == "__main__":
    main()