import streamlit as st
import google.generativeai as genai
from datetime import datetime

from prompts import USER_SUPPORT_SYSTEM_PROMPT, MI_SYSTEM_PROMPT
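
# NOTE (assumption): google.generativeai requires an API key before any model call.
# This module never configures one, so it is assumed to happen at app startup,
# for example:
#   genai.configure(api_key=st.secrets["GEMINI_API_KEY"])
# The secret name "GEMINI_API_KEY" is illustrative, not defined in this file.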


def show_moti_chat():
    st.title("Moti Chat - AI Mental Health Assistant")

    # Initialize chat history
    if "messages" not in st.session_state:
        st.session_state.messages = [
            {"role": "system", "content": USER_SUPPORT_SYSTEM_PROMPT}
        ]

    # Chat interface
    st.write("This is a safe space to discuss your thoughts and feelings. I'm here to listen and support you.")

    # Display chat history
    for message in st.session_state.messages[1:]:  # Skip the system message
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # Chat input
    if prompt := st.chat_input("Share what's on your mind..."):
        # Add the user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})

        # Display the user message
        with st.chat_message("user"):
            st.markdown(prompt)

        # Generate the AI response
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            try:
                # Prepare chat context from the last few messages, labelled by role
                # so the model can tell who said what
                chat_context = "\n".join([
                    f"{msg['role']}: {msg['content']}"
                    for msg in st.session_state.messages[-5:]  # Last 5 messages for context
                ])

                # Generate response using Gemini
                response = generate_response(chat_context, prompt)

                # Display response
                message_placeholder.markdown(response)

                # Add the AI response to chat history
                st.session_state.messages.append({"role": "assistant", "content": response})
            except Exception as e:
                error_message = f"I apologize, but I'm having trouble responding right now. Error: {str(e)}"
                message_placeholder.error(error_message)
                st.session_state.messages.append({"role": "assistant", "content": error_message})


def generate_response(context, prompt):
    try:
        # Configure the Gemini model
        model = genai.GenerativeModel('gemini-pro')

        # Prepare the prompt with conversation context and MI principles
        full_prompt = f"""
        Context: You are a mental health support AI using Motivational Interviewing principles.

        Previous conversation context: {context}

        User's message: {prompt}

        Please provide an empathetic, supportive response that:
        1. Shows empathy and understanding
        2. Uses reflective listening
        3. Encourages self-reflection
        4. Supports autonomy
        5. Avoids direct advice unless specifically requested
        6. Maintains professional boundaries

        Response:
        """

        # Generate response
        response = model.generate_content(full_prompt)

        # Process and return response
        return response.text
    except Exception as e:
        st.error(f"Error generating response: {str(e)}")
        return "I apologize, but I'm having trouble responding right now. Please try again."


def save_chat_history():
    """Save chat history to a timestamped text file."""
    if len(st.session_state.messages) > 1:  # If there are messages besides the system prompt
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"chat_history_{timestamp}.txt"
        try:
            with open(filename, "w", encoding="utf-8") as f:
                for message in st.session_state.messages[1:]:  # Skip the system message
                    f.write(f"{message['role']}: {message['content']}\n\n")
            return filename
        except Exception as e:
            st.error(f"Error saving chat history: {str(e)}")
            return None


def load_chat_history(file):
    """Load chat history from an uploaded chat-history text file."""
    try:
        messages = [{"role": "system", "content": USER_SUPPORT_SYSTEM_PROMPT}]
        # `file` is the object returned by st.file_uploader, so decode its bytes
        # rather than treating it as a filesystem path
        content = file.getvalue().decode("utf-8").split("\n\n")
        for message in content:
            if message.strip():
                role, text = message.split(": ", 1)
                messages.append({"role": role, "content": text})
        return messages
    except Exception as e:
        st.error(f"Error loading chat history: {str(e)}")
        return None


# Add chat controls
def show_chat_controls():
    st.sidebar.subheader("Chat Controls")

    # Save chat history
    if st.sidebar.button("Save Chat History"):
        filename = save_chat_history()
        if filename:
            st.sidebar.success(f"Chat history saved to {filename}")

    # Load chat history
    uploaded_file = st.sidebar.file_uploader("Load Chat History", type="txt")
    if uploaded_file is not None:
        messages = load_chat_history(uploaded_file)
        if messages:
            st.session_state.messages = messages
            st.sidebar.success("Chat history loaded successfully")

    # Clear chat
    if st.sidebar.button("Clear Chat"):
        st.session_state.messages = [
            {"role": "system", "content": USER_SUPPORT_SYSTEM_PROMPT}
        ]
        st.sidebar.success("Chat cleared")


# Add emergency resources
def show_emergency_resources():
    with st.sidebar.expander("Emergency Resources"):
        st.markdown("""
        If you're experiencing a mental health emergency:

        🚨 **Emergency Services**: 911

        🆘 **Crisis Hotlines**:
        - National Suicide Prevention Lifeline: 988
        - Crisis Text Line: Text HOME to 741741

        🏥 **Other Resources**:
        - SAMHSA's National Helpline: 1-800-662-4357
        - National Alliance on Mental Illness: 1-800-950-6264

        Remember: If you're having thoughts of self-harm or suicide,
        please seek immediate professional help.
        """)


# Add feedback mechanism
def show_feedback():
    with st.sidebar.expander("Provide Feedback"):
        feedback = st.text_area("How can we improve?")
        if st.button("Submit Feedback"):
            # Add feedback handling logic here
            st.success("Thank you for your feedback!")


def show_moti_chat_main():
    show_moti_chat()
    show_chat_controls()
    show_emergency_resources()
    show_feedback()
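

# Entry point (assumption): this sketch assumes the file is run directly as a
# Streamlit script (e.g. `streamlit run app.py` — the filename is a guess). In a
# multipage app, the page framework would call show_moti_chat_main() instead.
if __name__ == "__main__":
    show_moti_chat_main()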