arpita-23 committed
Commit 3053308 · verified · 1 Parent(s): 060f2bb

Update app.py

Files changed (1)
  1. app.py +173 -97
app.py CHANGED
@@ -1,98 +1,174 @@
- import streamlit as st
- import google.generativeai as genai
  import os
- from PIL import Image
- import numpy as np
- from deepface import DeepFace  # Replacing FER with DeepFace
- from dotenv import load_dotenv
-
- # Print out successful imports
- print("DeepFace is installed and ready to use!")
- print("Google Generative AI module is successfully imported!")
-
- # Load API keys and environment variables
- load_dotenv()
- genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))  # API key read from the environment loaded above instead of being hardcoded
-
- # gemini function for general content generation
- def get_gemini_response(input):
-     try:
-         model = genai.GenerativeModel('gemini-pro')
-         response = model.generate_content(input)
-         return response
-     except Exception as e:
-         # Handle quota exceeded error
-         if "RATE_LIMIT_EXCEEDED" in str(e):
-             st.error("Quota exceeded for content generation. Please try again later.")
-             return None
-         else:
-             st.error(f"Error: {e}")
-             return None
-
- # Function to analyze image for depression and emotion detection using DeepFace
- def detect_emotions(image):
-     try:
-         # Convert PIL Image to NumPy array
-         image_array = np.array(image)
-         # Use DeepFace to analyze emotions
-         analysis = DeepFace.analyze(image_array, actions=['emotion'], enforce_detection=False)
-         # Return the dominant emotion and its score
-         return analysis[0]['dominant_emotion'], analysis[0]['emotion']
-     except Exception as e:
-         st.error(f"Error during emotion detection: {e}")
-         return None, None
-
- # Function to analyze detected emotions with LLM
- def analyze_emotions_with_llm(emotion, emotions):
-     emotion_analysis = f"{emotion}: {emotions[emotion]:.2f}"
-     analysis_prompt = f"""
-     ### As a mental health and emotional well-being expert, analyze the following detected emotions.
-     ### Detected Emotions:
-     {emotion_analysis}
-     ### Analysis Output:
-     1. Identify any potential signs of depression based on the detected emotions.
-     2. Explain the reasoning behind your identification.
-     3. Provide recommendations for addressing any identified issues.
-     """
-     response = get_gemini_response(analysis_prompt)
-     return response
-
- # Function to parse and display response content
- def display_response_content(response):
-     st.subheader("Response Output")
-     if response and hasattr(response, 'candidates'):
-         response_content = response.candidates[0].content.parts[0].text if response.candidates[0].content.parts else ""
-         sections = response_content.split('###')
-         for section in sections:
-             if section.strip():
-                 section_lines = section.split('\n')
-                 section_title = section_lines[0].strip()
-                 section_body = '\n'.join(line.strip() for line in section_lines[1:] if line.strip())
-                 if section_title:
-                     st.markdown(f"**{section_title}**")
-                 if section_body:
-                     st.write(section_body)
-     else:
-         st.write("No response received from the model or quota exceeded.")
-
- # Streamlit App
- st.title("AI-Powered Depression and Emotion Detection System")
- st.text("Use the AI system for detecting depression and emotions from images.")
-
- # Tabs for different functionalities (only image analysis in this version)
- with st.container():
-     st.header("Image Analysis")
-     uploaded_file = st.file_uploader("Upload an image for analysis", type=["jpg", "jpeg", "png"], help="Please upload an image file.")
-     submit_image = st.button('Analyze Image')
-
-     if submit_image:
-         if uploaded_file is not None:
-             image = Image.open(uploaded_file)  # Open the uploaded image
-             emotion, emotions = detect_emotions(image)  # Detect emotions using DeepFace
-             if emotion:  # If emotions are detected
-                 response = analyze_emotions_with_llm(emotion, emotions)  # Analyze detected emotions with LLM
-                 display_response_content(response)  # Display the analysis response
-             else:
-                 st.write("No emotions detected in the image.")  # If no emotion is detected
-         else:
-             st.write("Please upload an image first.")  # Prompt for image upload if none is uploaded
  import os
+ import json
+ import sqlite3
+ from datetime import datetime
+ import streamlit as st
+ from langchain_huggingface import HuggingFaceEmbeddings
+ from langchain_chroma import Chroma
+ from langchain_groq import ChatGroq
+ from langchain.memory import ConversationBufferMemory
+ from langchain.chains import ConversationalRetrievalChain
+ from deep_translator import GoogleTranslator
+
+ # Directory paths and configurations
+ working_dir = os.path.dirname(os.path.abspath(__file__))
+ config_data = json.load(open(f"{working_dir}/config.json"))
+ GROQ_API_KEY = config_data["GROQ_API_KEY"]
+ os.environ["GROQ_API_KEY"] = GROQ_API_KEY
+
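The block above assumes a config.json sitting next to app.py with a single GROQ_API_KEY entry. A minimal sketch of creating that file (the key value is a placeholder) would be:

import json

# Write a minimal config.json next to app.py; replace the placeholder with a real Groq API key.
with open("config.json", "w") as f:
    json.dump({"GROQ_API_KEY": "<your-groq-api-key>"}, f, indent=2)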
+ # Set up the database with check_same_thread=False
+ def setup_db():
+     conn = sqlite3.connect("chat_history.db", check_same_thread=False)  # check_same_thread=False lets Streamlit's script threads reuse this connection
+     cursor = conn.cursor()
+     cursor.execute("""
+         CREATE TABLE IF NOT EXISTS chat_histories (
+             id INTEGER PRIMARY KEY AUTOINCREMENT,
+             username TEXT,
+             timestamp TEXT,
+             day TEXT,
+             user_message TEXT,
+             assistant_response TEXT
+         )
+     """)
+     conn.commit()
+     return conn  # Return the connection
+
+ # Function to save chat history to SQLite
+ def save_chat_history(conn, username, timestamp, day, user_message, assistant_response):
+     cursor = conn.cursor()
+     cursor.execute("""
+         INSERT INTO chat_histories (username, timestamp, day, user_message, assistant_response)
+         VALUES (?, ?, ?, ?, ?)
+     """, (username, timestamp, day, user_message, assistant_response))
+     conn.commit()
+
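save_chat_history only writes rows. A companion reader is not part of this file, but a hypothetical load_chat_history helper could replay a user's earlier turns from the same table, roughly like this:

def load_chat_history(conn, username, limit=50):
    # Fetch the most recent turns for a user, oldest first, so they can be
    # appended back into st.session_state.chat_history when a session restarts.
    cursor = conn.cursor()
    cursor.execute("""
        SELECT user_message, assistant_response
        FROM chat_histories
        WHERE username = ?
        ORDER BY id DESC
        LIMIT ?
    """, (username, limit))
    return list(reversed(cursor.fetchall()))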
+ # Function to set up vectorstore for embeddings
+ def setup_vectorstore():
+     embeddings = HuggingFaceEmbeddings()
+     vectorstore = Chroma(persist_directory="vector_db_dir", embedding_function=embeddings)
+     return vectorstore
+
+ # Function to set up the chatbot chain
+ def chat_chain(vectorstore):
+     llm = ChatGroq(model="llama-3.1-70b-versatile", temperature=0)
+     retriever = vectorstore.as_retriever()
+     memory = ConversationBufferMemory(
+         llm=llm,
+         output_key="answer",
+         memory_key="chat_history",
+         return_messages=True
+     )
+     chain = ConversationalRetrievalChain.from_llm(
+         llm=llm,
+         retriever=retriever,
+         chain_type="stuff",
+         memory=memory,
+         verbose=True,
+         return_source_documents=True
+     )
+     return chain
+
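setup_vectorstore only opens an existing Chroma collection in vector_db_dir; the ingestion step that fills it is not part of this commit. A rough sketch of building such a store from a source text (the file name and chunk sizes here are assumptions) could look like:

from langchain_huggingface import HuggingFaceEmbeddings
from langchain_chroma import Chroma
from langchain_community.document_loaders import PyPDFLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter

# Load a source PDF, split it into overlapping chunks, and persist the embeddings
# into the same directory that setup_vectorstore() reads from.
docs = PyPDFLoader("bhagavad_gita.pdf").load()  # assumed source document
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
chunks = splitter.split_documents(docs)
Chroma.from_documents(chunks, HuggingFaceEmbeddings(), persist_directory="vector_db_dir")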
+ # Streamlit UI setup
+ st.set_page_config(page_title="Bhagavad Gita Query Assistant", page_icon="📚", layout="centered")
+ st.title("📚 Bhagavad Gita & Yoga Sutras Query Assistant")
+ st.subheader("Ask questions and explore timeless wisdom!")
+
+ # Initialize session state
+ if "conn" not in st.session_state:
+     st.session_state.conn = setup_db()
+
+ if "username" not in st.session_state:
+     username = st.text_input("Enter your name to proceed:")
+     if username:
+         with st.spinner("Loading chatbot interface... Please wait."):
+             st.session_state.username = username
+             st.session_state.chat_history = []  # Initialize empty chat history in memory
+             st.session_state.vectorstore = setup_vectorstore()
+             st.session_state.conversational_chain = chat_chain(st.session_state.vectorstore)
+             st.success(f"Welcome, {username}! The chatbot interface is ready.")
+ else:
+     username = st.session_state.username
+
+ # Language options for the translated output (English plus 28 Indian languages)
+ languages = [
+     "English", "Hindi", "Bengali", "Telugu", "Marathi", "Tamil", "Urdu", "Gujarati", "Malayalam", "Kannada",
+     "Punjabi", "Odia", "Maithili", "Sanskrit", "Santali", "Kashmiri", "Nepali", "Dogri", "Manipuri", "Bodo",
+     "Sindhi", "Assamese", "Konkani", "Awadhi", "Rajasthani", "Haryanvi", "Bihari", "Chhattisgarhi", "Magahi"
+ ]
+
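GoogleTranslator is later called with selected_language.lower() as the target, and Google Translate does not cover every language in this list (Awadhi or Magahi, for example), so an unsupported choice would raise an exception at query time. A small defensive check against deep_translator's own language table, roughly as below, could guard the translation step:

from deep_translator import GoogleTranslator

# Language names (lowercase) that deep_translator's Google backend actually accepts.
supported_languages = GoogleTranslator().get_supported_languages(as_dict=True)

def is_translatable(language_name):
    # True when the selected language can be passed as a target to GoogleTranslator.
    return language_name.lower() in supported_languages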
+ # Main interface
+ if "username" in st.session_state:
+     st.subheader(f"Hello {username}, start your query below!")
+
+     # Language selection for translation
+     selected_language = st.selectbox("Select the output language", languages, index=languages.index("English"))
+
+     # Input method selection (only typed questions in this version)
+     input_option = st.radio("Choose Input Method", ("Type your question",))
+
+     # Container to hold the chat interface (for scrolling)
+     chat_container = st.container()
+
+     with chat_container:
+         if "chat_history" in st.session_state:
+             for message in st.session_state.chat_history:
+                 if message['role'] == 'user':
+                     with st.chat_message("user"):
+                         st.markdown(message["content"])
+                 elif message['role'] == 'assistant':
+                     with st.chat_message("assistant"):
+                         st.markdown(message["content"])
+
+     # User input section for typing
+     user_query = None  # Initialize user_query as None
+
+     if input_option == "Type your question":
+         user_query = st.chat_input("Ask AI about Bhagavad Gita or Yoga Sutras:")  # Chat input for typing
+
+     # If user input is provided, process the query
+     if user_query:
+         with st.spinner("Processing your query... Please wait."):
+
+             # Save user input to chat history in memory
+             st.session_state.chat_history.append({"role": "user", "content": user_query})
+
+             # Display user's message in chatbot (for UI display)
+             with st.chat_message("user"):
+                 st.markdown(user_query)
+
+             # Get assistant's response from the chain
+             with st.chat_message("assistant"):
+                 response = st.session_state.conversational_chain({"question": user_query})
+                 assistant_response = response["answer"]
+
+                 # Save assistant's response to chat history in memory
+                 st.session_state.chat_history.append({"role": "assistant", "content": assistant_response})
+
+                 # Format output in JSON
+                 formatted_output = {
+                     "book": "Bhagavad Gita",  # or "PYS" for Yoga Sutras
+                     "chapter_number": "2",  # Example, replace with actual value from response
+                     "verse_number": "47",  # Example, replace with actual value from response
+                     "shloka": "Yoga karmasu kaushalam",  # Example, replace with actual shloka from response
+                     "translation": assistant_response,
+                     "commentary": "This is a commentary on the shloka.",  # Replace with actual commentary
+                     "summary": "This is a summary of the chapter."  # Replace with actual summary
+                 }
+
+                 # Save the chat history to the database (SQLite)
+                 timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+                 day = datetime.now().strftime("%A")  # Get the day of the week (e.g., Monday)
+                 save_chat_history(st.session_state.conn, username, timestamp, day, user_query, assistant_response)
+
+                 # Translate the assistant's response based on selected language
+                 translator = GoogleTranslator(source="en", target=selected_language.lower())
+                 translated_response = translator.translate(assistant_response)
+
+                 # Display translated response
+                 st.markdown(f"**Translated Answer ({selected_language}):** {translated_response}")
+
+                 # Display the formatted output
+                 st.json(formatted_output)
+
+ # st.chat_input clears its own field after submission; this only resets a spare session key
+ st.session_state.user_input = ""
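Since the chain is built with return_source_documents=True, each response also carries a source_documents list of retrieved chunks. The hardcoded placeholders in formatted_output could instead be filled from the top document's metadata; the sketch below assumes the ingested chunks carry book/chapter/verse fields, which this commit does not guarantee:

def build_formatted_output(response):
    # Read verse metadata from the highest-ranked retrieved chunk, falling back to
    # empty strings; the metadata key names here are assumptions about the ingestion step.
    docs = response.get("source_documents", [])
    meta = docs[0].metadata if docs else {}
    return {
        "book": meta.get("book", ""),
        "chapter_number": meta.get("chapter", ""),
        "verse_number": meta.get("verse", ""),
        "shloka": meta.get("shloka", ""),
        "translation": response["answer"],
        "commentary": meta.get("commentary", ""),
        "summary": meta.get("summary", ""),
    }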