arpita-23 commited on
Commit
20acc8f
·
verified ·
1 Parent(s): ebae085

Upload 3 files

Browse files
Files changed (3) hide show
  1. .env.txt +1 -0
  2. app.py +130 -0
  3. requirements.txt +8 -0
.env.txt ADDED
@@ -0,0 +1 @@
 
 
1
api_key=YOUR_GEMINI_API_KEY  # never commit a real API key; parentheses/quotes would become part of the value
app.py ADDED
@@ -0,0 +1,130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Standard library
import os
import base64
from io import BytesIO

# Third-party
import cv2
import numpy as np
import streamlit as st
import google.generativeai as genai
from deepface import DeepFace  # Replacing FER with DeepFace
from dotenv import load_dotenv
from PIL import Image
15
+
16
+
17
+
18
load_dotenv()

# Read the Gemini API key from the environment (populated by load_dotenv)
# instead of hard-coding it — the previous inline key was committed to
# version control and must be revoked.
# NOTE(review): the repo ships ".env.txt", but load_dotenv() only reads
# ".env" by default — rename the file or pass its path explicitly.
_api_key = os.getenv("api_key") or os.getenv("GOOGLE_API_KEY")
if not _api_key:
    st.error("Gemini API key not found. Set `api_key` in a .env file.")
    st.stop()  # halt the script — nothing below works without a key
genai.configure(api_key=_api_key)
21
+
22
# gemini function for general content generation
def get_gemini_response(input):
    """Generate content for *input* with the Gemini Pro model.

    Returns the raw response object on success, or None when the API
    call fails (the error is surfaced in the UI via st.error).
    """
    try:
        gemini_model = genai.GenerativeModel('gemini-pro')
        return gemini_model.generate_content(input)
    except Exception as e:
        st.error(f"Error: {e}")
        return None
31
+
32
# Function to analyze image for depression and emotion detection using DeepFace
def detect_emotions(image):
    """Detect the dominant facial emotion in *image* with DeepFace.

    Args:
        image: A PIL.Image (as produced by the callers in this file) or a
            numpy array in RGB order.

    Returns:
        Tuple ``(dominant_emotion, emotion_scores)`` where the second item
        maps emotion name -> confidence, or ``(None, None)`` on failure.
    """
    try:
        # Fix: DeepFace.analyze expects a numpy array (or a file path);
        # callers pass a PIL Image, so convert it first.
        frame = np.array(image) if isinstance(image, Image.Image) else image
        # enforce_detection=False keeps analyze from raising when no face
        # is found in the frame.
        analysis = DeepFace.analyze(frame, actions=['emotion'], enforce_detection=False)
        # analyze returns a list with one dict per detected face.
        return analysis[0]['dominant_emotion'], analysis[0]['emotion']
    except Exception as e:
        st.error(f"Error during emotion detection: {e}")
        return None, None
42
+
43
# Function to analyze detected emotions with LLM
def analyze_emotions_with_llm(emotion, emotions):
    """Ask the Gemini model to interpret the detected emotions.

    Args:
        emotion: Name of the dominant emotion (used as a key into *emotions*).
        emotions: Mapping of emotion name -> confidence score.

    Returns:
        The raw Gemini response object, or None if the call failed
        (get_gemini_response reports the error to the UI).
    """
    # Only the dominant emotion and its score are sent to the model.
    emotion_analysis = f"{emotion}: {emotions[emotion]:.2f}"

    # '###' section markers are significant: display_response_content
    # splits the model's answer on them when rendering.
    analysis_prompt = f"""
    ### As a mental health and emotional well-being expert, analyze the following detected emotions.
    ### Detected Emotions:
    {emotion_analysis}
    ### Analysis Output:
    1. Identify any potential signs of depression based on the detected emotions.
    2. Explain the reasoning behind your identification.
    3. Provide recommendations for addressing any identified issues.
    """
    response = get_gemini_response(analysis_prompt)
    return response
58
+
59
# Function to capture live video frame for analysis
def capture_video_frame():
    """Grab one frame from the default webcam and return it as a PIL image.

    Returns None (after showing an error in the UI) when the camera cannot
    be opened or a frame cannot be read.
    """
    camera = cv2.VideoCapture(0)
    if not camera.isOpened():
        st.error("Failed to access the webcam. Ensure you have allowed camera access in your browser.")
        return None
    ok, bgr_frame = camera.read()
    camera.release()
    if not ok:
        st.error("Failed to capture a frame from the webcam.")
        return None
    # OpenCV delivers BGR; convert to RGB before wrapping in a PIL image.
    return Image.fromarray(cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2RGB))
73
+
74
# Function to parse and display response content
def display_response_content(response):
    """Render a Gemini response in the Streamlit UI.

    The response text is split on '###' markers; each section's first line
    is shown bold as a title, the remaining lines as the body. A fallback
    message is shown when the response is empty.
    """
    st.subheader("Response Output")
    if not (response and response.candidates):
        st.write("No response received from the model or quota exceeded.")
        return
    candidate = response.candidates[0]
    text = candidate.content.parts[0].text if candidate.content.parts else ""
    for section in text.split('###'):
        if not section.strip():
            continue
        section_lines = section.split('\n')
        title = section_lines[0].strip()
        body = '\n'.join(ln.strip() for ln in section_lines[1:] if ln.strip())
        if title:
            st.markdown(f"**{title}**")
        if body:
            st.write(body)
91
+
92
## Streamlit App
# Page title and intro text for the two analysis modes defined below.
st.title("AI-Powered Depression and Emotion Detection System")
st.text("Use the AI system for detecting depression and emotions from images and live video.")

# Tabs for different functionalities
tab1, tab2 = st.tabs(["Image Analysis", "Live Video Analysis"])
98
+
99
# Image-analysis tab: upload a picture, detect its dominant emotion, and
# have the LLM interpret the result.
with tab1:
    st.header("Image Analysis")
    uploaded_file = st.file_uploader("Upload an image for analysis", type=["jpg", "jpeg", "png"], help="Please upload an image file.")
    submit_image = st.button('Analyze Image')

    if submit_image:
        if uploaded_file is not None:
            image = Image.open(uploaded_file)
            emotion, emotions = detect_emotions(image)
            if emotion:
                response = analyze_emotions_with_llm(emotion, emotions)
                # Parse and display response in a structured way
                display_response_content(response)
            else:
                st.write("No emotions detected in the image.")
        else:
            # Fix: previously clicking the button with no file silently did nothing.
            st.warning("Please upload an image before clicking Analyze Image.")
114
+
115
# Live-video tab: grab a single webcam frame and run the same
# emotion-detection + LLM-analysis pipeline on it.
with tab2:
    st.header("Live Video Analysis")
    capture_frame = st.button('Capture and Analyze Frame')

    if capture_frame:
        image = capture_video_frame()
        if image is None:
            st.write("Failed to capture video frame.")
        else:
            emotion, emotions = detect_emotions(image)
            if emotion:
                # Parse and display response in a structured way
                display_response_content(analyze_emotions_with_llm(emotion, emotions))
            else:
                st.write("No emotions detected in the video frame.")
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
streamlit  #==1.18.0
google-generativeai
opencv-python-headless  #==4.5.5.64
Pillow  #==8.4.0
deepface  #==0.0.75
python-dotenv  #==0.19.2
numpy  #==1.23.4