abhilash88 committed
Commit ba069c3 · verified · 1 Parent(s): 37a2268

Create app.py

Files changed (1)
  1. app.py +516 -0
app.py ADDED
@@ -0,0 +1,516 @@
import gradio as gr
import torch
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from transformers import pipeline
import logging
import time
from typing import Tuple, List, Dict, Optional

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Model configuration
MODEL_NAME = "abhilash88/face-emotion-detection"

# Emotion labels and colors for visualization
EMOTION_COLORS = {
    'angry': '#FF4444',
    'disgust': '#AA4444',
    'fear': '#4444FF',
    'happy': '#44FF44',
    'sad': '#4444AA',
    'surprise': '#FFAA44',
    'neutral': '#AAAAAA'
}

# Global model handles, populated once at startup by load_models()
emotion_classifier = None
face_cascade = None
def load_models():
    """Load the emotion detection model and the OpenCV face cascade"""
    global emotion_classifier, face_cascade

    try:
        logger.info(f"Loading emotion detection model: {MODEL_NAME}")
        # top_k=7 returns scores for all seven emotion classes;
        # return_all_scores is a text-classification argument and is not
        # accepted by image-classification pipelines
        emotion_classifier = pipeline(
            "image-classification",
            model=MODEL_NAME,
            top_k=7
        )
        logger.info("Emotion detection model loaded successfully")

        # Load OpenCV's bundled frontal-face Haar cascade
        face_cascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
        )
        logger.info("Face detection cascade loaded successfully")

        return True
    except Exception as e:
        logger.error(f"Error loading models: {e}")
        return False
def detect_faces(image: np.ndarray) -> List[Tuple[int, int, int, int]]:
    """Detect faces in an RGB image using OpenCV's Haar cascade"""
    try:
        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        faces = face_cascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30)
        )
        # detectMultiScale returns an empty tuple (which has no .tolist())
        # when nothing is found, so guard before converting
        return faces.tolist() if len(faces) > 0 else []
    except Exception as e:
        logger.error(f"Error detecting faces: {e}")
        return []
def predict_emotion(face_image: Image.Image) -> List[Dict]:
    """Predict emotion scores for a single cropped face"""
    try:
        if emotion_classifier is None:
            return [{"label": "neutral", "score": 1.0}]

        results = emotion_classifier(face_image)
        return results
    except Exception as e:
        logger.error(f"Error predicting emotion: {e}")
        return [{"label": "neutral", "score": 1.0}]
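# Shape of the list predict_emotion returns: one dict per emotion class,
# sorted by score (the scores below are purely illustrative):
#   [{'label': 'happy', 'score': 0.91}, {'label': 'neutral', 'score': 0.04}, ...]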
def draw_emotion_results(image: Image.Image, faces: List, emotions: List) -> Image.Image:
    """Draw bounding boxes and emotion labels on the image"""
    try:
        draw = ImageDraw.Draw(image)

        # Try to load a TrueType font; fall back to PIL's default if unavailable
        try:
            font = ImageFont.truetype("arial.ttf", 16)
        except OSError:
            font = ImageFont.load_default()

        for i, (x, y, w, h) in enumerate(faces):
            if i < len(emotions):
                # Get the highest-scoring emotion for this face
                top_emotion = max(emotions[i], key=lambda e: e['score'])
                emotion_label = top_emotion['label']
                confidence = top_emotion['score']

                # Get the display color for this emotion
                color = EMOTION_COLORS.get(emotion_label, '#FFFFFF')

                # Draw bounding box
                draw.rectangle([(x, y), (x + w, y + h)], outline=color, width=3)

                # Compose the emotion label
                label_text = f"{emotion_label}: {confidence:.2f}"

                # Measure the text so the background fits it
                bbox = draw.textbbox((0, 0), label_text, font=font)
                text_width = bbox[2] - bbox[0]
                text_height = bbox[3] - bbox[1]

                # Draw a filled background for the label
                draw.rectangle(
                    [(x, y - text_height - 5), (x + text_width + 10, y)],
                    fill=color
                )

                # Draw the label text
                draw.text((x + 5, y - text_height - 2), label_text, fill='white', font=font)

        return image
    except Exception as e:
        logger.error(f"Error drawing results: {e}")
        return image
def process_single_image(image: Image.Image) -> Tuple[Image.Image, str]:
    """Process a single image for emotion detection"""
    try:
        if image is None:
            return None, "No image provided"

        # Detect faces on the numpy view of the image
        faces = detect_faces(np.array(image))

        if not faces:
            return image, "No faces detected in the image"

        # Predict an emotion distribution for each detected face
        emotions_list = []
        for (x, y, w, h) in faces:
            face_region = image.crop((x, y, x + w, y + h))
            emotions_list.append(predict_emotion(face_region))

        # Draw results on a copy of the input
        result_image = draw_emotion_results(image.copy(), faces, emotions_list)

        # Create summary text
        summary_lines = [f"Detected {len(faces)} face(s):"]
        for i, emotions in enumerate(emotions_list):
            top_emotion = max(emotions, key=lambda e: e['score'])
            summary_lines.append(f"Face {i+1}: {top_emotion['label']} ({top_emotion['score']:.2f})")

        summary = "\n".join(summary_lines)

        return result_image, summary

    except Exception as e:
        logger.error(f"Error processing image: {e}")
        return image, f"Error processing image: {e}"
def process_webcam_frame(image: Image.Image) -> Image.Image:
    """Process a webcam frame for real-time emotion detection"""
    try:
        if image is None:
            return None

        # Detect faces
        faces = detect_faces(np.array(image))

        if not faces:
            return image

        # Predict an emotion distribution for each detected face
        emotions_list = []
        for (x, y, w, h) in faces:
            face_region = image.crop((x, y, x + w, y + h))
            emotions_list.append(predict_emotion(face_region))

        # Draw results on a copy of the frame
        return draw_emotion_results(image.copy(), faces, emotions_list)

    except Exception as e:
        logger.error(f"Error processing webcam frame: {e}")
        return image
def analyze_emotions_batch(images: List[Image.Image]) -> str:
    """Analyze emotions in multiple images"""
    try:
        if not images:
            return "No images provided"

        all_results = []

        for idx, image in enumerate(images):
            if image is None:
                continue

            # Detect faces
            faces = detect_faces(np.array(image))

            if not faces:
                all_results.append(f"Image {idx+1}: No faces detected")
                continue

            # Report the top emotion for each face in this image
            image_emotions = []
            for (x, y, w, h) in faces:
                face_region = image.crop((x, y, x + w, y + h))
                emotions = predict_emotion(face_region)
                top_emotion = max(emotions, key=lambda e: e['score'])
                image_emotions.append(f"{top_emotion['label']} ({top_emotion['score']:.2f})")

            all_results.append(f"Image {idx+1}: {', '.join(image_emotions)}")

        return "\n".join(all_results)

    except Exception as e:
        logger.error(f"Error in batch analysis: {e}")
        return f"Error in batch analysis: {e}"
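# The Batch Processing tab feeds gr.File values (temp-file wrappers or plain
# path strings, depending on the Gradio version) rather than PIL images, so an
# adapter is needed before calling analyze_emotions_batch. A minimal sketch;
# process_batch_files is a helper added for this purpose, not part of the
# model's own API:
def process_batch_files(files) -> str:
    """Open uploaded files as PIL images and run the batch analyzer"""
    if not files:
        return "No images provided"
    images = []
    for f in files:
        path = getattr(f, "name", f)  # wrapper object or plain path string
        try:
            images.append(Image.open(path).convert("RGB"))
        except Exception as e:
            logger.error(f"Could not open {path}: {e}")
    return analyze_emotions_batch(images)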
def get_emotion_statistics(image: Image.Image) -> str:
    """Get detailed emotion statistics for an image"""
    try:
        if image is None:
            return "No image provided"

        # Detect faces
        faces = detect_faces(np.array(image))

        if not faces:
            return "No faces detected"

        # Collect the scores of every emotion across all faces
        all_emotions = {}

        for (x, y, w, h) in faces:
            face_region = image.crop((x, y, x + w, y + h))
            emotions = predict_emotion(face_region)

            for emotion_data in emotions:
                emotion = emotion_data['label']
                score = emotion_data['score']

                if emotion not in all_emotions:
                    all_emotions[emotion] = []
                all_emotions[emotion].append(score)

        # Calculate statistics
        stats_lines = [f"**Emotion Analysis for {len(faces)} face(s):**\n"]

        for emotion, scores in all_emotions.items():
            avg_score = np.mean(scores)
            max_score = np.max(scores)
            count = len(scores)

            stats_lines.append(f"**{emotion.title()}:**")
            stats_lines.append(f"- Average confidence: {avg_score:.3f}")
            stats_lines.append(f"- Maximum confidence: {max_score:.3f}")
            stats_lines.append(f"- Detections: {count}")
            stats_lines.append("")

        return "\n".join(stats_lines)

    except Exception as e:
        logger.error(f"Error calculating statistics: {e}")
        return f"Error calculating statistics: {e}"
# Create the Gradio interface
def create_interface():
    # Custom CSS for modern styling
    custom_css = """
    .main-header {
        text-align: center;
        color: #2563eb;
        margin-bottom: 2rem;
    }
    .emotion-box {
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
        color: white;
        padding: 1rem;
        border-radius: 0.5rem;
        margin: 1rem 0;
    }
    .stats-box {
        background: #f8fafc;
        border: 1px solid #e2e8f0;
        border-radius: 0.5rem;
        padding: 1rem;
    }
    """

    with gr.Blocks(
        title="Live Face Emotion Detection",
        theme=gr.themes.Soft(),
        css=custom_css
    ) as iface:

        # Header
        gr.Markdown(
            """
            # 😊 Live Face Emotion Detection

            ### Real-time emotion recognition powered by deep learning

            This tool uses a fine-tuned model to detect faces and classify their expressions into
            7 emotions: **angry**, **disgust**, **fear**, **happy**, **sad**, **surprise**, and **neutral**.
            """,
            elem_classes=["main-header"]
        )
        with gr.Tab("📷 Single Image Analysis"):
            with gr.Row():
                with gr.Column(scale=1):
                    single_image_input = gr.Image(
                        label="Upload Image",
                        type="pil",
                        height=400
                    )
                    analyze_single_btn = gr.Button("Analyze Emotions", variant="primary", size="lg")

                with gr.Column(scale=1):
                    single_image_output = gr.Image(
                        label="Emotion Detection Results",
                        height=400
                    )
                    single_result_text = gr.Textbox(
                        label="Detection Summary",
                        lines=5,
                        show_copy_button=True
                    )
        with gr.Tab("🎥 Live Webcam Detection"):
            gr.Markdown(
                """
                ### Real-time Emotion Detection
                Enable your webcam to see live emotion detection in action!
                """
            )

            # Note: source="webcam" is the Gradio 3.x argument; Gradio 4.x
            # renamed it to sources=["webcam"]
            webcam_interface = gr.Interface(
                fn=process_webcam_frame,
                inputs=gr.Image(source="webcam", streaming=True, type="pil"),
                outputs=gr.Image(label="Live Emotion Detection"),
                live=True,
                title="",
                description=""
            )
        with gr.Tab("📊 Detailed Statistics"):
            with gr.Row():
                with gr.Column(scale=1):
                    stats_image_input = gr.Image(
                        label="Upload Image for Analysis",
                        type="pil",
                        height=400
                    )
                    analyze_stats_btn = gr.Button("Generate Statistics", variant="primary", size="lg")

                with gr.Column(scale=1):
                    stats_output = gr.Markdown(
                        value="Upload an image and click 'Generate Statistics' to see detailed emotion analysis...",
                        label="Emotion Statistics"
                    )
        with gr.Tab("🔄 Batch Processing"):
            with gr.Column():
                batch_images_input = gr.File(
                    label="Upload Multiple Images",
                    file_count="multiple",
                    file_types=["image"]
                )
                # Wired to process_batch_files in the event handler section below
                batch_process_btn = gr.Button("Process Batch", variant="primary", size="lg")
                batch_results_output = gr.Textbox(
                    label="Batch Processing Results",
                    lines=10,
                    show_copy_button=True
                )
        with gr.Tab("📚 About & Model Info"):
            gr.Markdown(
                """
                ## About This Model

                This face emotion detection system uses a deep learning model fine-tuned
                for emotion recognition. It classifies each detected face into one of 7 emotional states.

                ### Supported Emotions

                - 😠 **Angry** - Expressions of anger, frustration, or annoyance
                - 🤢 **Disgust** - Expressions of revulsion or distaste
                - 😨 **Fear** - Expressions of fear, anxiety, or worry
                - 😊 **Happy** - Expressions of joy, contentment, or pleasure
                - 😢 **Sad** - Expressions of sadness, sorrow, or melancholy
                - 😲 **Surprise** - Expressions of surprise, shock, or amazement
                - 😐 **Neutral** - Calm, neutral expressions with no strong emotion

                ### Technical Details

                - **Model:** Fine-tuned emotion classification model
                - **Architecture:** Deep convolutional neural network
                - **Training:** Specialized dataset for facial emotion recognition
                - **Face Detection:** OpenCV Haar Cascade classifier
                - **Real-time Processing:** Optimized for live webcam inference

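                You can also query the underlying model directly. A minimal sketch using the
                `transformers` pipeline API (the image path is a placeholder; exact label
                strings come from the model card):

                ```python
                from transformers import pipeline

                clf = pipeline("image-classification",
                               model="abhilash88/face-emotion-detection",
                               top_k=7)
                print(clf("face.jpg"))  # e.g. [{'label': 'happy', 'score': 0.93}, ...]
                ```
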
                ### Use Cases

                - **Human-Computer Interaction:** Emotion-aware interfaces
                - **Market Research:** Analyze customer emotional responses
                - **Healthcare:** Monitor patient emotional states
                - **Education:** Assess student engagement and understanding
                - **Entertainment:** Emotion-responsive gaming and media
                - **Security:** Detect emotional distress or suspicious behavior

                ### Privacy & Ethics

                - Images are processed transiently to produce the results shown here and are not stored by this app
                - Use responsibly and respect privacy in all applications
                - Consider bias and fairness in emotion detection systems

                ### Performance Tips

                - Ensure good lighting for best results
                - Face should be clearly visible and unobstructed
                - Works best with frontal face views
                - Multiple faces in one image are supported

                ---

                **Model Repository:** [abhilash88/face-emotion-detection](https://huggingface.co/abhilash88/face-emotion-detection)

                Made with ❤️ for emotion AI research and applications
                """
            )
        # Event handlers
        analyze_single_btn.click(
            fn=process_single_image,
            inputs=single_image_input,
            outputs=[single_image_output, single_result_text],
            api_name="analyze_single_image"
        )

        analyze_stats_btn.click(
            fn=get_emotion_statistics,
            inputs=stats_image_input,
            outputs=stats_output,
            api_name="get_emotion_statistics"
        )

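        # Batch processing: route uploaded files through the process_batch_files
        # adapter defined above (a helper sketched earlier in this file)
        batch_process_btn.click(
            fn=process_batch_files,
            inputs=batch_images_input,
            outputs=batch_results_output,
            api_name="analyze_batch"
        )
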
        # Example images
        gr.Examples(
            examples=[
                ["https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?w=300&h=300&fit=crop&crop=face"],
                ["https://images.unsplash.com/photo-1554151228-14d9def656e4?w=300&h=300&fit=crop&crop=face"],
                ["https://images.unsplash.com/photo-1500648767791-00dcc994a43e?w=300&h=300&fit=crop&crop=face"],
            ],
            inputs=single_image_input,
            label="Try these example images",
            cache_examples=False
        )

    return iface

# Initialize and launch
if __name__ == "__main__":
    logger.info("Initializing Face Emotion Detection System...")

    if load_models():
        logger.info("Models loaded successfully!")

        # Create and launch the interface
        iface = create_interface()
        iface.launch(
            share=False,
            show_error=True,
            server_name="0.0.0.0",
            server_port=7860,
            favicon_path=None,
            show_api=True
        )
    else:
        logger.error("Failed to load models. Please check your model configuration.")