ergosumdre committed on
Commit
0462bcc
·
1 Parent(s): 37d0eb1

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +73 -0
app.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import io
import os

import boto3
from botocore.exceptions import BotoCoreError, ClientError
import gradio as gr
import numpy as np
from PIL import Image

# AWS credentials are read from environment variables (e.g. Space secrets).
# NOTE: `import os` was missing in the original, which made these two lines
# raise NameError at startup.
aws_key_id = os.environ['aws_access_key_id']
aws_secret = os.environ['aws_secret_access_key']

# Initialize AWS Rekognition client.
# On failure we only log: the app still starts, and calls will fail later
# (preserves the original best-effort behavior).
try:
    client = boto3.client(
        'rekognition',
        region_name='us-east-1',
        aws_access_key_id=aws_key_id,
        aws_secret_access_key=aws_secret,
    )
except (BotoCoreError, ClientError) as error:
    print('Error: ', error)
23
+
24
+
25
def recognize_emotions(image):
    """Return the dominant emotion detected in a face using AWS Rekognition.

    Parameters
    ----------
    image : numpy.ndarray
        RGB frame as delivered by the Gradio webcam component
        (H x W x 3 array convertible to uint8).

    Returns
    -------
    str or None
        The emotion type (e.g. "HAPPY") with the highest confidence for the
        first detected face, an empty string if the face carries no emotion
        entries, or None when no face is detected at all.
    """
    # Convert the NumPy array to a PIL image so it can be JPEG-encoded.
    pil_image = Image.fromarray(np.uint8(image))

    # Encode the frame as JPEG bytes, the format Rekognition accepts inline.
    with io.BytesIO() as output:
        pil_image.save(output, format="JPEG")
        contents = output.getvalue()

    # Ask Rekognition for full face attributes (includes the Emotions list).
    response = client.detect_faces(
        Image={'Bytes': contents},
        Attributes=['ALL'],
    )

    # If no faces are detected, return None (Gradio renders an empty output).
    face_details = response['FaceDetails']
    if not face_details:
        return None

    # Only the first detected face is considered, as in the original design.
    emotions = face_details[0]['Emotions']

    # Pick the emotion with the highest confidence; `default=None` keeps the
    # empty-list case safe (the original loop yielded '' in that case).
    top = max(emotions, key=lambda e: e['Confidence'], default=None)
    if top is None:
        return ''
    return str(top['Type'])
62
+
63
+
64
+
65
# Build the Gradio UI: live webcam frames in, predicted emotion label out.
webcam_input = gr.Image(source="webcam", streaming=True)

iface = gr.Interface(
    fn=recognize_emotions,
    inputs=webcam_input,
    outputs="text",
    title="How does this person feel?",
    description="Helping you understand what others think",
)

# Start the web app.
iface.launch()