Samantha Hipple committed
Commit c69102d · 1 Parent(s): 0ab2544
Files changed (4)
  1. app.py +52 -0
  2. beluga.py +44 -0
  3. emodeepface.py +55 -0
  4. requirements.txt +6 -0
app.py ADDED
@@ -0,0 +1,52 @@
+ import streamlit as st
+ from beluga import load_model, process_emotions, generate_prompt
+ from emodeepface import check_image_rotation, process_photo
+
+ @st.cache_resource
+ def load_cached_model():
+     return load_model()
+
+ if 'model' not in st.session_state:
+     loading_message = st.empty()
+     loading_message.text("Loading model... Please wait.")
+
+     st.session_state.model, st.session_state.tokenizer = load_cached_model()
+     loading_message.empty()
+
+ st.title("Affective Journaling Assistant")
+
+ st.write("""
+ Welcome to the Affective Journaling Assistant!
+
+ For a tailored journaling experience, we analyze your facial expressions to gauge your emotions.
+ To proceed:
+
+ 1. Ensure the image is well-lit and of high quality.
+ 2. Your face should be fully visible without obstructions (e.g., no sunglasses or hats).
+ 3. By uploading, you acknowledge and consent to our data processing.
+
+ Let's get started!
+ """)
+
+ file_name = st.file_uploader("Please upload your photo.")
+
+ if file_name is not None:
+     image = check_image_rotation(file_name)
+
+     processing_message = st.empty()
+     processing_message.text("Analyzing your image... Please wait.")
+
+     emotion_predictions = process_photo(file_name)
+     result = process_emotions(st.session_state.model, st.session_state.tokenizer, emotion_predictions)
+     processing_message.empty()
+
+     prompt = generate_prompt(result)
+
+     # Create columns to place the image and the prompt side by side
+     col1, col2 = st.columns(2)
+
+     # Show image in the left column
+     col1.image(image, width=300)
+
+     # Show generated prompt in the right column
+     col2.write(prompt)
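
The post-processing step of this pipeline can be checked without the Streamlit UI, a GPU, or the model itself. A minimal sketch, assuming the packages in requirements.txt are installed; `fake_result` is a made-up stand-in for the decoded model output that `process_emotions` returns:

from beluga import generate_prompt

# hypothetical decoded output, mimicking the prompt + completion string
fake_result = (
    "### System: ...\n"
    "### User: happy: 92.55%, neutral: 3.60%\n\n"
    "### Assistant:\n"
    "Journal prompt: Reflect on what brought you joy today and how you might invite more of it."
)

# generate_prompt drops everything before "### Assistant:" and strips the
# "Journal prompt:" label, leaving only the prompt text for display
print(generate_prompt(fake_result))
# -> Reflect on what brought you joy today and how you might invite more of it.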
beluga.py ADDED
@@ -0,0 +1,44 @@
+ # import required libraries
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ def load_model():
+     # define the tokenizer for preprocessing any inputs
+     tokenizer = AutoTokenizer.from_pretrained("stabilityai/StableBeluga-7B", use_fast=False)
+     # define the LLM for generating output
+     model = AutoModelForCausalLM.from_pretrained("stabilityai/StableBeluga-7B", torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map="auto")
+     return (model, tokenizer)
+
+ def process_emotions(model, tokenizer, emotion_data):
+     # ensure emotion_data is a dictionary
+     if not isinstance(emotion_data, dict):
+         raise ValueError("Expected emotion_data to be a dictionary.")
+     # convert the emotion_data dictionary to string format
+     emotions = ', '.join(f"{k}: {v:.2f}%" for k, v in emotion_data.items())
+     # prompt engineering instead of fine-tuning
+     system_prompt = """### System:
+ You are StableBeluga, an AI programmed to follow instructions with high accuracy. Your current task is to assist with enhancing human emotional regulation and intelligence. You will receive information about the emotional ratios of a client. Your task is to identify the one or two emotions with the highest values from the information provided, and create a one- or two-sentence original journal prompt for the client's self-reflection and emotional exploration. Do not discuss the emotions or their scores, and do not include any other introductory or explanatory text. Only provide the journal prompt, and ensure it is based on the emotions with the highest values. Here are examples of the desired format:
+
+ Example 1: 'Reflect on a recent situation when you felt anger. What was the trigger and how did it affect your actions?'
+
+ Example 2: 'Write about a moment when you felt sad. What thoughts or events led to this emotion and how did it impact your day?'
+
+ Provide a prompt following this format, based on the emotions with the highest values from the data provided.
+ """
+     # user input comes from the DeepFace FER results
+     prompt = f"{system_prompt}### User: {emotions}\n\n### Assistant:\n"
+     # preprocess the inputs and move them to the GPU
+     inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
+     output = model.generate(**inputs, do_sample=True, top_p=0.95, top_k=0, max_new_tokens=256)
+     result = tokenizer.decode(output[0], skip_special_tokens=True)
+     # return the decoded output
+     return result
+
+ def generate_prompt(result):
+     # split the text at the "### Assistant:" label
+     response = result.split("### Assistant:")[1].strip()
+     # strip any unneeded prompt identifier the model may prepend (e.g., "Journal prompt: ...")
+     if ":" in response:
+         split_response = response.split(":", 1)
+         response = split_response[1].strip()
+     return response
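
For reference, `process_emotions` expects the per-emotion scores that DeepFace reports for its `emotion` action. The keys below are DeepFace's standard emotion labels; the scores are made-up illustrative values:

# illustrative input (assumed values) and the same serialization used above
emotion_data = {
    "angry": 0.42, "disgust": 0.01, "fear": 1.37,
    "happy": 92.55, "sad": 1.20, "surprise": 0.85, "neutral": 3.60,
}
emotions = ', '.join(f"{k}: {v:.2f}%" for k, v in emotion_data.items())
print(emotions)
# -> angry: 0.42%, disgust: 0.01%, fear: 1.37%, happy: 92.55%, sad: 1.20%, surprise: 0.85%, neutral: 3.60%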
emodeepface.py ADDED
@@ -0,0 +1,55 @@
+ from deepface import DeepFace
+ import numpy as np
+ from PIL import Image, ExifTags
+
+ # used to raise custom exceptions
+ class ImageProcessingError(Exception):
+     pass
+
+ # used to ensure upright photo orientation for face detection
+ def check_image_rotation(image_path):
+     try:
+         image = Image.open(image_path)
+         # find the numeric key of the EXIF 'Orientation' tag
+         for orientation in ExifTags.TAGS.keys():
+             if ExifTags.TAGS[orientation] == 'Orientation':
+                 break
+         exif = image._getexif()
+         if exif is not None and orientation in exif:
+             if exif[orientation] == 3:
+                 image = image.rotate(180, expand=True)
+             elif exif[orientation] == 6:
+                 image = image.rotate(270, expand=True)
+             elif exif[orientation] == 8:
+                 image = image.rotate(90, expand=True)
+         return image
+     except (AttributeError, KeyError, IndexError):
+         # if the orientation tag is missing or unreadable, return the original image
+         return Image.open(image_path)
+
+ # used to process a photo through the DeepFace emotion model
+ def process_photo(file_name):
+     backends = ['opencv', 'mtcnn', 'retinaface', 'mediapipe', 'ssd']
+     attempt = 0
+
+     image = check_image_rotation(file_name)
+     image_data = np.array(image)
+
+     while attempt < len(backends):
+         try:
+             predictions = DeepFace.analyze(image_data, actions=['emotion'], detector_backend=backends[attempt])
+             print("detector backend:", backends[attempt])
+             if len(predictions) > 1:
+                 # if multiple faces are detected, keep the one with the largest bounding box
+                 faces = [(face, face['region']['w'] * face['region']['h']) for face in predictions]
+                 new_predictions = sorted(faces, key=lambda x: x[1], reverse=True)[0][0]
+                 return new_predictions['emotion']
+             return predictions[0]['emotion']
+         except Exception as e:
+             if attempt == len(backends) - 1:
+                 error_message = "Failed to analyze the image with any of the available detector backends. Please upload a new image."
+                 raise ImageProcessingError(error_message) from e
+             # log the error for the failed backend before retrying with the next one
+             print(f"Retrying with backend `{backends[attempt + 1]}` due to error: {e}")
+             attempt += 1
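
As a design note, the manual EXIF handling in `check_image_rotation` could be replaced by Pillow's built-in helper, available since Pillow 6.0. A sketch, not part of this commit:

from PIL import Image, ImageOps

def check_image_rotation_alt(image_path):
    image = Image.open(image_path)
    # exif_transpose applies the EXIF Orientation tag (including the rotation
    # values 3, 6, and 8 handled above, plus mirrored variants) and returns an
    # upright image; images without EXIF data pass through unchanged
    return ImageOps.exif_transpose(image)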
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ torch
+ transformers
+ sentencepiece
+ accelerate
+ deepface
+ mediapipe