# Samantha Hipple
# app init
# c69102d
# import required libraries
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
def load_model():
    """Download and return the StableBeluga-7B causal LM and its tokenizer.

    Returns:
        tuple: ``(model, tokenizer)`` where the model is loaded in fp16 and
        placed automatically across available devices (``device_map="auto"``).
    """
    checkpoint = "stabilityai/StableBeluga-7B"
    # slow (sentencepiece) tokenizer is used for this checkpoint
    tokenizer = AutoTokenizer.from_pretrained(checkpoint, use_fast=False)
    # fp16 + low_cpu_mem_usage keeps the load footprint small for a 7B model
    model = AutoModelForCausalLM.from_pretrained(
        checkpoint,
        torch_dtype=torch.float16,
        low_cpu_mem_usage=True,
        device_map="auto",
    )
    return model, tokenizer
def process_emotions(model, tokenizer, emotion_data):
    """Turn facial-emotion ratios into a short journal prompt via the LLM.

    Args:
        model: causal LM with ``.generate()`` and a ``.device`` attribute.
        tokenizer: matching tokenizer (callable, with ``.decode()``).
        emotion_data (dict): emotion name -> percentage score
            (presumably DeepFace FER output; values must be numeric).

    Returns:
        str: the full decoded model output (system prompt + user input +
        assistant completion); pass it to ``generate_prompt`` to extract
        just the journal prompt.

    Raises:
        ValueError: if ``emotion_data`` is not a dictionary.
    """
    # ensure emotion_data is a dictionary
    if not isinstance(emotion_data, dict):
        raise ValueError("Expected emotion_data to be a dictionary.")
    # serialize e.g. {"happy": 51.2} -> "happy: 51.20%"
    emotions = ', '.join(f"{k}: {v:.2f}%" for k, v in emotion_data.items())
    # prompt-engineering instead of fine-tuning
    system_prompt = """### System:
You are StableBeluga, an AI programmed to follow instructions with high accuracy. Your current task is to assist with enhancing human emotional regulation and intelligence. You will receive information about the emotional ratios of a client. Your task is to identify the one or two emotions with the highest values from the information provided, and create a one or two-sentence original journal prompt for the client's self-reflection and emotional exploration. Do not discuss the emotions or their scores, and do not include any other introductory or explanatory text. Only provide the journal prompt, and ensure it is based on the emotions with the highest values. Here are examples of the desired format:
Example 1: 'Reflect on a recent situation when you felt anger. What was the trigger and how did it affect your actions?'
Example 2: 'Write about a moment when you felt sad. What thoughts or events led to this emotion and how did it impact your day?'
Provide a prompt following this format, based on the emotions with the highest values from the data provided.
"""
    # user input will come from deepface FER results
    prompt = f"{system_prompt}### User: {emotions}\n\n### Assistant:\n"
    # move the encoded inputs to wherever device_map="auto" actually placed
    # the model; the previous hard-coded "cuda" crashed on CPU-only hosts
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # sampling with nucleus filtering (top_k=0 disables top-k truncation)
    output = model.generate(**inputs, do_sample=True, top_p=0.95, top_k=0, max_new_tokens=256)
    result = tokenizer.decode(output[0], skip_special_tokens=True)
    # return decoded output
    return result
def generate_prompt(result):
    """Extract just the journal prompt from the model's decoded output.

    Args:
        result (str): full decoded text returned by ``process_emotions``
            (system prompt + user input + "### Assistant:" + completion).

    Returns:
        str: the assistant's completion, with any leading label such as
        "Journal prompt:" removed.

    Raises:
        ValueError: if ``result`` does not contain the "### Assistant:"
            marker (e.g. truncated or malformed model output).
    """
    # take the text after the "### Assistant:" marker; previously an absent
    # marker raised an opaque IndexError from split()[1]
    _, found, tail = result.partition("### Assistant:")
    if not found:
        raise ValueError("Model output is missing the '### Assistant:' marker.")
    response = tail.strip()
    # strip an unneeded prompt identifier (e.g. "Journal prompt: ...") —
    # but only a short, label-like prefix; previously ANY colon anywhere in
    # the prompt caused everything before it to be discarded
    label, found, rest = response.partition(":")
    if found and len(label) <= 30 and not any(ch in label for ch in ".?!"):
        response = rest.strip()
    return response