# SocialEase2 / app.py
import cv2
import gradio as gr
from deepface import DeepFace
from gtts import gTTS
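# Assumed dependencies, inferred from the imports above: opencv-python, gradio, deepface, gTTS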
# Define a function that takes a webcam frame and returns the facial analysis as spoken audio
def analyze_face(frame):
    try:
        # Gradio delivers the frame as an RGB array; OpenCV expects BGR, so convert before saving
        cv2.imwrite("webcam_frame.jpg", cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
        # Use DeepFace to analyze the saved image file
        result = DeepFace.analyze("webcam_frame.jpg")
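        # Optional variant (a sketch; parameter behavior depends on the installed DeepFace
        # version): name the analyzed attributes explicitly and avoid the exception DeepFace
        # raises when no face is detected in the frame.
        # result = DeepFace.analyze(
        #     img_path="webcam_frame.jpg",
        #     actions=["emotion", "age", "gender", "race"],
        #     enforce_detection=False,
        # )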
        # DeepFace.analyze returns a list with one dict per detected face; use the first face
        dominant_emotion = result[0]['dominant_emotion']
        age = result[0]['age']
        gender = result[0]['dominant_gender']
        race = result[0]['dominant_race']
        # Construct the output text
        output_text = f"Emotion: {dominant_emotion}, Age: {age}, Gender: {gender}, Race: {race}"
        # Convert the text to speech and save it as an MP3 file
        tts = gTTS(text=output_text, lang='en')
        audio_path = "output.mp3"
        tts.save(audio_path)
        return audio_path
    except Exception as e:
        # Raise a Gradio error so the failure is shown in the UI; returning the plain message
        # would be misread as an audio file path by the audio output component
        raise gr.Error(str(e))
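
# Optional local sanity check (a sketch, not used by the app itself): mimic a Gradio webcam
# frame by loading an image with OpenCV and converting BGR -> RGB; "test_face.jpg" is a
# hypothetical local file.
# test_frame = cv2.cvtColor(cv2.imread("test_face.jpg"), cv2.COLOR_BGR2RGB)
# print(analyze_face(test_frame))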
# Create a Gradio interface with a webcam input and audio output
iface = gr.Interface(fn=analyze_face, inputs="webcam", outputs="audio")
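# Equivalent setup with explicit components (a sketch; the exact Image argument differs
# across Gradio releases, e.g. source="webcam" in 3.x vs sources=["webcam"] in 4.x):
# iface = gr.Interface(
#     fn=analyze_face,
#     inputs=gr.Image(source="webcam"),
#     outputs=gr.Audio(type="filepath"),
# )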
# Launch the interface
iface.launch(share=True)