# NOTE: the lines below are Hugging Face file-viewer residue (commit header)
# that was pasted into the source; converted to comments so the file parses.
#   labonny's picture
#   Using a new model trained on 224x224 images
#   546b90e
#   raw / history blame / 1.14 kB
import gradio as gr
from fastai.vision.all import *
import cv2
import PIL
# Load the exported fastai learner (expects 'fec224-resnet34-v1.pkl' next to
# this script) and read back the class labels it was trained with.
learn = load_learner('fec224-resnet34-v1.pkl')
labels = learn.dls.vocab
def predict(img):
    """Classify the facial expression in ``img``.

    Args:
        img: image as a numpy array. Gradio's Image component delivers
            RGB channel order, not OpenCV's BGR.

    Returns:
        dict mapping each expression label to its predicted probability.
    """
    # The model was trained on grayscale FER-2013 faces, so collapse the
    # input to grayscale first. Gradio supplies RGB, so the conversion code
    # must be RGB2GRAY — the original BGR2GRAY applied the red and blue
    # luminance weights to the wrong channels.
    # NOTE(review): dstCn=3 presumably keeps a 3-channel output for the
    # resnet34 input — confirm this works with the pinned OpenCV version.
    image = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY, dstCn=3)
    pred, pred_idx, probs = learn.predict(image)
    return {labels[i]: float(probs[i]) for i in range(len(labels))}
# UI metadata shown on the Gradio page.
title = "Facial Expression Classifier"
description = "A facial expression classifier, trained using the <a href='https://www.kaggle.com/datasets/msambare/fer2013'>FER-2013 dataset</a>. This dataset consists of 28,709 examples of faces: each one is 48x48 grayscale pixels and is labelled with one of the following expressions: anger, disgust, fear, happy, neutral, sad, surprise.<p><p>This was used to train a resnet34 model."
# Example images expected to ship alongside this script, one per class.
examples = ["angryExample.jpg", "disgustExample.jpg", "fearExample.jpg", "happyExample.jpg", "neutralExample.jpg", "sadExample.jpg", "surpriseExample.jpg"]
# NOTE(review): gr.inputs.Image / gr.outputs.Label and the `shape` /
# `interpretation` keyword arguments are the legacy Gradio 2.x/3.x API,
# removed in later releases — confirm the Space's pinned gradio version
# before upgrading. The input is resized to 224x224 to match the model.
iface = gr.Interface(fn=predict, inputs=gr.inputs.Image(shape=(224,224)), outputs=gr.outputs.Label(num_top_classes=3), examples=examples, title=title, description=description,interpretation='default')
iface.launch()