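"""Gradio demo for human action recognition.

Loads a trained Keras image classifier from ./checkpoints/checkpoint.model.keras
and serves a simple web UI that predicts the action shown in an uploaded image."""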
from keras.models import load_model
from keras.layers import Rescaling, Resizing
import tensorflow as tf
import numpy as np
import gradio as gr


# Load the trained action-recognition model from its checkpoint
model = load_model('./checkpoints/checkpoint.model.keras')

# Preprocessing layers: scale pixels to [0, 1] and resize to the 224x224 input the model expects
scale = Rescaling(1.0 / 255)
resize = Resizing(224, 224)


def action_recognition(image):
    # Class labels, indexed to match the model's output units
    preds = ['Calling', 'Clapping', 'Cycling', 'Dancing', 'Drinking', 'Eating', 'Fighting',
             'Hugging', 'Laughing', 'Listening to Music', 'Running or Walking', 'Sitting',
             'Sleeping', 'Texting', 'Using Laptop']

    # Convert the input to a NumPy array
    img = np.asarray(image)

    # Preprocess: rescale to [0, 1], resize to 224x224, and add a batch dimension
    img = scale(img)
    img = resize(img)
    img = tf.reshape(img, (1, 224, 224, 3))

    # Run inference and map the highest-scoring output index to its class label
    pred = model.predict(img)

    # Return the predicted class label
    return preds[int(np.argmax(pred))]
    

# Gradio interface: accepts an uploaded image and shows the predicted action as text
demo = gr.Interface(
    fn=action_recognition,
    inputs=[gr.Image(label="Image")],
    outputs=['text'],
    allow_flagging='never'
)


# Serve locally (Gradio defaults to http://127.0.0.1:7860)
demo.launch(share=False, debug=False)