Abdulvajid committed
Commit 2540a54 · 1 Parent(s): a00be44

Add application file
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.keras filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,51 @@
+ from keras.models import load_model
+ from keras.layers import Rescaling, Resizing
+ import tensorflow as tf
+ import numpy as np
+ import gradio as gr
+ from numpy import asarray
+
+
+ # Load the trained action-recognition model (tracked with Git LFS)
+ model = load_model('./checkpoints/checkpoint.model.keras')
+
+ # Preprocessing layers: scale pixel values to [0, 1] and resize to the model input size
+ scale = Rescaling(1./255)
+ resize = Resizing(224, 224)
+
+
+ def action_recognition(image):
+
+     preds = ['Calling', 'Clapping', 'Cycling', 'Dancing', 'Drinking', 'Eating', 'Fighting',
+              'Hugging', 'Laughing', 'Listening to Music', 'Running or Walking', 'Sitting',
+              'Sleeping', 'Texting', 'Using Laptop']
+
+     # Convert the input image to a NumPy array
+     img = asarray(image)
+
+     # Preprocess: rescale, resize, and add a batch dimension
+     img = scale(img)
+     img = resize(img)
+     img = tf.reshape(img, (1, 224, 224, 3))
+
+     # Predict class probabilities
+     pred = model.predict(img)
+
+     # Map the highest-scoring index to its class label
+     label = preds[int(np.argmax(pred))]
+     print(label)
+
+     # Return the predicted class label
+     return label
+
+
+ demo = gr.Interface(
+     fn=action_recognition,
+     inputs=[gr.Image(label="Image")],
+     outputs=['text'],
+     allow_flagging='never'
+ )
+
+
+ demo.launch(share=False, debug=False)
+
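For a quick sanity check of the new app outside the Gradio UI, the same pipeline can be exercised directly on a local image. The snippet below is a minimal sketch and not part of this commit: it assumes the repository was cloned with Git LFS so that checkpoints/checkpoint.model.keras contains the real weights, that Pillow is installed, and that a test image exists at the hypothetical path ./sample.jpg.

# Minimal local sanity check mirroring app.py's preprocessing (not part of this commit).
# Assumptions: Git LFS has pulled the real .keras checkpoint, Pillow is installed,
# and a test image exists at the hypothetical path ./sample.jpg.
import numpy as np
import tensorflow as tf
from keras.models import load_model
from keras.layers import Rescaling, Resizing
from PIL import Image

model = load_model('./checkpoints/checkpoint.model.keras')

labels = ['Calling', 'Clapping', 'Cycling', 'Dancing', 'Drinking', 'Eating', 'Fighting',
          'Hugging', 'Laughing', 'Listening to Music', 'Running or Walking', 'Sitting',
          'Sleeping', 'Texting', 'Using Laptop']

# Same pipeline as action_recognition(): array -> rescale -> resize -> batch of one
img = np.asarray(Image.open('./sample.jpg').convert('RGB'))
img = Rescaling(1./255)(img)
img = Resizing(224, 224)(img)
img = tf.reshape(img, (1, 224, 224, 3))

pred = model.predict(img)
print('Predicted action:', labels[int(np.argmax(pred))])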
checkpoints/checkpoint.model.keras ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:73f81dbd1ac7d76d93486714296ae819752fe60a3f4699d14cb829dde5e5a37c
+ size 263332280
requirements.txt ADDED
Binary file (3.77 kB).