LiuPengNGP committed on
Commit
9d1f88c
·
verified ·
1 Parent(s): 53fa185

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +116 -18
app.py CHANGED
@@ -1,24 +1,122 @@
1
  import gradio as gr
2
- from transformers import pipeline
3
- import time
4
 
5
- pipe = pipeline("automatic-speech-recognition")
 
 
 
6
 
7
- def transcribe(audio, state=""):
8
- print(audio)
9
- time.sleep(2)
10
- text = pipe(audio)["text"]
11
- state += text + " "
12
- return state, state
13
 
 
 
 
 
 
 
 
14
 
15
- with gr.Blocks() as demo:
16
- state = gr.State(value="")
17
- with gr.Row():
18
- with gr.Column():
19
- audio = gr.Audio(source="microphone", type="filepath")
20
- with gr.Column():
21
- textbox = gr.Textbox()
22
- audio.stream(fn=transcribe, inputs=[audio, state], outputs=[textbox, state])
23
 
24
- demo.launch(debug=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
 
 
2
 
3
+ # Importing necessary components for the Gradio app
4
+ from app.description import DESCRIPTION_STATIC, DESCRIPTION_DYNAMIC
5
+ from app.authors import AUTHORS
6
+ from app.app_utils import preprocess_image_and_predict, preprocess_video_and_predict
7
 
 
 
 
 
 
 
8
 
9
+ def clear_static_info():
10
+ return (
11
+ gr.Image(value=None, type="pil"),
12
+ gr.Image(value=None, scale=1, elem_classes="dl5"),
13
+ gr.Image(value=None, scale=1, elem_classes="dl2"),
14
+ gr.Label(value=None, num_top_classes=3, scale=1, elem_classes="dl3"),
15
+ )
16
 
17
+ def clear_dynamic_info():
18
+ return (
19
+ gr.Video(value=None),
20
+ gr.Video(value=None),
21
+ gr.Video(value=None),
22
+ gr.Video(value=None),
23
+ gr.Plot(value=None),
24
+ )
25
 
26
+ with gr.Blocks(css="app.css") as demo:
27
+ with gr.Tab("Dynamic App"):
28
+ gr.Markdown(value=DESCRIPTION_DYNAMIC)
29
+ with gr.Row():
30
+ with gr.Column(scale=2):
31
+ input_video = gr.Video(elem_classes="video1")
32
+ with gr.Row():
33
+ clear_btn_dynamic = gr.Button(
34
+ value="Clear", interactive=True, scale=1
35
+ )
36
+ submit_dynamic = gr.Button(
37
+ value="Submit", interactive=True, scale=1, elem_classes="submit"
38
+ )
39
+ with gr.Column(scale=2, elem_classes="dl4"):
40
+ with gr.Row():
41
+ output_video = gr.Video(label="Original video", scale=1, elem_classes="video2")
42
+ output_face = gr.Video(label="Pre-processed video", scale=1, elem_classes="video3")
43
+ output_heatmaps = gr.Video(label="Heatmaps", scale=1, elem_classes="video4")
44
+ output_statistics = gr.Plot(label="Statistics of emotions", elem_classes="stat")
45
+ gr.Examples(
46
+ ["videos/video1.mp4",
47
+ "videos/video2.mp4",
48
+ ],
49
+ [input_video],
50
+ )
51
+
52
+ with gr.Tab("Static App"):
53
+ gr.Markdown(value=DESCRIPTION_STATIC)
54
+ with gr.Row():
55
+ with gr.Column(scale=2, elem_classes="dl1"):
56
+ input_image = gr.Image(label="Original image", type="pil")
57
+ with gr.Row():
58
+ clear_btn = gr.Button(
59
+ value="Clear", interactive=True, scale=1, elem_classes="clear"
60
+ )
61
+ submit = gr.Button(
62
+ value="Submit", interactive=True, scale=1, elem_classes="submit"
63
+ )
64
+ with gr.Column(scale=1, elem_classes="dl4"):
65
+ with gr.Row():
66
+ output_image = gr.Image(label="Face", scale=1, elem_classes="dl5")
67
+ output_heatmap = gr.Image(label="Heatmap", scale=1, elem_classes="dl2")
68
+ output_label = gr.Label(num_top_classes=3, scale=1, elem_classes="dl3")
69
+ gr.Examples(
70
+ [
71
+ "images/fig7.jpg",
72
+ "images/fig1.jpg",
73
+ "images/fig2.jpg",
74
+ "images/fig3.jpg",
75
+ "images/fig4.jpg",
76
+ "images/fig5.jpg",
77
+ "images/fig6.jpg",
78
+ ],
79
+ [input_image],
80
+ )
81
+ with gr.Tab("Authors"):
82
+ gr.Markdown(value=AUTHORS)
83
+
84
+ submit.click(
85
+ fn=preprocess_image_and_predict,
86
+ inputs=[input_image],
87
+ outputs=[output_image, output_heatmap, output_label],
88
+ queue=True,
89
+ )
90
+ clear_btn.click(
91
+ fn=clear_static_info,
92
+ inputs=[],
93
+ outputs=[input_image, output_image, output_heatmap, output_label],
94
+ queue=True,
95
+ )
96
+
97
+ submit_dynamic.click(
98
+ fn=preprocess_video_and_predict,
99
+ inputs=input_video,
100
+ outputs=[
101
+ output_video,
102
+ output_face,
103
+ output_heatmaps,
104
+ output_statistics
105
+ ],
106
+ queue=True,
107
+ )
108
+ clear_btn_dynamic.click(
109
+ fn=clear_dynamic_info,
110
+ inputs=[],
111
+ outputs=[
112
+ input_video,
113
+ output_video,
114
+ output_face,
115
+ output_heatmaps,
116
+ output_statistics
117
+ ],
118
+ queue=True,
119
+ )
120
+
121
+ if __name__ == "__main__":
122
+ demo.queue(api_open=False).launch(share=False)