anirudhmu committed
Commit e38d732 · 1 Parent(s): 644d54d

Update app.py

Files changed (1)
  1. app.py +3 -15
app.py CHANGED
@@ -1,7 +1,6 @@
 from transformers import VideoMAEImageProcessor, VideoMAEForVideoClassification, pipeline
 import gradio as gr
 import magic
-from moviepy.editor import VideoFileClip, TextClip, CompositeVideoClip
 
 pipe = pipeline("video-classification", model="anirudhmu/videomae-base-finetuned-soccer-action-recognition")
 
@@ -17,18 +16,7 @@ def predict_video(file_path):
     # Run inference
     results = pipe(file_path)
     label_to_score = {result["label"]: result["score"] for result in results}
-
-    # Overlay prediction onto the video using moviepy
-    prediction_text = ', '.join([f"{k}: {v:.2f}" for k, v in label_to_score.items()])
-    clip = VideoFileClip(file_path)
-    txt_clip = TextClip(prediction_text, fontsize=24, color='white').set_pos('bottom').set_duration(clip.duration)
-    video = CompositeVideoClip([clip, txt_clip])
-
-    # Saving video to temporary path and returning it
-    output_path = "/tmp/predicted_video.mp4"
-    video.write_videofile(output_path, codec='libx264')
-
-    return output_path
+    return str(label_to_score)
 
-iface = gr.Interface(fn=predict_video, inputs="video", outputs="video")
-iface.launch()
+iface = gr.Interface(fn=predict_video, inputs="video", outputs="text")
+iface.launch()
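After this change the Space returns the classifier's label-to-score mapping as plain text instead of re-encoding an annotated video with moviepy. A minimal sketch of exercising the same pipeline outside the Gradio interface, assuming a local test clip (sample_clip.mp4 is a hypothetical placeholder):

from transformers import pipeline

# Same checkpoint the Space loads at startup.
pipe = pipeline(
    "video-classification",
    model="anirudhmu/videomae-base-finetuned-soccer-action-recognition",
)

# Hypothetical local clip; the pipeline returns a list of {"label", "score"} dicts.
results = pipe("sample_clip.mp4")

# Same post-processing as predict_video: map each label to its score.
label_to_score = {result["label"]: result["score"] for result in results}
print(label_to_score)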