Commit a8e4601
Parent(s): 56ff0ac
Update app
app.py CHANGED
@@ -1093,18 +1093,18 @@ def get_embeddings(video_sequences, audio_sequences, model, calc_aud_emb=True):
     video_emb = []
     audio_emb = []
 
-    model = model.to(device)
+    # model = model.to(device)
 
     for i in tqdm(range(0, len(video_sequences), batch_size)):
         video_inp = video_sequences[i:i+batch_size, ]
-        vid_emb = model.forward_vid(video_inp
+        vid_emb = model.forward_vid(video_inp, return_feats=False)
         vid_emb = torch.mean(vid_emb, axis=-1)
 
         video_emb.append(vid_emb.detach().cpu())
 
         if calc_aud_emb:
            audio_inp = audio_sequences[i:i+batch_size, ]
-            aud_emb = model.forward_aud(audio_inp
+            aud_emb = model.forward_aud(audio_inp)
             audio_emb.append(aud_emb.detach().cpu())
 
         # torch.cuda.empty_cache()
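
For context, a minimal sketch of how the patched get_embeddings loop reads after this commit. It assumes batch_size is a module-level value in app.py (the placeholder of 12 below is hypothetical), that model is already on the intended device (the commit comments out the model.to(device) move), and that the function finishes by concatenating and returning the collected embeddings; that tail is not shown in the hunk and is an assumption.

import torch
from tqdm import tqdm

batch_size = 12  # hypothetical placeholder; the real value is defined elsewhere in app.py

def get_embeddings(video_sequences, audio_sequences, model, calc_aud_emb=True):
    video_emb = []
    audio_emb = []

    # model = model.to(device)  # commented out in a8e4601: the model is expected to already be on the right device

    for i in tqdm(range(0, len(video_sequences), batch_size)):
        video_inp = video_sequences[i:i+batch_size, ]
        # return_feats=False was added in a8e4601, presumably so forward_vid returns only the embedding tensor
        vid_emb = model.forward_vid(video_inp, return_feats=False)
        vid_emb = torch.mean(vid_emb, axis=-1)

        video_emb.append(vid_emb.detach().cpu())

        if calc_aud_emb:
            audio_inp = audio_sequences[i:i+batch_size, ]
            aud_emb = model.forward_aud(audio_inp)
            audio_emb.append(aud_emb.detach().cpu())

        # torch.cuda.empty_cache()

    # Assumption: per-batch embeddings are concatenated before returning; this part falls outside the hunk.
    video_emb = torch.cat(video_emb, dim=0)
    if calc_aud_emb:
        return video_emb, torch.cat(audio_emb, dim=0)
    return video_emb

Commenting out model.to(device) inside the function suggests device placement now happens once when the model is loaded rather than on every call, avoiding a repeated (and redundant) transfer per request.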