init

Changed files:
- experiment_speaker_verification.py  +6 -1
- model_clap.py  +9 -3
experiment_speaker_verification.py  CHANGED

@@ -17,7 +17,7 @@ from datasets import load_dataset
 from model_meta_voice import MetaVoiceSE
 from model_pyannote_embedding import PyannoteSE
 from model_w2v_bert import W2VBertSE
-from model_clap import ClapSE
+from model_clap import ClapSE, ClapGeneralSE
 
 
 def get_embedding(model_class, model_name: str, dataset_name: str, data_split: str):
@@ -119,26 +119,31 @@ if __name__ == '__main__':
     get_embedding(PyannoteSE, "pyannote_se", "asahi417/voxceleb1-test-split", "test")
     get_embedding(W2VBertSE, "w2v_bert_se", "asahi417/voxceleb1-test-split", "test")
     get_embedding(ClapSE, "clap_se", "asahi417/voxceleb1-test-split", "test")
+    get_embedding(ClapGeneralSE, "clap_general_se", "asahi417/voxceleb1-test-split", "test")
 
     get_embedding(MetaVoiceSE, "meta_voice_se", "ylacombe/expresso", "train")
     get_embedding(PyannoteSE, "pyannote_se", "ylacombe/expresso", "train")
     get_embedding(W2VBertSE, "w2v_bert_se", "ylacombe/expresso", "train")
     get_embedding(ClapSE, "clap_se", "ylacombe/expresso", "train")
+    get_embedding(ClapGeneralSE, "clap_general_se", "ylacombe/expresso", "train")
 
     cluster_embedding("meta_voice_se", "asahi417/voxceleb1-test-split", "speaker_id")
     cluster_embedding("pyannote_se", "asahi417/voxceleb1-test-split", "speaker_id")
     cluster_embedding("w2v_bert_se", "asahi417/voxceleb1-test-split", "speaker_id")
     cluster_embedding("clap_se", "asahi417/voxceleb1-test-split", "speaker_id")
+    cluster_embedding("clap_general_se", "asahi417/voxceleb1-test-split", "speaker_id")
 
     cluster_embedding("meta_voice_se", "ylacombe/expresso", "speaker_id")
     cluster_embedding("pyannote_se", "ylacombe/expresso", "speaker_id")
     cluster_embedding("w2v_bert_se", "ylacombe/expresso", "speaker_id")
     cluster_embedding("clap_se", "ylacombe/expresso", "speaker_id")
+    cluster_embedding("clap_general_se", "ylacombe/expresso", "speaker_id")
 
     cluster_embedding("meta_voice_se", "ylacombe/expresso", "style")
     cluster_embedding("pyannote_se", "ylacombe/expresso", "style")
     cluster_embedding("w2v_bert_se", "ylacombe/expresso", "style")
     cluster_embedding("clap_se", "ylacombe/expresso", "style")
+    cluster_embedding("clap_general_se", "ylacombe/expresso", "style")
 
 
 
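Note: `get_embedding` and `cluster_embedding` are defined elsewhere in experiment_speaker_verification.py and are not part of this diff. The sketch below is a hypothetical reconstruction of the `get_embedding` driver, shown only to illustrate how the new `ClapGeneralSE` calls plug in; the audio column name, the absence of caching, and the return shape are assumptions, not the repository's actual code.

```python
# Hypothetical sketch of the get_embedding driver used above;
# the real implementation in experiment_speaker_verification.py may differ.
import numpy as np
from datasets import load_dataset


def get_embedding_sketch(model_class, model_name: str, dataset_name: str, data_split: str):
    model = model_class()  # e.g. ClapGeneralSE(); every wrapper exposes a no-argument constructor
    dataset = load_dataset(dataset_name, split=data_split)
    embeddings = []
    for example in dataset:
        audio = example["audio"]  # assumed Audio column: {"array": np.ndarray, "sampling_rate": int}
        vector = model.get_speaker_embedding(
            np.asarray(audio["array"]), sampling_rate=audio["sampling_rate"]
        )
        embeddings.append(vector)
    # One fixed-size vector per utterance, ready for the clustering step.
    return np.stack(embeddings)


# Mirrors one of the calls added in this commit:
# get_embedding(ClapGeneralSE, "clap_general_se", "asahi417/voxceleb1-test-split", "test")
```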
model_clap.py  CHANGED

@@ -11,12 +11,12 @@ from transformers import ClapModel, ClapProcessor
 
 
 class ClapSE:
-    def __init__(self):
-        self.model = ClapModel.from_pretrained("laion/larger_clap_music_and_speech")
+    def __init__(self, ckpt: str = "laion/larger_clap_music_and_speech"):
+        self.model = ClapModel.from_pretrained(ckpt)
         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
         self.model.to(self.device)
         self.model.eval()
-        self.processor = ClapProcessor.from_pretrained("laion/larger_clap_music_and_speech")
+        self.processor = ClapProcessor.from_pretrained(ckpt)
 
     def get_speaker_embedding(self, wav: np.ndarray, sampling_rate: Optional[int] = None) -> np.ndarray:
         if sampling_rate != self.processor.feature_extractor.sampling_rate:
@@ -27,3 +27,9 @@
         with torch.no_grad():
             outputs = self.model.get_audio_features(**{k: v.to(self.device) for k, v in inputs.items()})
         return outputs.cpu().numpy()[0]
+
+
+class ClapGeneralSE(ClapSE):
+
+    def __init__(self):
+        super().__init__(ckpt="laion/larger_clap_general")