catiR committed
Commit 2defee0 · 1 parent: e8d7f64

f0

Files changed:
- scripts/reaper2pass.py  (+15 -2)
- scripts/runSQ.py  (+22 -33)
scripts/reaper2pass.py
CHANGED
@@ -27,7 +27,8 @@ def reaper_soundfile(sound_path, orig_filetype):
 
 
 
-def get_reaper(wav_path, maxf0='700', minf0='50', reaper_path = "REAPER/build/reaper"):
+
+def get_reaper_data(wav_path, maxf0='700', minf0='50', reaper_path = "REAPER/build/reaper"):
 
     f0_data = subprocess.run([reaper_path, "-i", wav_path, '-f', '/dev/stdout', '-x', maxf0, '-m', minf0, '-a'],capture_output=True).stdout
     #print('PLAIN:',f0_data)
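This hunk ends before the code that reduces the captured stdout bytes to the Time F0Val list described in the next hunk's comments. For reference, a minimal parsing sketch, assuming REAPER's ASCII (-a) output is an EST-style header terminated by EST_Header_End followed by one "time voicing f0" row per frame, with unvoiced frames reported as a negative F0; this is an assumption about the tool's output format, not code from the commit:

# Sketch only: turn REAPER's ASCII stdout into [time, f0] string pairs,
# keeping just the voiced frames (output-format assumptions noted above).
def parse_reaper_ascii(raw_bytes):
    text = raw_bytes.decode('utf-8')
    if 'EST_Header_End' in text:
        text = text.split('EST_Header_End', 1)[1]   # drop the EST header
    rows = []
    for line in text.splitlines():
        parts = line.split()
        if len(parts) != 3:
            continue
        time, voicing, f0 = parts
        if voicing == '1' and float(f0) > 0:        # frame has an F0 value
            rows.append([time, f0])
    return rows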
@@ -41,6 +42,18 @@ def get_reaper(wav_path, maxf0='700', minf0='50', reaper_path = "REAPER/build/reaper"):
 
     return f0_data
 
+
+# currently,
+# take the simplified list data from get_reaper_data,
+# with format Time F0Val only at times with existing F0Val,
+# and write that to a text file.
+# alternate would be letting reaper write its own files
+# instead of capturing the stdout...
+def save_pitch(f0_data, save_path,hed=True):
+    with open(save_path,'w') as handle:
+        if hed:
+            handle.write('TIME\tF0\n')
+        handle.write(''.join(['\t'.join(l) + '\n' for l in f0_data]))
 
 
 # 2 pass pitch estimation
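Since save_pitch builds each line with '\t'.join(l), f0_data needs to be an iterable of string sequences, one [time, f0] pair per voiced frame as the comments above describe; numeric rows would have to be stringified first. A small round-trip sketch under that assumption; the example rows, the /tmp path, and the read_pitch helper are illustrative, not part of the commit:

# Hypothetical round trip for the TIME/F0 TSV that save_pitch writes.
def read_pitch(f0_path, hed=True):
    with open(f0_path) as handle:
        lines = handle.read().splitlines()
    if hed:
        lines = lines[1:]                      # skip the 'TIME\tF0' header row
    return [line.split('\t') for line in lines if line]

f0_data = [['0.010', '121.3'], ['0.015', '122.0'], ['0.020', '123.8']]
save_pitch(f0_data, '/tmp/example.f0')
assert read_pitch('/tmp/example.f0') == f0_data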
@@ -50,7 +63,7 @@ def estimate_pitch(sound_path):
     if orig_ftype == '.wav':
         wav_path = sound_path
     else:
-        tmp_path = reaper_soundfile(sound_path)
+        tmp_path = reaper_soundfile(sound_path, orig_ftype)
         wav_path = tmp_path
 
     print('REAPER FILE PATH:', wav_path)
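The "# 2 pass pitch estimation" comment here, and the "IMPLEMENT GOOD 2 STEP PITCH ESTIMATION" TODO removed from runSQ.py below, point at the usual two-pass recipe: track once with wide bounds, derive speaker-specific bounds from the first pass, then track again. A sketch of that recipe on top of get_reaper_data; the 0.75/1.5 quantile factors follow the common De Looze & Hirst heuristic and are an assumption, not necessarily what estimate_pitch implements:

import statistics

# Generic two-pass pitch estimation sketch; assumes get_reaper_data returns
# [time, f0] string pairs for voiced frames, as the save_pitch comments suggest.
def two_pass_pitch(wav_path):
    first = get_reaper_data(wav_path, maxf0='700', minf0='50')   # wide first pass
    f0s = [float(f0) for _, f0 in first]
    if len(f0s) < 2:
        return first
    q25, _, q75 = statistics.quantiles(f0s, n=4)
    # Speaker-adapted bounds for the second pass (factors are a heuristic).
    return get_reaper_data(wav_path, maxf0=str(round(1.5 * q75)), minf0=str(round(0.75 * q25)))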
scripts/runSQ.py
CHANGED
@@ -1,7 +1,7 @@
 import os, unicodedata
 from scripts.ctcalign import aligner, wav16m
 from scripts.tapi import tiro
-from scripts.reaper2pass import estimate_pitch
+from scripts.reaper2pass import estimate_pitch, save_pitch
 
 # given a Sentence string,
 # using a metadata file of SQ, // SQL1adult_metadata.tsv
@@ -14,6 +14,7 @@ def run(sentence, voices):
     #voices = ['Alfur','Dilja','Karl', 'Dora']
     # On tts.tiro.is speech marks are only available
     # for the voices: Alfur, Dilja, Karl and Dora.
+    # in practise, only for alfur and dilja.
 
     corpus_meta = '/home/user/app/human_data/SQL1adult10s_metadata.tsv'
     speech_dir = '/home/user/app/human_data/audio/squeries/'
@@ -29,10 +30,10 @@ def run(sentence, voices):
     meta = get_recordings(norm_sentence, corpus_meta)
     if meta:
         align_human(meta,speech_aligns,speech_dir,align_model_path)
-        f0_human(meta, speech_f0, speech_dir, reaper_path)
+        f0_human(meta, speech_f0, speech_dir)
     if voices:
         temp_a_sample = get_tts(sentence,voices,tts_dir)
-        f0_tts(sentence, voices, tts_dir, reaper_path)
+        f0_tts(sentence, voices, tts_dir)
 
     # by now, all the data to cluster and eval exists in the right place.
     # (after the last todo of saving pitch to disk instead of only list)
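With these two calls added, run() now covers alignment and pitch for both the corpus recordings and the TTS voices in one go. A hypothetical invocation (the sentence is made up; the voice list follows the comment shown earlier in this file):

from scripts.runSQ import run

# Hypothetical example; a sentence present in SQL1adult10s_metadata.tsv
# also triggers the human-recording branch (align_human / f0_human).
run('Hvar er Perlan?', ['Alfur', 'Dilja'])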
@@ -112,7 +113,7 @@ def align_human(meta,align_dir,speech_dir,model_path):
 
 # check if f0s exist for all of those files.
 # if not, warn, and make them with TODO reaper
-def f0_human(meta, f0_dir, speech_dir, reaper_path):
+def f0_human(meta, f0_dir, speech_dir):
     no_f0 = []
 
     for rec in meta:
@@ -126,31 +127,20 @@ def f0_human(meta, f0_dir, speech_dir, reaper_path):
         os.makedirs(f0_dir)
         for rec in no_f0:
             wav_path = f'{speech_dir}{rec[2]}'
-
-
-
-
-
-
-
-
-            #whatever.
+            fpath = f0_dir + rec[2].replace('.wav','.f0')
+            f0_data = estimate_pitch(wav_path)
+            save_pitch(f0_data,fpath)
+
+
+            print('2ND PASS PITCHES OF', fpath)
+            print(f0_data)
+
 
     else:
         print('All speech pitch trackings existed')
 
 
 
-# # # # # # # # #
-#################
-# TODO
-# IMPLEMENT GOOD 2 STEP PITCH ESTIMATION
-# TODO
-#################
-# # # # # # # # #
-
-
-
 
 # check if the TTS wavs + align jsons exist for this sentence
 # if not, warn and make them with TAPI ******
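The loop that fills no_f0 (old lines 119-125) is outside this hunk; given how the new body names its output files, it presumably checks for f0_dir + rec[2].replace('.wav','.f0') per recording. A sketch of that kind of check, offered as an assumption about the unshown code rather than a quote of it:

import os

# Hypothetical existence check feeding the loop in this hunk;
# assumes rec[2] holds the recording's wav filename, as in the hunk above.
def missing_f0(meta, f0_dir):
    no_f0 = []
    for rec in meta:
        fpath = f0_dir + rec[2].replace('.wav', '.f0')
        if not os.path.exists(fpath):
            no_f0.append(rec)
    return no_f0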
@@ -188,7 +178,7 @@ def get_tts(sentence,voices,ttsdir):
 # check if the TTS f0s exist
 # if not warn + make
 # TODO collapse functions
-def f0_tts(sentence, voices, ttsdir, reaper_path):
+def f0_tts(sentence, voices, ttsdir):
 
     # assume the first 64 chars of sentence are enough
     dpath = sentence.replace(' ','_')[:65]
@@ -202,7 +192,14 @@ def f0_tts(sentence, voices, ttsdir, reaper_path):
 
     if no_f0:
         print(f'Need to estimate pitch for {len(no_f0)} voices')
-
+        for v in voices:
+            wav_path = f'{ttsdir}{dpath}/{v}.wav'
+            fpath = f'{ttsdir}{dpath}/{v}.f0'
+            f0_data = estimate_pitch(wav_path)
+            save_pitch(f0_data,fpath)
+
+            print('2ND PASS PITCHES OF', fpath)
+            print(f0_data)
 
     else:
         print('All TTS pitch trackings existed')
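f0_tts keys everything off the per-sentence directory: dpath is the underscore-joined sentence prefix, and each voice's new .f0 lands next to the .wav that get_tts is expected to have written. A small illustration with made-up inputs (the ttsdir value and the sentence are hypothetical):

ttsdir = '/home/user/app/tts_data/'        # hypothetical; run() passes tts_dir in
sentence = 'Hvar er Perlan?'               # made-up example sentence
dpath = sentence.replace(' ','_')[:65]     # 'Hvar_er_Perlan?'

for v in ['Alfur', 'Dilja']:
    wav_path = f'{ttsdir}{dpath}/{v}.wav'  # where the TTS audio is expected
    fpath = f'{ttsdir}{dpath}/{v}.f0'      # where f0_tts saves the pitch track
    print(wav_path, '->', fpath)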
@@ -211,14 +208,6 @@ def f0_tts(sentence, voices, ttsdir, reaper_path):
 
 
 
-
-#run()
-
-
-
-
-
-
 # https://colab.research.google.com/drive/1RApnJEocx3-mqdQC2h5SH8vucDkSlQYt?authuser=1#scrollTo=410ecd91fa29bc73
 
 # CLUSTER the humans