Upload app.py
app.py CHANGED
@@ -323,11 +323,15 @@ with gr.Blocks() as basic_tts:
     text.submit(generate, inputs=[text, voice, in_ps, speed, trim, use_gpu], outputs=[audio, out_ps])
     generate_btn.click(generate, inputs=[text, voice, in_ps, speed, trim, use_gpu], outputs=[audio, out_ps])
 
+stop_event = threading.Event()
+
 @torch.no_grad()
 def lf_forward(token_lists, voices, speed, device='cpu'):
     voicepack = torch.mean(torch.stack([VOICES[device][v] for v in voices]), dim=0)
     outs = []
     for tokens in token_lists:
+        if stop_event.is_set():
+            break
         ref_s = voicepack[len(tokens)]
         s = ref_s[:, 128:]
         tokens = torch.LongTensor([[0, *tokens, 0]]).to(device)
@@ -409,6 +413,8 @@ def segment_and_tokenize(text, voice, skip_square_brackets=True, newline_split=2
     return [(i, *row) for i, row in enumerate(segments)]
 
 def lf_generate(segments, voice, speed=1, trim=0, pad_between=0, use_gpu=True):
+    global stop_event
+    stop_event.clear()
     token_lists = list(map(tokenize, segments['Tokens']))
     voices = resolve_voices(voice)
     speed = clamp_speed(speed)
@@ -441,6 +447,10 @@ def lf_generate(segments, voice, speed=1, trim=0, pad_between=0, use_gpu=True):
         yield (SAMPLE_RATE, out)
         i += bs
 
+def lf_stop():
+    global stop_event
+    stop_event.set()
+
 def did_change_segments(segments):
     x = len(segments) if segments['Length'].any() else 0
     return [
@@ -458,15 +468,7 @@ def extract_text(file):
             return '\n'.join([line for line in r])
     return None
 
-with gr.Blocks(css='''
-.square-stop-btn {
-    aspect-ratio: 1/1;
-    display: flex;
-    align-items: center;
-    justify-content: center;
-    padding: 0;
-}
-''') as lf_tts:
+with gr.Blocks() as lf_tts:
     with gr.Row():
         with gr.Column():
             file_input = gr.File(file_types=['.pdf', '.txt'], label='Input File: pdf or txt')
@@ -495,13 +497,13 @@ with gr.Blocks(css='''
             pad_between = gr.Slider(minimum=0, maximum=24000, value=0, step=1000, label='🔇 Pad Between', info='How much silence to insert between segments')
             with gr.Row():
                 generate_btn = gr.Button('Generate x0', variant='secondary', interactive=False)
-                stop_btn = gr.Button('
+                stop_btn = gr.Button('Stop', variant='stop')
     with gr.Row():
         segments = gr.Dataframe(headers=['#', 'Text', 'Tokens', 'Length'], row_count=(1, 'dynamic'), col_count=(4, 'fixed'), label='Segments', interactive=False, wrap=True)
     segments.change(fn=did_change_segments, inputs=[segments], outputs=[segment_btn, generate_btn])
    segment_btn.click(segment_and_tokenize, inputs=[text, voice, skip_square_brackets, newline_split], outputs=[segments])
     generate_btn.click(lf_generate, inputs=[segments, voice, speed, trim, pad_between, use_gpu], outputs=[audio_stream])
-    stop_btn.click(
+    stop_btn.click(lf_stop)
 
 with gr.Blocks() as about:
     gr.Markdown('''
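
In short, the commit swaps the CSS-styled square stop button for a plain 'Stop' button and adds a cooperative cancellation path: a module-level threading.Event that lf_generate clears at the start of each run, lf_forward polls between segments, and a new lf_stop handler sets when the button is clicked. Below is a minimal, self-contained sketch of that same pattern; it is not the Space's code, and the toy chunk generator, names, and demo app are stand-ins for illustration only.

# Hedged sketch of the stop-event pattern used by this commit.
# 'generate_chunks', 'stop', and 'demo' are hypothetical; only the
# Event-clear / Event-check / Event-set structure mirrors the diff.
import threading
import numpy as np
import gradio as gr

SAMPLE_RATE = 24000
stop_event = threading.Event()          # shared flag, module level as in the diff

def generate_chunks(n_chunks=10):
    stop_event.clear()                  # reset before each new run
    for i in range(int(n_chunks)):
        if stop_event.is_set():         # cooperative cancellation point
            break
        t = np.arange(SAMPLE_RATE) / SAMPLE_RATE
        tone = np.sin(2 * np.pi * 220 * (i + 1) * t).astype(np.float32)
        yield (SAMPLE_RATE, tone)       # stream one chunk at a time

def stop():
    stop_event.set()                    # signal the running generator to stop

with gr.Blocks() as demo:
    audio = gr.Audio(streaming=True, autoplay=True)
    n = gr.Slider(1, 20, value=10, step=1, label='Chunks')
    gr.Button('Generate').click(generate_chunks, inputs=[n], outputs=[audio])
    gr.Button('Stop', variant='stop').click(stop)

if __name__ == '__main__':
    demo.launch()

One trade-off of this pattern: because the Event is a module-level global, all sessions share the same flag, so pressing Stop halts every in-flight generation rather than only the caller's. That is usually acceptable for a single-user demo Space.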