app.py
CHANGED
@@ -1339,6 +1339,8 @@ def _stylett2(text='Hallov worlds Far over the',

     return speech_audio

+
+
 description = (
     "Estimate **age**, **gender**, and **expression** "
     "of the speaker contained in an audio file or microphone recording. \n"
@@ -1350,12 +1352,8 @@ description = (
     "recognises the expression dimensions arousal, dominance, and valence. "
 )

-# =============
-
 with gr.Blocks(theme='huggingface') as demo:
-    # This state will be used to hold the generated TTS file path
     tts_file = gr.State(value=None)
-    # This state will hold the list of examples, including the generated one
     audio_examples_state = gr.State(
         value=[
             ["wav/female-46-neutral.wav"],
@@ -1392,13 +1390,8 @@ with gr.Blocks(theme='huggingface') as demo:
             output_audio = gr.Audio(label="TTS Output")

             def generate_and_update_state(text, choice, soundscape, kv, current_examples):
-                # This function calls the TTS and updates the state
                 audio_path = audionar_tts(text, choice, soundscape, kv)
-
-                # Append the new audio path to the existing list of examples
                 updated_examples = current_examples + [[audio_path]]
-
-                # Return the generated audio path for the output and the updated list for the state
                 return audio_path, updated_examples

             generate_button.click(
@@ -1417,8 +1410,7 @@ with gr.Blocks(theme='huggingface') as demo:
                 label="Audio input",
                 min_length=0.025,
             )
-
-            # The gr.Examples component that will be dynamically updated
+
             audio_examples = gr.Examples(
                 examples=[
                     ["wav/female-46-neutral.wav"],
@@ -1429,9 +1421,10 @@ with gr.Blocks(theme='huggingface') as demo:
                 inputs=[input_audio_analysis],
                 label="Examples from CREMA-D, ODbL v1.0 license",
             )
-
+
             gr.Markdown("Only the first two seconds of the audio will be processed.")
-
+
+            submit_btn = gr.Button(value="Submit", variant="primary")
             with gr.Column():
                 output_age = gr.Textbox(label="Age")
                 output_gender = gr.Label(label="Gender")
@@ -1439,11 +1432,9 @@ with gr.Blocks(theme='huggingface') as demo:

             outputs = [output_age, output_gender, output_expression]

-            # Function to update the examples from the state
             def load_examples_from_state(examples_list):
-                return gr.Examples.update(examples=examples_list)
+                return gr.Examples.update(examples=examples_list, label="Examples (including generated TTS)")

-            # This is the key: an event listener that triggers when the tab is selected
             demo.load(
                 fn=load_examples_from_state,
                 inputs=audio_examples_state,
@@ -1453,4 +1444,4 @@ with gr.Blocks(theme='huggingface') as demo:

     submit_btn.click(recognize, input_audio_analysis, outputs)

-demo.launch(debug=True)
+demo.launch(debug=True)
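The pattern this commit strips the scaffolding comments from is easy to lose in the diff: synthesize a TTS clip, append its path to a gr.State list, and return both so later listeners can reuse the list. The following is a minimal standalone sketch of that wiring only, not the Space's code: fake_tts is a placeholder for the Space's audionar_tts, and the component names are simplified.

import gradio as gr

def fake_tts(text):
    # Placeholder for audionar_tts(); a real implementation would synthesize
    # `text` and return the path of the written wav file.
    return "wav/female-46-neutral.wav"

with gr.Blocks() as demo:
    # Holds the example list across events. gr.State values should be
    # replaced rather than mutated in place, so the handler builds a new list.
    examples_state = gr.State(value=[["wav/female-46-neutral.wav"]])
    text_input = gr.Textbox(label="Text")
    output_audio = gr.Audio(label="TTS Output")
    generate_button = gr.Button("Generate")

    def generate_and_track(text, current_examples):
        audio_path = fake_tts(text)
        # Return the clip for the Audio output and the grown list for the state.
        return audio_path, current_examples + [[audio_path]]

    generate_button.click(
        generate_and_track,
        inputs=[text_input, examples_state],
        outputs=[output_audio, examples_state],
    )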
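One caveat on the line this commit rewrites: in most Gradio releases gr.Examples is a helper class rather than a component, so gr.Examples.update(...) may not exist and the demo.load callback could fail at runtime. The deleted comment was also misleading, since demo.load fires when the page loads, not when a tab is selected. The updatable surface is the gr.Dataset component that gr.Examples builds on. A hedged alternative sketch, assuming Gradio 4.x, where returning a fresh gr.Dataset from an event re-renders the samples:

import gradio as gr

with gr.Blocks() as demo:
    examples_state = gr.State(value=[["wav/female-46-neutral.wav"]])
    input_audio = gr.Audio(type="filepath", label="Audio input")

    # gr.Dataset is the component underlying gr.Examples; unlike gr.Examples
    # it can be the output of an event listener.
    dataset = gr.Dataset(
        components=[input_audio],
        samples=[["wav/female-46-neutral.wav"]],
        label="Examples",
    )

    def refresh_examples(examples_list):
        # Returning a new gr.Dataset instance replaces the rendered samples.
        return gr.Dataset(samples=examples_list)

    demo.load(refresh_examples, inputs=examples_state, outputs=dataset)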