Dionyssos committed on
Commit
697d069
·
1 Parent(s): 17a3ebf
Files changed (1) hide show
  1. app.py +22 -20
app.py CHANGED
@@ -1341,16 +1341,21 @@ def _stylett2(text='Hallov worlds Far over the',
1341
 
1342
 
1343
 
1344
- description = (
1345
- "Estimate **age**, **gender**, and **expression** "
1346
- "of the speaker contained in an audio file or microphone recording. \n"
1347
- f"The model [{age_gender_model_name}]"
1348
- f"(https://huggingface.co/{age_gender_model_name}) "
1349
- "recognises age and gender, "
1350
- f"whereas [{expression_model_name}]"
1351
- f"(https://huggingface.co/{expression_model_name}) "
1352
- "recognises the expression dimensions arousal, dominance, and valence. "
1353
- )
 
 
 
 
 
1354
 
1355
  with gr.Blocks(theme='huggingface') as demo:
1356
  tts_file = gr.State(value=None)
@@ -1403,7 +1408,6 @@ with gr.Blocks(theme='huggingface') as demo:
1403
  with gr.Tab(label="Speech Analysis"):
1404
  with gr.Row():
1405
  with gr.Column():
1406
- gr.Markdown(description)
1407
  input_audio_analysis = gr.Audio(
1408
  sources=["upload", "microphone"],
1409
  type="filepath",
@@ -1412,12 +1416,7 @@ with gr.Blocks(theme='huggingface') as demo:
1412
  )
1413
 
1414
  audio_examples = gr.Examples(
1415
- examples=[
1416
- ["wav/female-46-neutral.wav"],
1417
- ["wav/female-20-happy.wav"],
1418
- ["wav/male-60-angry.wav"],
1419
- ["wav/male-27-sad.wav"],
1420
- ],
1421
  inputs=[input_audio_analysis],
1422
  label="Examples from CREMA-D, ODbL v1.0 license",
1423
  )
@@ -1432,13 +1431,16 @@ with gr.Blocks(theme='huggingface') as demo:
1432
 
1433
  outputs = [output_age, output_gender, output_expression]
1434
 
 
 
 
1435
  def load_examples_from_state(examples_list):
1436
- return gr.Examples.update(examples=examples_list, label="Examples (including generated TTS)")
1437
 
1438
  demo.load(
1439
  fn=load_examples_from_state,
1440
- inputs=audio_examples_state,
1441
- outputs=audio_examples,
1442
  queue=False,
1443
  )
1444
 
 
1341
 
1342
 
1343
 
1344
+
1345
+ import gradio as gr
1346
+
1347
+ # Dummy functions to make the code runnable for demonstration
1348
+ def audionar_tts(text, choice, soundscape, kv):
1349
+ # This function would generate an audio file and return its path
1350
+ return "dummy_audio.wav"
1351
+
1352
+ def recognize(audio_input_path):
1353
+ # This function would analyze the audio and return results
1354
+ return "30", "Male", {"Angry": 0.9}
1355
+
1356
+ # Assuming these are defined elsewhere in the user's code
1357
+ language_names = ["English", "Spanish"]
1358
+ VOICES = ["Voice 1", "Voice 2"]
1359
 
1360
  with gr.Blocks(theme='huggingface') as demo:
1361
  tts_file = gr.State(value=None)
 
1408
  with gr.Tab(label="Speech Analysis"):
1409
  with gr.Row():
1410
  with gr.Column():
 
1411
  input_audio_analysis = gr.Audio(
1412
  sources=["upload", "microphone"],
1413
  type="filepath",
 
1416
  )
1417
 
1418
  audio_examples = gr.Examples(
1419
+ examples=[], # Initialize with an empty list
 
 
 
 
 
1420
  inputs=[input_audio_analysis],
1421
  label="Examples from CREMA-D, ODbL v1.0 license",
1422
  )
 
1431
 
1432
  outputs = [output_age, output_gender, output_expression]
1433
 
1434
+ # Fix: populate the examples lazily — this callback builds a
1435
+ # `gr.Examples.update(...)` payload from the examples list stored in
1436
+ # session state; the `demo.load` event applies it to `audio_examples`.
1437
  def load_examples_from_state(examples_list):
1438
+ return gr.Examples.update(examples=examples_list)
1439
 
1440
  demo.load(
1441
  fn=load_examples_from_state,
1442
+ inputs=[audio_examples_state],
1443
+ outputs=[audio_examples],
1444
  queue=False,
1445
  )
1446