Spark Chou committed on
Commit d8a6b5e · 1 Parent(s): 9e1a1a7
Files changed (1)
  1. app.py +5 -4
app.py CHANGED
@@ -16,7 +16,7 @@ from concurrent.futures import ThreadPoolExecutor, TimeoutError as FutureTimeout
 # Load dataset from HuggingFace
 dataset = load_dataset("intersteller2887/Turing-test-dataset-en", split="train")
 dataset = dataset.cast_column("audio", Audio(decode=False)) # Prevent calling 'torchcodec' from newer version of 'datasets'
-
+sample1_audio = dataset[0]["audio"]
 # Huggingface space working directory: "/home/user/app"
 target_audio_dir = "/home/user/app/audio"
 os.makedirs(target_audio_dir, exist_ok=True)
@@ -38,8 +38,8 @@ for item in dataset:
 all_data_audio_paths = local_audio_paths
 
 # Take first file of the datasets as sample
-sample1_audio_path = local_audio_paths[0]
-print(sample1_audio_path)
+# sample1_audio_path = local_audio_paths[0]
+# print(sample1_audio_path)
 
 # ==============================================================================
 # Data Definition
@@ -883,7 +883,8 @@ with gr.Blocks(theme=gr.themes.Soft(), css=".gradio-container {max-width: 960px
 sample_dimension_selector = gr.Radio(DIMENSION_TITLES, label="Select Learning Dimension", value=DIMENSION_TITLES[0])
 with gr.Row():
     with gr.Column(scale=1):
-        sample_audio = gr.Audio(label="Sample Audio", value=DIMENSIONS_DATA[0]["audio"])
+        # sample_audio = gr.Audio(label="Sample Audio", value=DIMENSIONS_DATA[0]["audio"])
+        sample_audio = gr.Audio(label="Sample Audio", value=sample1_audio)
     with gr.Column(scale=2):
         with gr.Column(visible=True) as interactive_view:
             gr.Markdown("#### Please rate the following features (0-5 points. 0 - Feature not present; 1 - Machine; 3 - Neutral; 5 - Human)")