Update app.py
app.py CHANGED
@@ -47,54 +47,61 @@ print(sample1_audio_path)
 
 DIMENSIONS_DATA = [
     {
-        "title": "
-        "audio": sample1_audio_path,
+        "title": "Semantic and Pragmatic Features",
+        "audio": "sample1_audio_path",
         "sub_dims": [
-            "
-            "
-            "
-            "
+            "Memory Consistency: Human memory in short contexts is usually consistent and self-correcting (e.g., by asking questions); machines may show inconsistent context memory and fail to notice or correct errors (e.g., forgetting key information and persisting in wrong answers).",
+            "Logical Coherence: Human logic is naturally coherent and allows reasonable leaps; machine logic is abrupt or self-contradictory (e.g., sudden topic shifts without transitions).",
+            "Pronunciation Accuracy: Humans generally pronounce words correctly and naturally, distinguishing polyphonic characters based on context; machines often mispronounce or lack contextual judgment for polyphonic words.",
+            "Multilingual Mixing: Humans mix multiple languages fluently and contextually; machines mix languages rigidly, lacking logical language switching.",
+            "Linguistic Vagueness: Human speech tends to include vague expressions (e.g., “more or less,” “I guess”) and self-corrections; machine responses are typically precise and assertive.",
+            "Filler Word Usage: Human filler words (e.g., 'uh', 'like') appear randomly and show signs of thinking; machine fillers are either repetitive and patterned or completely absent.",
+            "Metaphor and Pragmatic Intent: Humans use metaphors, irony, and euphemisms to express layered meanings; machines interpret literally or use rhetorical devices awkwardly, lacking semantic richness."
         ],
-        "reference_scores": [5, 5,
+        "reference_scores": [5, 5, 3, 3, 5, 5, 3]
     },
     {
-        "title": "
-        "audio": sample1_audio_path,
+        "title": "Non-Physiological Paralinguistic Features",
+        "audio": "sample1_audio_path",
         "sub_dims": [
-            "
-            "
+            "Rhythm: Human speech rate varies with meaning, occasionally hesitating or pausing; machine rhythm is uniform, with little or mechanical pauses.",
+            "Intonation: Humans naturally raise or lower pitch to express questions, surprise, or emphasis; machine intonation is monotonous or overly patterned, mismatching the context.",
+            "Emphasis: Humans consciously stress key words to highlight important information; machines have uniform word emphasis or stress incorrect parts.",
+            "Auxiliary Vocalizations: Humans produce context-appropriate non-verbal sounds (e.g., laughter, sighs); machine non-verbal sounds are contextually incorrect, mechanical, or absent."
         ],
-        "reference_scores": [
+        "reference_scores": [4, 5, 4, 3]
     },
     {
-        "title": "
-        "audio": sample1_audio_path,
+        "title": "Physiological Paralinguistic Features",
+        "audio": "sample1_audio_path",
         "sub_dims": [
-            "
-            "
+            "Micro-physiological Noise: Human speech includes unconscious physiological sounds like breathing, saliva, or bubbling, naturally woven into rhythm; machine speech is overly clean or adds unnatural noises.",
+            "Pronunciation Instability: Human pronunciation includes irregularities (e.g., linking, tremors, slurring, nasal sounds); machine pronunciation is overly standard and uniform, lacking personality.",
+            "Accent: Humans naturally exhibit regional accents or speech traits; machine accents sound forced or unnatural."
         ],
-        "reference_scores": [
+        "reference_scores": [3, 3, 4]
     },
     {
-        "title": "
-        "audio": sample1_audio_path,
+        "title": "Mechanical Persona",
+        "audio": "sample1_audio_path",
         "sub_dims": [
-            "
-            "
+            "Flattery: Humans assess context to agree or disagree, sometimes offering differing opinions; machines excessively agree, thank, or apologize, over-validating the other party and lacking authentic interaction.",
+            "Formalized Expression: Human speech is flexible; machine responses are formally structured, overly written, and use vague wording."
         ],
         "reference_scores": [5, 5]
     },
     {
-        "title": "
-        "audio": sample1_audio_path,
+        "title": "Emotional Expression",
+        "audio": "sample1_audio_path",
         "sub_dims": [
-            "
-            "
+            "Semantic Level: Humans show appropriate emotional responses to contexts like sadness or joy; machines are emotionally flat, or use emotional words vaguely and out of context.",
+            "Acoustic Level: Human pitch, volume, and rhythm change dynamically with emotion; machine emotional tone is formulaic or mismatched with the context."
         ],
-        "reference_scores": [
+        "reference_scores": [3, 3]
     }
 ]
 
+
 DIMENSION_TITLES = [d["title"] for d in DIMENSIONS_DATA]
 MAX_SUB_DIMS = max(len(d['sub_dims']) for d in DIMENSIONS_DATA)
 
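Each reference_scores list is meant to pair one score with one entry of sub_dims, and MAX_SUB_DIMS picks up the largest such list (7, for the first dimension). Below is a minimal sanity-check sketch, not part of the commit; it assumes DIMENSIONS_DATA is in scope as defined above and that scores use the 1-5 scale suggested by the reference values.

def validate_dimensions(dimensions):
    """Sketch: check that each dimension pairs one reference score with each sub-dimension."""
    for dim in dimensions:
        scores = dim["reference_scores"]
        sub_dims = dim["sub_dims"]
        # One reference score per sub-dimension description.
        assert len(scores) == len(sub_dims), (
            f"{dim['title']}: {len(scores)} scores for {len(sub_dims)} sub-dimensions"
        )
        # Assumption: scores lie on a 1-5 rating scale, as the reference values suggest.
        assert all(1 <= s <= 5 for s in scores), f"{dim['title']}: score out of range"

validate_dimensions(DIMENSIONS_DATA)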
@@ -543,7 +550,7 @@ def submit_question_and_advance(q_idx, d_idx, selections, final_choice, all_resu
         return init_q_updates + (all_results, gr.update(value=""))
     else:
         # 准备完整结果数据
-        result_str = "###
+        result_str = "### Test Finished!\n\nOverview of the submission:\n"
         for res in all_results:
             result_str += f"##### 最终判断: **{res['selections'].get('final_choice', '未选择')}**\n"
             for dim_title, dim_data in res['selections'].items():
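The summary loop above reads each entry's 'selections' dict: 'final_choice' (最终判断 means "final judgment", 未选择 "not selected") sits alongside one entry per dimension title. The following is a hypothetical example of the shape that loop expects; the per-dimension payload ("scores" here) is illustrative only, since this hunk does not show how it is filled in.

# Hypothetical all_results entry, inferred from the loop above; the per-dimension
# value ("scores") is an illustration and not taken from app.py.
example_result = {
    "selections": {
        "final_choice": "人类",  # or "机器人"; the loop falls back to "未选择" when absent
        "Semantic and Pragmatic Features": {"scores": [5, 4, 3, 3, 5, 5, 3]},
        "Non-Physiological Paralinguistic Features": {"scores": [4, 5, 4, 3]},
        # ... one entry per remaining dimension title ...
    },
}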
@@ -756,11 +763,11 @@ with gr.Blocks(theme=gr.themes.Soft(), css=".gradio-container {max-width: 960px
     }
 
     with welcome_page:
-        gr.Markdown("# AI
-        start_btn = gr.Button("
+        gr.Markdown("# AI Detective\nListen to the following conversations. Please determine which respondent is an AI.")
+        start_btn = gr.Button("Start", variant="primary")
 
     with info_page:
-        gr.Markdown("##
+        gr.Markdown("## Basic Information")
         username_input = gr.Textbox(label="用户名", placeholder="请输入你的昵称")
         age_input = gr.Radio(["18岁以下", "18-25岁", "26-35岁", "36-50岁", "50岁以上"], label="年龄")
         gender_input = gr.Radio(["男", "女", "其他"], label="性别")
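welcome_page, info_page, sample_page, and test_page are evidently containers whose visibility is toggled as the participant advances. A minimal, self-contained sketch of that navigation pattern using gr.Column visibility and gr.update follows; it is an assumption about how the app wires its navigation, not code taken from this commit.

import gradio as gr

# Minimal navigation sketch: two "pages" as Columns, and a button that hides
# one and shows the other. Mirrors the welcome_page -> info_page flow, but the
# wiring here is assumed rather than copied from app.py.
with gr.Blocks() as demo:
    with gr.Column(visible=True) as welcome_page:
        gr.Markdown("# AI Detective")
        start_btn = gr.Button("Start", variant="primary")
    with gr.Column(visible=False) as info_page:
        gr.Markdown("## Basic Information")
        username_input = gr.Textbox(label="Username")

    # Hide the welcome page and reveal the info page when Start is clicked.
    start_btn.click(
        fn=lambda: (gr.update(visible=False), gr.update(visible=True)),
        inputs=None,
        outputs=[welcome_page, info_page],
    )

demo.launch()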
@@ -770,7 +777,8 @@ with gr.Blocks(theme=gr.themes.Soft(), css=".gradio-container {max-width: 960px
         submit_info_btn = gr.Button("提交并开始学习样例", variant="primary", interactive=False)
 
     with sample_page:
-
+
+
         gr.Markdown("## 样例分析\n请选择一个维度进行学习和打分练习。所有维度共用同一个样例音频。")
         sample_dimension_selector = gr.Radio(DIMENSION_TITLES, label="选择学习维度", value=DIMENSION_TITLES[0])
         with gr.Row():
@@ -801,7 +809,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=".gradio-container {max-width: 960px
                     "- 完成所有维度后,请根据整体印象对回应方的身份做出做出“人类”或“机器人”的 **最终判断**。\n"
                     "- 你可以使用“上一维度”和“下一维度”按钮在5个维度间自由切换和修改分数。")
         go_to_test_btn = gr.Button("开始测试", variant="primary")
-
+
     with test_page:
         gr.Markdown("## 正式测试")
         question_progress_text = gr.Markdown()