# cc_vad/examples/evaluation/step_2_show_metrics.py
# Author: HoneyTian — commit d9015be ("update")
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import json
import os
from pathlib import Path
import sys
# Make the project root (two directories up from this script) importable,
# so the script works when run directly from its own directory.
pwd = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(pwd, "../../"))
from tqdm import tqdm
def get_args():
    """Parse command-line arguments.

    Returns:
        argparse.Namespace with a single ``eval_file`` attribute
        (path to an evaluation JSONL file).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--eval_file",
        # default=r"native_silero_vad.jsonl",
        type=str,
    )
    return parser.parse_args()
# JSONL result files to summarize, one per VAD backend.
evaluation_files = [
    "native_silero_vad.jsonl",
    "fsmn-vad.jsonl",
    "silero-vad.jsonl",
]


def main():
    """Print duration-weighted average metrics for each evaluation file.

    Each file in ``evaluation_files`` is JSONL; every row must carry
    ``duration``, ``accuracy``, ``precision``, ``recall`` and ``f1``
    fields. Metrics are weighted by the row's duration so longer samples
    contribute proportionally more to the averages.
    """
    # args = get_args()
    for eval_file in evaluation_files:
        eval_file = Path(eval_file)

        total = 0
        total_duration = 0
        total_accuracy = 0
        total_precision = 0
        total_recall = 0
        total_f1 = 0

        with open(eval_file.as_posix(), "r", encoding="utf-8") as f:
            for line in f:
                row = json.loads(line)
                duration = row["duration"]

                total += 1
                total_duration += duration
                # Weight each metric by the sample's duration.
                total_accuracy += row["accuracy"] * duration
                total_precision += row["precision"] * duration
                total_recall += row["recall"] * duration
                total_f1 += row["f1"] * duration

        # Compute the weighted averages once, after the whole file is read
        # (the original recomputed them on every row). Guard against a file
        # whose rows sum to zero duration, which would divide by zero.
        if total_duration > 0:
            average_accuracy = total_accuracy / total_duration
            average_precision = total_precision / total_duration
            average_recall = total_recall / total_duration
            average_f1 = total_f1 / total_duration
        else:
            average_accuracy = average_precision = average_recall = average_f1 = 0

        # NOTE: the original nested a double-quoted f-string inside a
        # double-quoted f-string, which is a SyntaxError before Python 3.12
        # (PEP 701); the flattened form below prints the identical text.
        summary = (f"{eval_file.name}, "
                   f"total: {total}, "
                   f"accuracy: {average_accuracy}, "
                   f"precision: {average_precision}, "
                   f"recall: {average_recall}, "
                   f"f1: {average_f1}, "
                   f"total_duration: {round(total_duration / 60, 4)}min, "
                   )
        print(summary)
    return
# Run the summary only when executed as a script, not on import.
if __name__ == "__main__":
    main()