Upload 7 files

- assets/benchmark.png +3 -0
- assets/category_sunburst.png +3 -0
- assets/example.png +3 -0
- assets/logo.png +3 -0
- assets/modality_pie.png +3 -0
- assets/pipeline.png +3 -0
- code/evaluation.py +131 -0
assets/benchmark.png ADDED (Git LFS)
assets/category_sunburst.png ADDED (Git LFS)
assets/example.png ADDED (Git LFS)
assets/logo.png ADDED (Git LFS)
assets/modality_pie.png ADDED (Git LFS)
assets/pipeline.png ADDED (Git LFS)
code/evaluation.py
ADDED
@@ -0,0 +1,131 @@
import argparse
import json
import pickle
from tqdm import tqdm
from pathlib import Path
import re


def string_match(answer, prediction, choices):
    # Function to normalize and tokenize text
    def tokenize(text):
        # Convert to lowercase and find all word tokens
        return set(re.findall(r'\b\w+\b', text.lower()))

    # Tokenize prediction and answer
    prediction_tokens = tokenize(prediction)
    answer_tokens = tokenize(answer)

    if not prediction_tokens:
        return False

    # Tokenize incorrect choices and exclude tokens present in the answer
    incorrect_tokens = set()
    for choice in choices:
        choice_tokens = tokenize(choice)
        if choice_tokens != answer_tokens:
            incorrect_tokens.update(choice_tokens - answer_tokens)

    # Condition 1: All tokens of the answer are in the prediction
    cond1 = answer_tokens.issubset(prediction_tokens)

    # Condition 2: Prediction does not contain any tokens from incorrect choices (excluding shared words)
    cond2 = prediction_tokens.isdisjoint(incorrect_tokens)

    return cond1 and cond2


if __name__ == "__main__":

    parser = argparse.ArgumentParser(description="Process benchmark JSON and calculate accuracy.")
    parser.add_argument('--input', type=str, required=True, help='Path to input JSON file to be evaluated')

    args = parser.parse_args()

    with open(args.input, 'r') as f:
        input_data = json.load(f)

    corr, total = 0, 0

    # Track [n_correct, n_total] counters for the different modalities and categories:
    modality_metrics = {'sound': [0, 0], 'music': [0, 0], 'speech': [0, 0], 'mix-sound-music': [0, 0], 'mix-sound-speech': [0, 0], 'mix-music-speech': [0, 0], 'mix-sound-music-speech': [0, 0]}
    category_metrics = {'Signal Layer': [0, 0], 'Perception Layer': [0, 0], 'Semantic Layer': [0, 0], 'Cultural Layer': [0, 0]}

    # Sub-category metrics, initialized lazily as sub-categories are encountered
    subcat_metrics = {}

    output_key = 'model_prediction'  # The key that contains the model output
    no_pred_count = 0
    matched_outputs = []
    new_data = []

    # for idx, sample in enumerate(tqdm(input_data)):
    for idx, sample in enumerate(input_data):

        # If there's no model output key, count the sample as unanswered and skip it
        if output_key not in sample:
            no_pred_count += 1
            continue

        _prediction = sample[output_key]
        _answer = sample['answer']
        modality = sample['modality']
        category = sample['category']
        choices = sample['choices']

        # Get the sub-category
        subcat = sample.get('sub-category', None)
        if subcat is not None:
            # If we haven't seen this sub-category before, initialize its counters
            if subcat not in subcat_metrics:
                subcat_metrics[subcat] = [0, 0]

        match_result = string_match(_answer, _prediction, choices)

        if match_result:
            modality_metrics[modality][0] += 1
            category_metrics[category][0] += 1
            if subcat is not None:
                subcat_metrics[subcat][0] += 1
            matched_outputs.append([_answer, _prediction])
            corr += 1
            sample['match'] = 1
        else:
            sample['match'] = 0

        total += 1
        new_data.append(sample)
        modality_metrics[modality][1] += 1
        category_metrics[category][1] += 1
        if subcat is not None:
            subcat_metrics[subcat][1] += 1

    # Print results:
    print("*" * 30)
    print("Modality-wise Accuracy:")
    for modality in modality_metrics:
        n_correct, n_total = modality_metrics[modality]
        acc = (n_correct / n_total) * 100 if n_total > 0 else 0
        print(f"{modality} : {acc:.2f}% over {n_total} samples")

    print("*" * 30)
    print("Category-wise Accuracy:")
    for category in category_metrics:
        n_correct, n_total = category_metrics[category]
        acc = (n_correct / n_total) * 100 if n_total > 0 else 0
        print(f"{category} : {acc:.2f}% over {n_total} samples")

    print("*" * 30)
    print("Sub-category-wise Accuracy:")
    for subcat in subcat_metrics:
        n_correct, n_total = subcat_metrics[subcat]
        acc = (n_correct / n_total) * 100 if n_total > 0 else 0
        print(f"{subcat} : {acc:.2f}% over {n_total} samples")

    print("*" * 30)
    print(f"Total Accuracy: {(corr / total) * 100:.2f}% over {total} samples")
    print("*" * 30)
    print(f"No prediction count: {no_pred_count}")
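For reference, a minimal sketch of how the script might be invoked and what an input record could look like. The file name predictions.json and the example record below are hypothetical; only the field names ('answer', 'choices', 'modality', 'category', the optional 'sub-category', and 'model_prediction') and the modality/category values are taken from the code above.

python code/evaluation.py --input predictions.json

# Hypothetical entry in predictions.json (the file is a JSON list of such records):
# {
#   "choices": ["piano", "violin", "guitar", "flute"],
#   "answer": "violin",
#   "modality": "music",
#   "category": "Perception Layer",
#   "sub-category": "instrument recognition",   # optional; this label is hypothetical
#   "model_prediction": "The instrument is a violin."
# }
# string_match() would count this record as correct: every token of the answer
# ("violin") appears in the prediction, and no token unique to an incorrect choice does.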