k-m-irfan committed
Commit b392c7a · verified · 1 Parent(s): 465a346

Upload folder using huggingface_hub

Files changed (1)
  1. lmms_eval/tasks/mintmcq/utils.py +9 -34
lmms_eval/tasks/mintmcq/utils.py CHANGED
@@ -16,14 +16,12 @@ from lmms_eval.tasks._task_utils.file_utils import generate_submission_file
 
 VIDEO_TYPE = ["short", "medium", "long"]
 CATEGORIES = ["Artistic Performance", "Culture", "Digital Content", "Knowledge", "Life Record", "Others", "Sports Competition"]
-
-replace_prompt = " Please answer yes or no."
+OPTION_MAP = ['A','B','C','D','E','F','G','H']
 
 with open(Path(__file__).parent / "_default_template_yaml", "r") as f:
     raw_data = f.readlines()
     safe_data = []
     for i, line in enumerate(raw_data):
-        # remove function definition since yaml load cannot handle it
        if "!function" not in line:
             safe_data.append(line)
 
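
Note: the "!function" filtering in the context above exists because yaml.safe_load rejects custom tags such as !function. A minimal sketch of the same technique, using a made-up config string rather than the real _default_template_yaml:

    import yaml

    # Hypothetical config text, for illustration only.
    raw = "metric: accuracy\nprocess_results: !function utils.mint_process_results\n"

    # Drop lines carrying the custom tag, as the task file does, so that
    # yaml.safe_load can parse the remainder.
    safe = "".join(line for line in raw.splitlines(keepends=True) if "!function" not in line)
    print(yaml.safe_load(safe))  # {'metric': 'accuracy'}
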
@@ -37,7 +35,6 @@ cache_name = config["dataset_kwargs"]["cache_dir"]
 def convert_time_to_frame(time_in_seconds, fps):
     return int(time_in_seconds * fps)
 
-
 def mint_doc_to_visual(doc):
     cache_dir = os.path.join(base_cache_dir, cache_name)
     video_path = doc["mint_video_id"]
@@ -56,7 +53,7 @@ def mint_doc_to_visual(doc):
 def mint_doc_to_text(doc, lmms_eval_specific_kwargs=None):
     option_prompt = "Select the best answer to the following multiple-choice question based on the video and the subtitles. Respond with only the letter (A, B, C, or D) of the correct option."
     question = doc["question"]
-    option = "\n".join([f"{opt}" for i, opt in enumerate(doc["options"])])
+    option = "\n".join([f"{OPTION_MAP[i]}. {opt}" for i, opt in enumerate(doc["options"])])
     question = question + "\n" + option
     post_prompt = lmms_eval_specific_kwargs["post_prompt"] if "post_prompt" in lmms_eval_specific_kwargs else "The best answer is:"
     full_prompt = option_prompt + "\n" + question + "\n" + post_prompt
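
Note: before this change the options were rendered without letter labels, so a model told to "respond with only the letter" never saw any letters. A sketch of the old versus new joins, with a hypothetical doc:

    OPTION_MAP = ["A", "B", "C", "D", "E", "F", "G", "H"]
    doc = {"options": ["a cat", "a dog", "a bird", "a fish"]}  # hypothetical record

    old = "\n".join(f"{opt}" for opt in doc["options"])  # bare option texts, no labels
    new = "\n".join(f"{OPTION_MAP[i]}. {opt}" for i, opt in enumerate(doc["options"]))
    print(new)
    # A. a cat
    # B. a dog
    # C. a bird
    # D. a fish
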
@@ -69,7 +66,8 @@ def extract_characters_regex(s):
         "The correct answer is",
         "The answer is",
         "The answer",
-        "The best option is" "The correct option is",
+        "The best option is",
+        "The correct option is",
         "Best answer:" "Best option:",
     ]
     for answer_prefix in answer_prefixes:
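
Note: the removed entry relied on adjacent string literals, which Python concatenates implicitly, so the list held a single prefix that could never appear in model output; splitting it into two entries fixes that. The surviving "Best answer:" "Best option:" entry still has the same pattern. To see the effect:

    prefixes = [
        "The best option is" "The correct option is",  # implicit concatenation: ONE string
    ]
    print(prefixes)       # ['The best option isThe correct option is']
    print(len(prefixes))  # 1
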
@@ -102,13 +100,9 @@ def mint_process_results(doc, results):
     pred_ans = extract_characters_regex(pred)
 
     category = doc["video_category"]
-    # sub_category = doc["sub_category"]
-    # task_category = doc["task_type"]
-    data_dict = {"question_id": doc["id"], "duration": doc["duration"], "category": category, "pred_answer": pred_ans, "answer": doc["answer"]}
-
-    # return {f"mint_percetion_score": data_dict for metric in matrices}
-    return {f"mint_percetion_score": data_dict}
-
+    doc["answer_option"] = OPTION_MAP[doc["options"].index(doc["answer"])]
+    data_dict = {"question_id": doc["id"], "duration": doc["duration"], "category": category, "pred_answer": pred_ans, "answer": doc["answer"], "answer_option": doc["answer_option"]}
+    return {f"mint_perception_score": data_dict}
 
 def mint_aggregate_results(results):
     """
@@ -127,11 +121,10 @@ def mint_aggregate_results(results):
     for result in results:
         video_type = result["duration"]
         category = result["category"]
-        # sub_category = result["sub_category"]
-        # task_category = result["task_category"]
         key = f"{video_type}_{category}"
         category2score[key]["answered"] += 1
-        category2score[key]["correct"] += result["pred_answer"] == result["answer"]
+        # category2score[key]["correct"] += result["pred_answer"] == result["answer"]
+        category2score[key]["correct"] += result["pred_answer"] == result["answer_option"]
 
     for video_type in VIDEO_TYPE:
         total_correct = 0
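
Note: the comparison yields a bool, which += treats as 0 or 1, so every record adds one to "answered" and at most one to "correct". A sketch, assuming a defaultdict initialization that the diff context implies but does not show:

    from collections import defaultdict

    # Assumed shape of category2score; only the += lines appear in the diff.
    category2score = defaultdict(lambda: {"correct": 0, "answered": 0})

    result = {"duration": "short", "category": "Culture",
              "pred_answer": "B", "answer_option": "B"}  # hypothetical record
    key = f"{result['duration']}_{result['category']}"
    category2score[key]["answered"] += 1
    category2score[key]["correct"] += result["pred_answer"] == result["answer_option"]  # True -> 1
    print(category2score[key])  # {'correct': 1, 'answered': 1}
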
@@ -151,24 +144,6 @@ def mint_aggregate_results(results):
                 total_answered += v["answered"]
         eval_logger.info(f"Evaluation on Categories: {category}: {100 * total_correct / total_answered if total_answered > 0 else 0 : .1f}%")
 
-    # for sub_cate in SUB_CATEGORIES:
-    #     total_correct = 0
-    #     total_answered = 0
-    #     for k, v in category2score.items():
-    #         if sub_cate in k:
-    #             total_correct += v["correct"]
-    #             total_answered += v["answered"]
-    #     eval_logger.info(f"Evaluation on Video Sub Categories: {sub_cate}: {100 * total_correct / total_answered if total_answered > 0 else 0 : .1f}%")
-
-    # for task_cate in TASK_CATEGORIES:
-    #     total_correct = 0
-    #     total_answered = 0
-    #     for k, v in category2score.items():
-    #         if task_cate in k:
-    #             total_correct += v["correct"]
-    #             total_answered += v["answered"]
-    #     eval_logger.info(f"Evaluation on Task Categories: {task_cate}: {100 * total_correct / total_answered if total_answered > 0 else 0 : .1f}%")
-
     total_correct = 0
     total_answered = 0
     for k, v in category2score.items():
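
Note: the logging lines guard against empty buckets by evaluating the conditional inside the f-string, and the " .1f" spec pads non-negative numbers with a leading space. In isolation:

    total_correct, total_answered = 7, 10
    print(f"{100 * total_correct / total_answered if total_answered > 0 else 0 : .1f}%")  # " 70.0%"
    total_answered = 0
    print(f"{100 * total_correct / total_answered if total_answered > 0 else 0 : .1f}%")  # " 0.0%"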