import pandas as pd
from pathlib import Path
import pyarrow  # used indirectly via pandas' to_parquet(engine="pyarrow"); imported here so a missing dependency fails early
import numpy as np
import sys
from tqdm.auto import tqdm
import logging
from datetime import datetime

sys.path.append(str(Path(__file__).parent / "runs" / "api_models"))

from compute_bootstrap_ci import (
    load_inference_results_by_grader,
    extract_config_from_log,
)
from metrics import compute_metrics
from omegaconf import OmegaConf
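# compute_bootstrap_ci and metrics are presumably local helper modules living under
# runs/api_models (made importable by the sys.path tweak above); OmegaConf is used
# further down to rebuild each experiment's config before calling compute_metrics.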

# Log to a timestamped file under logs/ and mirror everything to stdout.
log_dir = Path("logs")
log_dir.mkdir(exist_ok=True)
log_file = log_dir / f"create_parquet_files_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(log_file),
        logging.StreamHandler(sys.stdout)
    ]
)
logger = logging.getLogger(__name__)

# Keep a separate error-only log so failed experiment directories are easy to triage.
error_log_file = log_dir / f"create_parquet_files_errors_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"
error_handler = logging.FileHandler(error_log_file)
error_handler.setLevel(logging.ERROR)
error_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
logger.addHandler(error_handler)


def simplify_experiment_name(name):
    """Simplify an experiment/run name according to the mapping rules below."""
    if pd.isna(name):
        return name

    name = str(name)

    # API-model (zero-shot) runs: map raw run names to short, readable identifiers.
    mappings = {
        'sabia-3-zero-shot': 'sabia3-studentPrompt',
        'sabia-3-extractor-zero-shot': 'sabia3-extractor',
        'sabia-3-grader-zero-shot': 'sabia3-graderPrompt',

        'deepseek-reasoner-zero-shot': 'deepseekR1-studentPrompt',
        'deepseek-reasoner-extractor-zero-shot': 'deepseekR1-extractor',
        'deepseek-reasoner-grader-zero-shot': 'deepseekR1-graderPrompt',

        'gpt-4o-2024-11-20-zero-shot': 'gpt4o-studentPrompt',
        'gpt-4o-2024-11-20-extractor-zero-shot': 'gpt4o-extractor',
        'gpt-4o-2024-11-20-grader-zero-shot': 'gpt4o-graderPrompt',
    }

    for pattern, replacement in mappings.items():
        if pattern in name:
            name = name.replace(pattern, replacement)

    # Drop the 'jbcs2025_' project prefix.
    if name.startswith('jbcs2025_'):
        name = name[len('jbcs2025_'):]

    # Collapse names in which a task suffix appears more than once.
    duplication_patterns = [
        'llama31_classification_lora',
        'phi35_classification_lora',
        'phi4_classification_lora',
        'encoder_classification',
        'tucano_classification_lora',
    ]

    for pattern in duplication_patterns:
        count = name.count(f'-{pattern}-')
        if count > 1:
            parts = name.split(f'-{pattern}-')
            if len(parts) > 2:
                # Keep only the segments before the first and after the last occurrence.
                name = parts[0] + '-' + parts[-1]

    # Encoder models.
    if 'bert-base-portuguese-cased' in name:
        name = name.replace('bert-base-portuguese-cased', 'bertimbau-base')
    elif 'BERTugues-base-portuguese-cased' in name:
        name = name.replace('BERTugues-base-portuguese-cased', 'bertugues-base')
    elif 'bert-base-multilingual-cased' in name:
        name = name.replace('bert-base-multilingual-cased', 'mbert-base')
    elif 'bert-large-portuguese-cased' in name:
        name = name.replace('bert-large-portuguese-cased', 'bertimbau-large')
    elif 'albertina-1b5-portuguese-ptbr-encoder' in name:
        name = name.replace('albertina-1b5-portuguese-ptbr-encoder', 'albertina-1b5-ptbr')

    # LoRA-tuned decoder models (longer, more specific patterns are checked first).
    elif 'Llama-3.1-8B-llama31_classification_lora' in name:
        name = name.replace('Llama-3.1-8B-llama31_classification_lora', 'llama3.1-8b-lora')
    elif 'Llama-3.1-8B' in name:
        name = name.replace('Llama-3.1-8B', 'llama3.1-8b-lora')

    elif 'Tucano-2b4-Instruct-tucano_classification_lora' in name:
        name = name.replace('Tucano-2b4-Instruct-tucano_classification_lora', 'tucano2b4-lora')
    elif 'Tucano-2b4-Instruct' in name:
        name = name.replace('Tucano-2b4-Instruct', 'tucano2b4-lora')

    elif 'Phi-3.5-mini-instruct-phi35_classification_lora' in name:
        name = name.replace('Phi-3.5-mini-instruct-phi35_classification_lora', 'phi3.5-mini-lora')
    elif 'Phi-3.5-mini-instruct' in name:
        name = name.replace('Phi-3.5-mini-instruct', 'phi3.5-mini-lora')
    elif 'phi-4-phi4_classification_lora' in name:
        name = name.replace('phi-4-phi4_classification_lora', 'phi4-lora')
    elif 'phi-4' in name:
        name = name.replace('phi-4', 'phi4-lora')

    # Strip leftover task/adapter suffixes.
    name = name.replace('-encoder_classification', '')
    name = name.replace('_classification_lora', '')
    name = name.replace('-llama31', '')
    name = name.replace('-phi35', '')
    name = name.replace('-phi4', '')
    name = name.replace('-tucano', '')

    # Reassemble the remaining tokens as <model>-<competency>-<context>-<lora rank>.
    parts = name.split('-')

    competency = None
    context = None
    lora_rank = None
    model_parts = []

    i = 0
    while i < len(parts):
        part = parts[i]
        if part in ['C1', 'C2', 'C3', 'C4', 'C5']:
            competency = part
        elif part == 'essay_only':
            context = 'essay-only'
        elif part == 'full_context':
            context = 'full-context'
        elif part in ['essay', 'full'] and i + 1 < len(parts):
            # The context marker may have been split in two by the '-' separator.
            if parts[i + 1] == 'only':
                context = 'essay-only'
                i += 1
            elif parts[i + 1] == 'context':
                context = 'full-context'
                i += 1
        elif part in ['r8', 'r16']:
            lora_rank = part
        elif part and part not in ['only', 'context']:
            model_parts.append(part)
        i += 1

    new_parts = model_parts
    if competency:
        new_parts.append(competency)
    if context:
        new_parts.append(context)
    if lora_rank:
        new_parts.append(lora_rank)

    name = '-'.join(new_parts)

    # Collapse any remaining double separators.
    while '--' in name:
        name = name.replace('--', '-')

    return name
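
# Illustrative (hypothetical) inputs for simplify_experiment_name, assuming run
# directories that follow the patterns handled above:
#   'jbcs2025_bert-base-portuguese-cased-encoder_classification-C1-essay_only'
#       -> 'bertimbau-base-C1-essay-only'
#   'sabia-3-grader-zero-shot' -> 'sabia3-graderPrompt'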


def find_and_group_csvs():
    """Recursively collect the per-experiment CSV files, grouped by file name."""
    base = Path(".")
    groups = {
        "evaluation_results": sorted(base.rglob("evaluation_results.csv")),
        "bootstrap_confidence_intervals": sorted(
            base.rglob("bootstrap_confidence_intervals.csv")
        ),
    }
    for name, paths in groups.items():
        logger.info(f"Found {len(paths)} files for '{name}'")
        if not paths:
            logger.warning(f"No files found for '{name}'")
    return groups
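
# Expected per-experiment directory layout, inferred from the glob patterns used
# in this script (not a guaranteed contract of the experiment runner):
#   <experiment_dir>/evaluation_results.csv
#   <experiment_dir>/bootstrap_confidence_intervals.csv
#   <experiment_dir>/inference_results.jsonl  (or *_inference_results.jsonl)
#   <experiment_dir>/*run_inference_experiment.log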


def enhance_evaluation_results(eval_df, csv_paths):
    """Enhance evaluation results with per-grader metrics recomputed from the JSONL files."""
    enhanced_rows = []
    failed_count = 0

    # Map each row index of the concatenated eval_df back to the CSV shard it came from.
    row_to_path = {}
    current_idx = 0
    for path in csv_paths:
        df = pd.read_csv(path)
        for i in range(len(df)):
            row_to_path[current_idx + i] = path
        current_idx += len(df)

    for idx, row in tqdm(
        eval_df.iterrows(), desc="Processing evaluation rows", total=len(eval_df)
    ):
        csv_path = row_to_path.get(idx)
        if csv_path is None:
            error_msg = f"CSV file not found for row {idx}"
            logger.error(error_msg)
            failed_count += 1
            continue

        try:
            # The experiment directory name doubles as the experiment identifier.
            experiment_id = csv_path.parent.name
            experiment_id = simplify_experiment_name(experiment_id)

            # Locate the raw inference results for this experiment.
            jsonl_path = csv_path.parent / "inference_results.jsonl"
            if not jsonl_path.exists():
                jsonl_files = list(csv_path.parent.glob("*_inference_results.jsonl"))
                if jsonl_files:
                    jsonl_path = jsonl_files[0]
                else:
                    raise FileNotFoundError(f"JSONL file not found in {csv_path.parent}")

            # Locate the experiment log, which embeds the run configuration.
            log_files = list(csv_path.parent.glob("*run_inference_experiment.log"))
            if not log_files:
                raise FileNotFoundError(f"Log file not found in {csv_path.parent}")
            log_path = log_files[0]

            config_dict = extract_config_from_log(log_path)
            cfg = OmegaConf.create(config_dict)

            # Split predictions and labels by grader (A and B).
            grader_a_data, grader_b_data = load_inference_results_by_grader(jsonl_path)

            all_predictions_a = np.array(
                [data["prediction"] for data in grader_a_data.values()]
            )
            all_labels_a = np.array([data["label"] for data in grader_a_data.values()])
            all_predictions_b = np.array(
                [data["prediction"] for data in grader_b_data.values()]
            )
            all_labels_b = np.array([data["label"] for data in grader_b_data.values()])

            # Metrics over the concatenation of both graders should reproduce the CSV values.
            concat_predictions = np.concatenate([all_predictions_a, all_predictions_b])
            concat_labels = np.concatenate([all_labels_a, all_labels_b])
            metrics_concat = compute_metrics((concat_predictions, concat_labels), cfg)

            # Sanity check: recomputed metrics must match the original CSV within tolerance.
            tolerance = 1e-6
            for metric in ["accuracy", "QWK", "Macro_F1", "Weighted_F1"]:
                if metric in row and metric in metrics_concat:
                    original_value = row[metric]
                    computed_value = metrics_concat[metric]
                    assert abs(original_value - computed_value) <= tolerance, (
                        f"Metric {metric} mismatch: CSV={original_value}, Computed={computed_value}"
                    )

            # Row 1/4: metrics over the concatenated graders (as stored in the CSV).
            concat_row = row.copy()
            concat_row["experiment_id"] = experiment_id
            concat_row["metric_group"] = "concat(A,B)"
            enhanced_rows.append(concat_row)

            # Per-grader metrics.
            metrics_a = compute_metrics((all_predictions_a, all_labels_a), cfg)
            metrics_b = compute_metrics((all_predictions_b, all_labels_b), cfg)

            # Row 2/4: average of the two graders' metrics.
            avg_row = row.copy()
            avg_row["experiment_id"] = experiment_id
            avg_row["metric_group"] = "avg(A,B)"
            for metric in metrics_a:
                if metric in metrics_b and metric in avg_row:
                    avg_value = (metrics_a[metric] + metrics_b[metric]) / 2
                    avg_row[metric] = avg_value
            enhanced_rows.append(avg_row)

            # Row 3/4: grader A only.
            only_a_row = row.copy()
            only_a_row["experiment_id"] = experiment_id
            only_a_row["metric_group"] = "onlyA"
            for metric, value in metrics_a.items():
                if metric in only_a_row:
                    only_a_row[metric] = value
            enhanced_rows.append(only_a_row)

            # Row 4/4: grader B only.
            only_b_row = row.copy()
            only_b_row["experiment_id"] = experiment_id
            only_b_row["metric_group"] = "onlyB"
            for metric, value in metrics_b.items():
                if metric in only_b_row:
                    only_b_row[metric] = value
            enhanced_rows.append(only_b_row)

        except Exception as e:
            failed_count += 1
            error_msg = f"Failed to process {csv_path.parent if csv_path else 'unknown path'}: {str(e)}"
            logger.error(error_msg)

            import traceback
            logger.error(f"Traceback:\n{traceback.format_exc()}")

            continue

    # Four enhanced rows are emitted per successfully processed input row.
    logger.info(f"Successfully processed {len(enhanced_rows)//4} out of {len(eval_df)} rows")
    if failed_count > 0:
        logger.warning(f"Failed to process {failed_count} rows. Check error log: {error_log_file}")

    return pd.DataFrame(enhanced_rows)
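
# Each successfully processed input row therefore yields four output rows,
# distinguished by the 'metric_group' column ('concat(A,B)', 'avg(A,B)', 'onlyA',
# 'onlyB'), all carrying the simplified 'experiment_id'.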


def combine(paths, out_path):
    """Concatenate the CSV shards in `paths` and write them to a single parquet file."""
    if not paths:
        logger.info(f"No files to combine for {out_path}")
        return

    logger.info(f"Combining {len(paths)} files into {out_path}")
    dfs = []

    for p in paths:
        df = pd.read_csv(p)

        # The experiment directory name doubles as the experiment identifier.
        experiment_id = p.parent.name
        experiment_id = simplify_experiment_name(experiment_id)
        df["experiment_id"] = experiment_id

        dfs.append(df)

    # All shards must share the same header before they can be concatenated.
    cols = {tuple(df.columns) for df in dfs}
    if len(cols) > 1:
        error_msg = f"{out_path}: header mismatch across shards"
        logger.error(error_msg)
        raise ValueError(error_msg)

    combined = pd.concat(dfs, ignore_index=True)

    # out_path is expected to be a plain string (see the calls in __main__), so the
    # substring check works; evaluation results additionally get per-grader metrics.
    if "evaluation_results" in out_path:
        logger.info("Enhancing evaluation results with additional metrics...")
        combined = enhance_evaluation_results(combined, paths)

    combined.to_parquet(out_path, engine="pyarrow", index=False)
    logger.info(f"Successfully written {out_path} with {len(combined)} rows")


if __name__ == "__main__":
    logger.info(f"Starting parquet file creation. Logs will be saved to: {log_file}")
    logger.info(f"Error-only log will be saved to: {error_log_file}")

    groups = find_and_group_csvs()
    combine(groups["evaluation_results"], "evaluation_results-00000-of-00001.parquet")
    combine(
        groups["bootstrap_confidence_intervals"],
        "bootstrap_confidence_intervals-00000-of-00001.parquet",
    )

    logger.info("Parquet file creation completed")
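
# Note: this script is assumed to be run from the repository root (for example
# `python create_parquet_files.py`) so that the relative rglob() searches and the
# logs/ directory resolve against the experiment tree; the exact script file name
# is an assumption based on the log-file prefix used above.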