#!/usr/bin/env python3
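"""Evaluate a chat model on the Ekgren/swedish_skolprov dataset via OpenRouter.

Each item is sent as a system + user prompt pair and graded by exact string
match against the reference answer. Per-item results are written as JSONL,
with an aggregated per-test grade summary saved alongside.

Example invocation (the script name is a placeholder; the model name is the
example from the --model_name help text):

    OPEN_ROUTER_API_KEY=... python eval_skolprov.py \
        --model_name mistralai/mistral-small-3.1-24b-instruct \
        --output_path results/
"""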
import argparse
import json
import os

from datasets import load_dataset
from openai import OpenAI
from tqdm import tqdm

# --- Functions ---
def evaluate_item(client, item, model_name, model_params):
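    """Query the model once for a single dataset item and grade the reply by exact string match."""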
    system_prompt = item["system_prompt"]
    user_prompt   = item["prompt"]
    answer        = item["answer"]
    question_points = item["question_points"]
    uid = item["uid"]

    temperature = model_params['temperature']
    max_tokens = model_params['max_tokens']
    top_p = model_params['top_p']
    seed = model_params['seed']

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt}
    ]
    response = client.chat.completions.create(
        model=model_name,
        messages=messages,
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=top_p,
        seed=seed
    )
    # Guard against a null content field (e.g., refusals) before stripping
    model_output = (response.choices[0].message.content or "").strip()

    # Check if output matches the expected answer
    eval_passed = model_output.strip() == answer.strip()

    # response.usage is a pydantic model (or None); convert to a plain dict so it serializes to JSON
    token_usage = response.usage.model_dump() if response.usage else {}
    finish_reason = response.choices[0].finish_reason

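    # Assemble a JSONL-friendly record: the source item, the sampled trajectory, grades, and pass flags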
    result = {
        "data_source_id": uid,
        "item": item,
        "sample": {
            "trajectory": messages,
            "outputs": [{"role": "assistant", "content": model_output}],
            "finish_reason": finish_reason,
            "sampled_model_name": model_name,
            "sampled_model_params": {"seed": seed, "temperature": temperature, "max_tokens": max_tokens, "top_p": top_p},
            "token_usage": dict(token_usage),
        },
        "grades": {"String check": question_points if eval_passed else 0.0},
        "grader_samples": {},
        "passes": {"String check": eval_passed},
    }
    return result

def evaluate_data(client, data, results, model_name, model_params):
    """
    Evaluates a list of items using the provided OpenAI client.
    """
    results = {} if results is None else results
    with tqdm(data) as pbar:
        for row in pbar:
            uid = row['uid']
            if uid in results:
                tqdm.write(f"Skipping uid {uid}: already evaluated.")
                continue
            try:
                result = evaluate_item(client, row, model_name, model_params)
                results[uid] = result
                pbar.set_description(f"Evaluated row with uid {uid}")
            except Exception as e:
                tqdm.write(f"Error evaluating uid {uid}: {e}")
    return results

def get_grades(results, model_name):
    """
    Returns a dictionary of grades for each test_id.
    """
    grades = {'sampled_model_name': model_name}
    for key, item in results.items():
        test_id = item['item']['test_id']
        point = item['grades']['String check']
        if test_id not in grades:
            grades[test_id] = {'points': 0.0, 'total': 0}
        # Accumulate as float so fractional question points are not truncated
        grades[test_id]['points'] += float(point)
        grades[test_id]['total'] += 1
    return grades

def main():
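    """Parse CLI arguments, run the evaluation, and write results and grade summaries to disk."""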
    parser = argparse.ArgumentParser(description="Evaluate dataset using an OpenAI client")
    parser.add_argument("--model_name", type=str, required=True, help="Model name to use (e.g., mistralai/mistral-small-3.1-24b-instruct)")
    parser.add_argument("--eval_subset", type=str, default="all", help="Evaluation subset (default: all)")
    parser.add_argument("--output_path", type=str, required=True, help="Path for saving the results")
    args = parser.parse_args()

    # Read API key from environment variable
    API_KEY = os.environ.get('OPEN_ROUTER_API_KEY')
    if API_KEY is None:
        raise SystemExit("Error: OPEN_ROUTER_API_KEY environment variable not set.")

    EVAL_DATASET = "Ekgren/swedish_skolprov"
    EVAL_SUBSET = args.eval_subset
    MODEL_NAME = args.model_name
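    # Fixed sampling parameters; the seed makes runs repeatable where the provider honors it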
    model_params = {'temperature': 1, 'max_tokens': 2048, 'top_p': 1, 'seed': 42}

    # Load dataset
    ds = load_dataset(EVAL_DATASET, EVAL_SUBSET)
    ds = ds['train']

    # Initialize an OpenAI-compatible client pointed at OpenRouter
    client = OpenAI(
        api_key=API_KEY,
        base_url="https://openrouter.ai/api/v1"
    )

    results = evaluate_data(client, ds, None, MODEL_NAME, model_params)

    # Build file names
    file_name = EVAL_DATASET.replace("/", "-") + "_" + EVAL_SUBSET + "_" + MODEL_NAME.replace("/", "-") + ".jsonl"
    file_name = file_name.lower()
    grade_file_name = file_name.replace(".jsonl", "_grades.json")

    # Ensure output directory exists
    os.makedirs(args.output_path, exist_ok=True)

    results_file_path = os.path.join(args.output_path, file_name)
    grade_file_path = os.path.join(args.output_path, grade_file_name)

    grades = get_grades(results, MODEL_NAME)
    print(grades)

    # Save results
    with open(results_file_path, "w", encoding="utf-8") as f:
        for item in results.values():
            # ensure_ascii=False keeps Swedish characters readable in the output
            f.write(json.dumps(item, ensure_ascii=False) + "\n")

    # Save grades
    with open(grade_file_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(grades, ensure_ascii=False) + "\n")

if __name__ == "__main__":
    main()