import time

import pandas as pd
from tqdm import tqdm  # progress bar for the evaluation loop
from datasets import load_dataset
from sklearn.metrics import (
    accuracy_score,
    f1_score,
    precision_score,
    recall_score,
    log_loss,
)

from inference import (
    zero_shot_inference,
    few_shot_inference,
    base_model_inference,
)

# Inference functions to evaluate
models_to_evaluate = {
    "Base model": base_model_inference,
    "Zero-shot": zero_shot_inference,
    "Few-shot": few_shot_inference,
}

label_map = {0: "World", 1: "Sports", 2: "Business", 3: "Sci/Tech"}

# Load a subset of the AG News test set
dataset = load_dataset("ag_news", split="test[:10%]")


def evaluate_model(name, inference_func):
    print(f"\n🔍 Evaluating model: {name}")

    true_labels = []
    pred_labels = []
    all_probs = []

    start = time.time()

    for example in tqdm(dataset, desc=f"Model: {name}"):
        text = example["text"]
        true_label = example["label"]

        try:
            pred_class, scores = inference_func(text)
        except Exception as e:
            print(f"⚠️ Error on an example: {e}")
            continue

        # Skip predictions that are not one of the known class names,
        # rather than crashing on the .index() lookup below
        if pred_class not in label_map.values():
            print(f"⚠️ Unknown predicted class: {pred_class}")
            continue

        # Scores for the 4 classes, in label_map order
        prob_dist = [scores.get(c, 0.0) for c in label_map.values()]

        # Normalize each row to sum to 1, since log_loss expects a
        # proper probability distribution; fall back to uniform if all
        # scores are zero
        total = sum(prob_dist)
        if total > 0:
            prob_dist = [p / total for p in prob_dist]
        else:
            prob_dist = [1.0 / len(label_map)] * len(label_map)

        pred_index = list(label_map.values()).index(pred_class)

        pred_labels.append(pred_index)
        true_labels.append(true_label)
        all_probs.append(prob_dist)

    runtime = round(time.time() - start, 2)

    acc = accuracy_score(true_labels, pred_labels)
    f1 = f1_score(true_labels, pred_labels, average="weighted")
    prec = precision_score(true_labels, pred_labels, average="weighted")
    rec = recall_score(true_labels, pred_labels, average="weighted")
    # Pass labels explicitly in case some classes never appear in this subset
    loss = log_loss(true_labels, all_probs, labels=list(label_map.keys()))

    print(f"✅ Results for {name}:")
    print(f"- Accuracy : {acc:.4f}")
    print(f"- F1 Score : {f1:.4f}")
    print(f"- Precision: {prec:.4f}")
    print(f"- Recall   : {rec:.4f}")
    print(f"- Log Loss : {loss:.4f}")
    print(f"- Runtime  : {runtime:.2f} sec\n")

    return {
        "model": name,
        "accuracy": acc,
        "f1_score": f1,
        "precision": prec,
        "recall": rec,
        "loss": loss,
        "runtime": runtime,
    }


# Evaluate all models
results = []
for name, func in models_to_evaluate.items():
    results.append(evaluate_model(name, func))

# Summary table
df = pd.DataFrame(results)
print(df)
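

# --- Reference: expected inference function contract -------------------
# The script above assumes each function imported from `inference` takes
# the raw text and returns a (pred_class, scores) tuple, where pred_class
# is one of the label_map strings and scores maps each class name to a
# probability. The stub below is a minimal sketch of that contract only
# (a hypothetical example, not the actual code in inference.py).

def _example_inference(text):
    """Hypothetical stub illustrating the (pred_class, scores) contract."""
    # A real implementation would score `text` with a model; here we use
    # fixed dummy probabilities purely to show the return shape.
    scores = {"World": 0.1, "Sports": 0.7, "Business": 0.1, "Sci/Tech": 0.1}
    pred_class = max(scores, key=scores.get)
    return pred_class, scores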