{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# AI Model Gaming Performance\n",
    "\n",
    "Builds two figures from `rank_data_03_25_2025.json`:\n",
    "\n",
    "1. A horizontal bar chart of the Super Mario Bros leaderboard.\n",
    "2. A normalized cross-game performance comparison of all models."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "import seaborn as sns\n",
    "\n",
    "from data_visualization import create_horizontal_bar_chart\n",
    "from leaderboard_utils import GAME_ORDER, get_combined_leaderboard, get_mario_leaderboard"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the rank data once; both figures below read from it.\n",
    "with open('rank_data_03_25_2025.json', 'r') as f:\n",
    "    rank_data = json.load(f)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Super Mario Bros leaderboard"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build, save, and display the Super Mario Bros bar chart.\n",
    "mario_df = get_mario_leaderboard(rank_data)\n",
    "fig = create_horizontal_bar_chart(mario_df, 'Super Mario Bros')\n",
    "fig.savefig('mario_performance.png', bbox_inches='tight', dpi=300)\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Cross-game comparison\n",
    "\n",
    "Scores are z-score normalized *within each game* and rescaled to a 0\u2013100 range,\n",
    "so bar heights are comparable across games with very different raw score scales.\n",
    "Only models that participated in a game (non-`'_'`, positive score) are shown for it."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Get combined leaderboard with all games\n",
    "selected_games = {game: True for game in GAME_ORDER}\n",
    "df = get_combined_leaderboard(rank_data, selected_games)\n",
    "\n",
    "sns.set_style('whitegrid')\n",
    "plt.figure(figsize=(16, 9))\n",
    "ax = plt.gca()\n",
    "\n",
    "models = df['Player'].unique()\n",
    "n_games = len(GAME_ORDER)\n",
    "\n",
    "# Professional palette. Each model is assigned ONE fixed color up front so the\n",
    "# legend is unambiguous. (Previously colors were indexed by per-game rank, so\n",
    "# the same color meant different models in different games.)\n",
    "colors = ['#2E86C1', '#E74C3C', '#27AE60', '#F39C12', '#8E44AD',\n",
    "          '#16A085', '#D35400', '#2980B9', '#C0392B', '#2ECC71']\n",
    "model_colors = {model: colors[i % len(colors)] for i, model in enumerate(models)}\n",
    "\n",
    "def normalize_values(values, mean, std):\n",
    "    \"\"\"Z-score normalize `values` and rescale to the 0-100 range.\n",
    "\n",
    "    With zero spread, positive values map to the 50 midpoint and\n",
    "    non-positive values to 0.\n",
    "    \"\"\"\n",
    "    if std == 0:\n",
    "        return [50 if v > 0 else 0 for v in values]\n",
    "    z_scores = [(v - mean) / std for v in values]\n",
    "    return [max(0, min(100, (z * 30) + 50)) for z in z_scores]\n",
    "\n",
    "for game_idx, game in enumerate(GAME_ORDER):\n",
    "    # Collect (model, score) pairs for models that actually played this game.\n",
    "    game_scores = []\n",
    "    for model in models:\n",
    "        score = df[df['Player'] == model][f'{game} Score'].values[0]\n",
    "        if score != '_' and float(score) > 0:\n",
    "            game_scores.append((model, float(score)))\n",
    "\n",
    "    if not game_scores:\n",
    "        continue  # avoid division by zero / NaN mean when nobody participated\n",
    "\n",
    "    # Sort from highest to lowest raw score.\n",
    "    game_scores.sort(key=lambda x: x[1], reverse=True)\n",
    "    sorted_models = [x[0] for x in game_scores]\n",
    "    scores = [x[1] for x in game_scores]\n",
    "\n",
    "    normalized_scores = normalize_values(scores, np.mean(scores), np.std(scores))\n",
    "\n",
    "    # Bar width adapts to how many models participated in this game.\n",
    "    bar_width = 0.8 / len(sorted_models)\n",
    "    for i, (model, score) in enumerate(zip(sorted_models, normalized_scores)):\n",
    "        ax.bar(game_idx + i * bar_width, score,\n",
    "               width=bar_width,\n",
    "               label=model,  # duplicate labels are de-duplicated below\n",
    "               color=model_colors[model],\n",
    "               alpha=0.8)\n",
    "\n",
    "# Customize the plot\n",
    "ax.set_xticks(np.arange(n_games))\n",
    "ax.set_xticklabels(GAME_ORDER, rotation=45, ha='right', fontsize=10)\n",
    "ax.set_ylabel('Normalized Performance Score', fontsize=12)\n",
    "ax.set_title('AI Model Performance Comparison Across Gaming Tasks\\n'\n",
    "             '(Showing only participating models for each game)',\n",
    "             fontsize=14, pad=20)\n",
    "ax.grid(True, axis='y', linestyle='--', alpha=0.3)\n",
    "\n",
    "# Create legend with unique entries (each model was labeled once per game).\n",
    "handles, labels = ax.get_legend_handles_labels()\n",
    "by_label = dict(zip(labels, handles))\n",
    "ax.legend(by_label.values(), by_label.keys(),\n",
    "          bbox_to_anchor=(1.05, 1),\n",
    "          loc='upper left',\n",
    "          fontsize=9,\n",
    "          title='AI Models',\n",
    "          title_fontsize=10)\n",
    "\n",
    "plt.figtext(0.5, 0.01,\n",
    "            'Note: Scores are normalized within each game. Only models that participated in each game are shown.',\n",
    "            ha='center', fontsize=9, style='italic')\n",
    "\n",
    "# Adjust layout to prevent label cutoff, then save and show.\n",
    "plt.tight_layout()\n",
    "plt.savefig('ai_model_gaming_performance_clean.png', bbox_inches='tight', dpi=300)\n",
    "plt.show()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "local_cua",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}