{ "cells": [ { "cell_type": "markdown", "id": "d28f887e", "metadata": {}, "source": [ "# COLLIDE-2V — Six‑Class Classifier\n", "\n", "This notebook streams COLLIDE-1m (1 million event subset of COLLIDE-2V) dataset from the Hugging Face Hub and builds a **fixed, physics‑aware feature vector** per event from **FullReco** variables only:\n", "\n", "**Per event features**\n", "- **Particles (PUPPI, top‑20 by pT):** for each particle we keep *(pT, η, φ, charge, mass, PID, PuppiW)* → 7 × 20 = **140**\n", "- **Jets (AK4, top‑4 by pT):** *(pT, η, φ, mass, btag, charge)* → 6 × 4 = **24**\n", "- **Leading leptons/photons:** \n", " - Electron: *(pT, η, φ, EhadOverEem, IsoRhoCorr)* → **5** \n", " - MuonTight: *(pT, η, φ, IsoRhoCorr)* → **4** \n", " - PhotonTight: *(pT, η, φ)* → **3**\n", "- **MET:** *(PUPPIMET_MET, PUPPIMET_φ, MET_MET, MET_φ)* → **4**\n", "- **Primary Vertex:** *(Z, SumPT2 of best PV)* → **2**\n", "- **Counts:** *(N_PUPPIPart, N_JetAK4)* → **2**\n", "\n", "Total vector length = **184**.\n", "\n", "We then train a tiny MLP classifier on **six classes** (one per family):\n", "- DY: `DY to ll`\n", "- QCD: `QCD inclusive`\n", "- SingleHiggs: `VBFHtautau`\n", "- top: `tt all-lept`\n", "- diboson: `WZ (semi-leptonic)`\n", "- diHiggs: `HH bbtautau`\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "id": "56d3eb16", "metadata": {}, "outputs": [], "source": [ "# If needed (Colab/Kaggle/etc.). Comment out if your env already has these.\n", "%pip -q install datasets==2.21.0 huggingface_hub==0.24.6 fsspec==2024.6.1 pyarrow==16.1.0 torch --extra-index-url https://download.pytorch.org/whl/cpu\n" ] }, { "cell_type": "code", "execution_count": null, "id": "e0301175", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Using device: cpu\n" ] }, { "data": { "text/plain": [ "" ] }, "execution_count": 1, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from typing import List, Dict, Any, Iterable, Optional, Tuple\n", "import random\n", "\n", "import torch\n", "from torch import nn\n", "from torch.utils.data import DataLoader, IterableDataset as TorchIterable\n", "\n", "import pyarrow as pa\n", "import pyarrow.parquet as pq\n", "\n", "from datasets import IterableDataset, interleave_datasets, Features, Sequence, Value, ClassLabel\n", "from huggingface_hub import HfApi, HfFileSystem\n", "\n", "# ====== USER CONFIG ======\n", "HF_REPO = \"fastmachinelearning/collide-1m\" \n", "\n", "SELECTED_6 = {\n", " \"DY\": \"DY to ll\",\n", " \"QCD\": \"QCD inclusive\",\n", " \"SingleHiggs\": \"VBFHtautau\",\n", " \"top\": \"tt all-lept\",\n", " \"diboson\": \"WZ (semi-leptonic)\",\n", " \"diHiggs\": \"HH bbtautau\",\n", "}\n", "\n", "# Feature packing hyperparams\n", "K_PART = 20 # top-K PUPPI particles by pT\n", "K_JET = 4 # top-J AK4 jets by pT\n", "\n", "# Training config\n", "TRAIN_PER_CLASS = 512\n", "VAL_PER_CLASS = 100\n", "BATCH_SIZE = 256\n", "EPOCHS = 10\n", "LR = 2e-3\n", "SEED = 42\n", "DEVICE = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n", "print(f\"Using device: {DEVICE}\")\n", "random.seed(SEED)\n", "torch.manual_seed(SEED)\n" ] }, { "cell_type": "code", "execution_count": 2, "id": "3292efac", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Classes: ['DY', 'QCD', 'SingleHiggs', 'top', 'diboson', 'diHiggs']\n", "Pretty per class: {'DY': 'DY to ll', 'QCD': 'QCD inclusive', 'SingleHiggs': 'VBFHtautau', 'top': 'tt all-lept', 'diboson': 'WZ (semi-leptonic)', 'diHiggs': 'HH bbtautau'}\n", "Folder per class: {'DY': 'DYJetsToLL_13TeV-madgraphMLM-pythia8', 'QCD': 'QCD_HT50toInf', 'SingleHiggs': 'VBFHtautau', 'top': 'tt0123j_5f_ckm_LO_MLM_leptonic', 'diboson': 'WZ_semileptonic', 'diHiggs': 'HH_bbtautau'}\n" ] } ], "source": [ "# Pretty name -> folder name (from your mapping)\n", "PROCESS_TO_FOLDER = {\n", " # DY / Z / W\n", " \"DY to ll\": \"DYJetsToLL_13TeV-madgraphMLM-pythia8\",\n", " \"Z -> vv + jet\": \"ZJetsTovv_13TeV-madgraphMLM-pythia8\",\n", " \"Z -> qq (uds)\": \"ZJetsToQQ_13TeV-madgraphMLM-pythia8\",\n", " \"Z -> bb\": \"ZJetsTobb_13TeV-madgraphMLM-pythia8\",\n", " \"Z -> cc\": \"ZJetsTocc_13TeV-madgraphMLM-pythia8\",\n", " \"W -> lv\": \"WJetsToLNu_13TeV-madgraphMLM-pythia8\",\n", " \"W -> qq\": \"WJetsToQQ_13TeV-madgraphMLM-pythia8\",\n", " \"gamma\": \"gamma\",\n", " \"gamma + V\": \"gamma_V\",\n", " \"tri-gamma\": \"tri_gamma\",\n", "\n", " # QCD\n", " \"QCD inclusive\": \"QCD_HT50toInf\",\n", " \"QCD bb\": \"QCD_HT50tobb\",\n", " \"Minbias / Soft QCD\": \"minbias\",\n", "\n", " # top\n", " \"tt all-hadr\": \"tt0123j_5f_ckm_LO_MLM_hadronic\",\n", " \"tt semi-lept\": \"tt0123j_5f_ckm_LO_MLM_semiLeptonic\",\n", " \"tt all-lept\": \"tt0123j_5f_ckm_LO_MLM_leptonic\",\n", " \"ttH incl\": \"ttH_incl\",\n", " \"tttt\": \"tttt_incl\",\n", " \"ttW incl\": \"ttW_incl\",\n", " \"ttZ incl\": \"ttZ_incl\",\n", "\n", " # dibosons\n", " \"WW (all-leptonic)\": \"WW_leptonic\",\n", " \"WW (all-hadronic)\": \"WW_hadronic\",\n", " \"WW (semi-leptonic)\": \"WW_semileptonic\",\n", " \"WZ (all-leptonic)\": \"WZ_leptonic\",\n", " \"WZ (all-hadronic)\": \"WZ_hadronic\",\n", " \"WZ (semi-leptonic)\": \"WZ_semileptonic\",\n", " \"ZZ (all-leptonic)\": \"ZZ_leptonic\",\n", " \"ZZ (all-hadronic)\": \"ZZ_hadronic\",\n", " \"ZZ (semi-leptonic)\": \"ZZ_semileptonic\",\n", " \"VVV\": \"VVV_incl\",\n", " \"VH incl\": \"VH_incl\",\n", "\n", " # single-Higgs\n", " \"ggHbb\": \"ggHbb\",\n", " \"ggHcc\": \"ggHcc\",\n", " \"ggHgammagamma\": \"ggHgammagamma\",\n", " \"ggHgluglu\": \"ggHgluglu\",\n", " \"ggHtautau\": \"ggHtautau\",\n", " \"ggHWW\": \"ggHWW\",\n", " \"ggHZZ\": \"ggHZZ\",\n", " \"VBFHbb\": \"VBFHbb\",\n", " \"VBFHcc\": \"VBFHcc\",\n", " \"VBFHgammagamma\": \"VBFHgammagamma\",\n", " \"VBFHgluglu\": \"VBFHgluglu\",\n", " \"VBFHtautau\": \"VBFHtautau\",\n", " \"VBFHWW\": \"VBFHWW\",\n", " \"VBFHZZ\": \"VBFHZZ\",\n", "\n", " # di-Higgs\n", " \"HH 4b\": \"HH_4b\",\n", " \"HH bbtautau\": \"HH_bbtautau\",\n", " \"HH bbWW\": \"HH_bbWW\",\n", " \"HH bbZZ\": \"HH_bbZZ\",\n", " \"HH bbgammagamma\": \"HH_bbgammagamma\",\n", "}\n", "\n", "CLASS_NAMES = list(SELECTED_6.keys())\n", "PRETTY = {c: SELECTED_6[c] for c in CLASS_NAMES}\n", "FOLDER = {c: PROCESS_TO_FOLDER[PRETTY[c]] for c in CLASS_NAMES}\n", "LABELS = {c: i for i, c in enumerate(CLASS_NAMES)}\n", "print(\"Classes:\", CLASS_NAMES)\n", "print(\"Pretty per class:\", PRETTY)\n", "print(\"Folder per class:\", FOLDER)\n" ] }, { "cell_type": "code", "execution_count": 3, "id": "2f3d1a00", "metadata": {}, "outputs": [], "source": [ "# Columns to read (FullReco only)\n", "PUPPI_PART_COLS = [\n", " 'FullReco_PUPPIPart_PT','FullReco_PUPPIPart_Eta','FullReco_PUPPIPart_Phi',\n", " 'FullReco_PUPPIPart_Charge','FullReco_PUPPIPart_Mass','FullReco_PUPPIPart_PID',\n", " 'FullReco_PUPPIPart_PuppiW'\n", "]\n", "\n", "JET_AK4_COLS = [\n", " 'FullReco_JetAK4_PT','FullReco_JetAK4_Eta','FullReco_JetAK4_Phi',\n", " 'FullReco_JetAK4_Mass','FullReco_JetAK4_BTag','FullReco_JetAK4_Charge'\n", "]\n", "\n", "ELEC_COLS = [\n", " 'FullReco_Electron_PT','FullReco_Electron_Eta','FullReco_Electron_Phi',\n", " 'FullReco_Electron_EhadOverEem','FullReco_Electron_IsolationVarRhoCorr'\n", "]\n", "\n", "MUON_COLS = [\n", " 'FullReco_MuonTight_PT','FullReco_MuonTight_Eta','FullReco_MuonTight_Phi',\n", " 'FullReco_MuonTight_IsolationVarRhoCorr'\n", "]\n", "\n", "PHOT_COLS = [\n", " 'FullReco_PhotonTight_PT','FullReco_PhotonTight_Eta','FullReco_PhotonTight_Phi'\n", "]\n", "\n", "MET_COLS = [\n", " 'FullReco_PUPPIMET_MET','FullReco_PUPPIMET_Phi',\n", " 'FullReco_MET_MET','FullReco_MET_Phi'\n", "]\n", "\n", "PV_COLS = [\n", " 'FullReco_PrimaryVertex_Z','FullReco_PrimaryVertex_SumPT2'\n", "]\n", "\n", "ALL_COLS = PUPPI_PART_COLS + JET_AK4_COLS + ELEC_COLS + MUON_COLS + PHOT_COLS + MET_COLS + PV_COLS\n", "\n", "# Fixed vector length: 184\n", "VLEN = 184\n" ] }, { "cell_type": "code", "execution_count": 4, "id": "7afc8005", "metadata": {}, "outputs": [], "source": [ "api = HfApi()\n", "fs = HfFileSystem()\n", "\n", "def list_repo_parquet_files(repo_id: str, subfolder: str) -> List[str]:\n", " files = api.list_repo_files(repo_id=repo_id, repo_type='dataset')\n", " prefix = f\"{subfolder.strip('/')}/\"\n", " return [f for f in files if f.startswith(prefix) and f.endswith('.parquet')]\n", "\n", "def _safe_list(x):\n", " if x is None:\n", " return []\n", " if isinstance(x, list):\n", " return x\n", " return [x]\n", "\n", "def _pack_topk_by_pt(pt, *others, k: int, fill: List[float]):\n", " idx = sorted(range(len(pt)), key=lambda i: pt[i] if pt[i] is not None else -1.0, reverse=True)\n", " out = []\n", " for j in range(k):\n", " if j < len(idx):\n", " i = idx[j]\n", " vals = [pt[i]] + [arr[i] if i < len(arr) else 0.0 for arr in others]\n", " else:\n", " vals = fill\n", " out.extend([float(v if v is not None else 0.0) for v in vals])\n", " return out\n", "\n", "def _pack_leading(vals: List[List[float]], fill: List[float]) -> List[float]:\n", " if not vals or not vals[0]:\n", " return fill\n", " pt = vals[0]\n", " if len(pt) == 0:\n", " return fill\n", " i = max(range(len(pt)), key=lambda j: pt[j] if pt[j] is not None else -1.0)\n", " chosen = [arr[i] if i < len(arr) else 0.0 for arr in vals]\n", " return [float(v if v is not None else 0.0) for v in chosen]\n", "\n", "def _best_pv(z_list, sumpt2_list) -> Tuple[float, float]:\n", " if not sumpt2_list:\n", " z = z_list[0] if z_list else 0.0\n", " s = sumpt2_list[0] if sumpt2_list else 0.0\n", " return float(z if z is not None else 0.0), float(s if s is not None else 0.0)\n", " j = max(range(len(sumpt2_list)), key=lambda i: sumpt2_list[i] if sumpt2_list[i] is not None else -1.0)\n", " z = z_list[j] if j < len(z_list) else 0.0\n", " s = sumpt2_list[j]\n", " return float(z if z is not None else 0.0), float(s if s is not None else 0.0)\n", "\n", "def build_vector(ev: Dict[str, Any]) -> List[float]:\n", " # PUPPI particles\n", " p_pt = _safe_list(ev.get('FullReco_PUPPIPart_PT'))\n", " p_eta = _safe_list(ev.get('FullReco_PUPPIPart_Eta'))\n", " p_phi = _safe_list(ev.get('FullReco_PUPPIPart_Phi'))\n", " p_ch = _safe_list(ev.get('FullReco_PUPPIPart_Charge'))\n", " p_m = _safe_list(ev.get('FullReco_PUPPIPart_Mass'))\n", " p_pid = _safe_list(ev.get('FullReco_PUPPIPart_PID'))\n", " p_w = _safe_list(ev.get('FullReco_PUPPIPart_PuppiW'))\n", " part = _pack_topk_by_pt(p_pt, p_eta, p_phi, p_ch, p_m, p_pid, p_w, k=20, fill=[0.0]*7)\n", "\n", " # AK4 jets\n", " j_pt = _safe_list(ev.get('FullReco_JetAK4_PT'))\n", " j_eta = _safe_list(ev.get('FullReco_JetAK4_Eta'))\n", " j_phi = _safe_list(ev.get('FullReco_JetAK4_Phi'))\n", " j_m = _safe_list(ev.get('FullReco_JetAK4_Mass'))\n", " j_bt = _safe_list(ev.get('FullReco_JetAK4_BTag'))\n", " j_ch = _safe_list(ev.get('FullReco_JetAK4_Charge'))\n", " jets = _pack_topk_by_pt(j_pt, j_eta, j_phi, j_m, j_bt, j_ch, k=4, fill=[0.0]*6)\n", "\n", " # Leading leptons/photons\n", " e_pt = _safe_list(ev.get('FullReco_Electron_PT'))\n", " e_eta = _safe_list(ev.get('FullReco_Electron_Eta'))\n", " e_phi = _safe_list(ev.get('FullReco_Electron_Phi'))\n", " e_hoe = _safe_list(ev.get('FullReco_Electron_EhadOverEem'))\n", " e_iso = _safe_list(ev.get('FullReco_Electron_IsolationVarRhoCorr'))\n", " elec = _pack_leading([e_pt, e_eta, e_phi, e_hoe, e_iso], fill=[0.0]*5)\n", "\n", " m_pt = _safe_list(ev.get('FullReco_MuonTight_PT'))\n", " m_eta = _safe_list(ev.get('FullReco_MuonTight_Eta'))\n", " m_phi = _safe_list(ev.get('FullReco_MuonTight_Phi'))\n", " m_iso = _safe_list(ev.get('FullReco_MuonTight_IsolationVarRhoCorr'))\n", " muon = _pack_leading([m_pt, m_eta, m_phi, m_iso], fill=[0.0]*4)\n", "\n", " g_pt = _safe_list(ev.get('FullReco_PhotonTight_PT'))\n", " g_eta = _safe_list(ev.get('FullReco_PhotonTight_Eta'))\n", " g_phi = _safe_list(ev.get('FullReco_PhotonTight_Phi'))\n", " phot = _pack_leading([g_pt, g_eta, g_phi], fill=[0.0]*3)\n", "\n", " # MET\n", " pmet = float(_safe_list(ev.get('FullReco_PUPPIMET_MET'))[0]) if _safe_list(ev.get('FullReco_PUPPIMET_MET')) else 0.0\n", " pphi = float(_safe_list(ev.get('FullReco_PUPPIMET_Phi'))[0]) if _safe_list(ev.get('FullReco_PUPPIMET_Phi')) else 0.0\n", " met = float(_safe_list(ev.get('FullReco_MET_MET'))[0]) if _safe_list(ev.get('FullReco_MET_MET')) else 0.0\n", " mphi = float(_safe_list(ev.get('FullReco_MET_Phi'))[0]) if _safe_list(ev.get('FullReco_MET_Phi')) else 0.0\n", "\n", " # Primary vertex\n", " pvz_list = _safe_list(ev.get('FullReco_PrimaryVertex_Z'))\n", " pvsp2_list = _safe_list(ev.get('FullReco_PrimaryVertex_SumPT2'))\n", " pvz, pvsp2 = _best_pv(pvz_list, pvsp2_list)\n", "\n", " # Counts\n", " n_part = float(len(p_pt))\n", " n_jet = float(len(j_pt))\n", "\n", " vec = part + jets + elec + muon + phot + [pmet, pphi, met, mphi] + [pvz, pvsp2] + [n_part, n_jet]\n", " if len(vec) != 184:\n", " if len(vec) < 184:\n", " vec = vec + [0.0]*(184-len(vec))\n", " else:\n", " vec = vec[:184]\n", " return vec\n", "\n", "def generate_examples(repo_id: str, process_folder: str, label_id: int,\n", " per_class_limit: int, seed: int = 42):\n", " files = list_repo_parquet_files(repo_id, process_folder)\n", " if not files:\n", " raise RuntimeError(f\"No parquet under '{process_folder}' in {repo_id}\")\n", " rng = random.Random(seed)\n", " rng.shuffle(files)\n", " emitted = 0\n", " for rel in files:\n", " path = f\"hf://datasets/{repo_id}/{rel}\"\n", " with fs.open(path, 'rb') as fh:\n", " pqf = pq.ParquetFile(fh)\n", " for batch in pqf.iter_batches(columns=ALL_COLS):\n", " tbl = pa.Table.from_batches([batch])\n", " pyd = tbl.to_pydict()\n", " n = tbl.num_rows\n", " cols = list(pyd.keys())\n", " for i in range(n):\n", " ev = {k: pyd[k][i] for k in cols}\n", " x = build_vector(ev)\n", " yield {\"x\": x, \"label\": label_id}\n", " emitted += 1\n", " if emitted >= per_class_limit:\n", " return\n" ] }, { "cell_type": "code", "execution_count": 5, "id": "8089bf8b", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "IterableDataset({\n", " features: ['x', 'label'],\n", " n_shards: 1\n", "})\n", "IterableDataset({\n", " features: ['x', 'label'],\n", " n_shards: 1\n", "})\n" ] } ], "source": [ "# Build datasets with a FIXED schema\n", "features = Features({\n", " 'x': Sequence(Value('float32'), length=184),\n", " 'label': ClassLabel(names=CLASS_NAMES),\n", "})\n", "\n", "def make_split(repo_id: str, per_class: int, seed: int) -> IterableDataset:\n", " parts = []\n", " for cname in CLASS_NAMES:\n", " ds = IterableDataset.from_generator(\n", " generate_examples,\n", " gen_kwargs=dict(\n", " repo_id=repo_id,\n", " process_folder=FOLDER[cname],\n", " label_id=LABELS[cname],\n", " per_class_limit=per_class,\n", " seed=seed + LABELS[cname],\n", " ),\n", " features=features,\n", " )\n", " parts.append(ds)\n", " return interleave_datasets(parts, seed=seed)\n", "\n", "train_stream = make_split(HF_REPO, TRAIN_PER_CLASS, SEED)\n", "val_stream = make_split(HF_REPO, VAL_PER_CLASS, SEED+1000)\n", "print(train_stream)\n", "print(val_stream)\n" ] }, { "cell_type": "code", "execution_count": 6, "id": "e283afd8", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Estimated mean/std for 184 features.\n" ] } ], "source": [ "# Compute mean/std for scaling\n", "def estimate_mean_std(hf_stream: IterableDataset, max_samples: int = 5000):\n", " count = 0\n", " mean = torch.zeros(184)\n", " M2 = torch.zeros(184)\n", " for ex in hf_stream.take(max_samples):\n", " x = torch.tensor(ex['x'], dtype=torch.float32)\n", " count += 1\n", " delta = x - mean\n", " mean += delta / max(count, 1)\n", " delta2 = x - mean\n", " M2 += delta * delta2\n", " var = (M2 / max(count - 1, 1))\n", " std = torch.sqrt(var + 1e-6)\n", " return mean, std\n", "\n", "stats_stream = make_split(HF_REPO, per_class=min(256, TRAIN_PER_CLASS), seed=SEED+222)\n", "MEAN, STD = estimate_mean_std(stats_stream, max_samples=512)\n", "print(\"Estimated mean/std for\", len(MEAN), \"features.\")\n" ] }, { "cell_type": "code", "execution_count": 7, "id": "f31332e0", "metadata": {}, "outputs": [], "source": [ "class HFToTorch(TorchIterable):\n", " def __init__(self, hf_stream: IterableDataset):\n", " self.hf_stream = hf_stream\n", " def __iter__(self):\n", " return iter(self.hf_stream)\n", "\n", "class CollateCLF:\n", " def __init__(self, mean: torch.Tensor, std: torch.Tensor):\n", " self.mean = mean\n", " self.std = std\n", " def __call__(self, batch: List[Dict[str, Any]]):\n", " xs, ys = [], []\n", " for ex in batch:\n", " x = torch.tensor(ex['x'], dtype=torch.float32)\n", " x = (x - self.mean) / self.std\n", " y = int(ex['label'])\n", " xs.append(x); ys.append(y)\n", " return {\n", " 'x': torch.stack(xs, dim=0),\n", " 'y': torch.tensor(ys, dtype=torch.long),\n", " }\n", "\n", "train_loader = DataLoader(HFToTorch(train_stream), batch_size=BATCH_SIZE, collate_fn=CollateCLF(MEAN, STD))\n" ] }, { "cell_type": "code", "execution_count": 8, "id": "26cacf8b", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Val set: torch.Size([595, 184]) torch.Size([595])\n" ] } ], "source": [ "# Materialize validation once\n", "Xv, Yv = [], []\n", "for ex in val_stream:\n", " Xv.append(((torch.tensor(ex['x']) - MEAN) / STD).unsqueeze(0))\n", " Yv.append(int(ex['label']))\n", "X_val = torch.cat(Xv, dim=0)\n", "y_val = torch.tensor(Yv, dtype=torch.long)\n", "print(\"Val set:\", X_val.shape, y_val.shape)\n" ] }, { "cell_type": "code", "execution_count": 9, "id": "ea94521c", "metadata": {}, "outputs": [], "source": [ "class TinyMLP(nn.Module):\n", " def __init__(self, d=184, h=256, num_classes=6):\n", " super().__init__()\n", " self.net = nn.Sequential(\n", " nn.Linear(d, h), nn.ReLU(),\n", " nn.Linear(h, h//2), nn.ReLU(),\n", " nn.Linear(h//2, num_classes),\n", " )\n", " def forward(self, x):\n", " return self.net(x)\n", "\n", "model = TinyMLP(d=184, h=256, num_classes=len(CLASS_NAMES)).to(DEVICE)\n", "opt = torch.optim.AdamW(model.parameters(), lr=LR)\n", "loss_fn = nn.CrossEntropyLoss()\n" ] }, { "cell_type": "code", "execution_count": null, "id": "fec8c721", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[epoch 1] val acc: 49.24% | classes: ['DY', 'QCD', 'SingleHiggs', 'top', 'diboson', 'diHiggs']\n", "epoch 2 step 20 | loss 0.4491\n", "[epoch 2] val acc: 52.77% | classes: ['DY', 'QCD', 'SingleHiggs', 'top', 'diboson', 'diHiggs']\n", "[epoch 3] val acc: 55.13% | classes: ['DY', 'QCD', 'SingleHiggs', 'top', 'diboson', 'diHiggs']\n" ] } ], "source": [ "def evaluate(model, X, y, batch=2048):\n", " model.eval()\n", " correct = 0\n", " total = 0\n", " with torch.no_grad():\n", " for i in range(0, len(X), batch):\n", " xb = X[i:i+batch].to(DEVICE)\n", " yb = y[i:i+batch].to(DEVICE)\n", " logits = model(xb)\n", " pred = logits.argmax(dim=1)\n", " correct += int((pred == yb).sum().item())\n", " total += int(len(yb))\n", " model.train()\n", " return correct / max(total, 1)\n", "\n", "steps = 0\n", "for epoch in range(1, EPOCHS+1):\n", " running = 0.0\n", " for batch in train_loader:\n", " x = batch['x'].to(DEVICE, non_blocking=True)\n", " y = batch['y'].to(DEVICE, non_blocking=True)\n", "\n", " logits = model(x)\n", " loss = loss_fn(logits, y)\n", " opt.zero_grad(set_to_none=True)\n", " loss.backward()\n", " opt.step()\n", "\n", " running += float(loss.item())\n", " steps += 1\n", " if steps % 20 == 0:\n", " print(f\"epoch {epoch} step {steps} | loss {running/20:.4f}\")\n", " running = 0.0\n", "\n", " acc = evaluate(model, X_val, y_val)\n", " print(f\"[epoch {epoch}] val acc: {acc*100:.2f}% | classes: {CLASS_NAMES}\")\n", "print(\"Training done.\")\n" ] } ], "metadata": { "kernelspec": { "display_name": "collide2v", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.18" } }, "nbformat": 4, "nbformat_minor": 5 }