diff --git a/docs/api/datasets.rst b/docs/api/datasets.rst index b02439d26..4fa3c0d6d 100644 --- a/docs/api/datasets.rst +++ b/docs/api/datasets.rst @@ -238,6 +238,7 @@ Available Datasets datasets/pyhealth.datasets.BMDHSDataset datasets/pyhealth.datasets.COVID19CXRDataset datasets/pyhealth.datasets.ChestXray14Dataset + datasets/pyhealth.datasets.PTBXLDataset datasets/pyhealth.datasets.TUABDataset datasets/pyhealth.datasets.TUEVDataset datasets/pyhealth.datasets.ClinVarDataset diff --git a/docs/api/datasets/pyhealth.datasets.PTBXLDataset.rst b/docs/api/datasets/pyhealth.datasets.PTBXLDataset.rst new file mode 100644 index 000000000..9ef9b3cea --- /dev/null +++ b/docs/api/datasets/pyhealth.datasets.PTBXLDataset.rst @@ -0,0 +1,11 @@ +pyhealth.datasets.PTBXLDataset +============================== + +PTB-XL is a publicly available electrocardiography dataset. Contains 21837 samples from 18885 patients, all approximately 10 seconds in duration. For more information see `here <https://physionet.org/content/ptb-xl/>`_. + +Kaggle: https://www.kaggle.com/datasets/physionet/ptbxl-electrocardiography-database + +.. 
autoclass:: pyhealth.datasets.PTBXLDataset + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/models.rst b/docs/api/models.rst index 7368dec94..79854c027 100644 --- a/docs/api/models.rst +++ b/docs/api/models.rst @@ -187,7 +187,11 @@ API Reference models/pyhealth.models.EHRMamba models/pyhealth.models.JambaEHR models/pyhealth.models.ContraWR + models/pyhealth.models.LambdaResNet18ECG + models/pyhealth.models.ResNet18ECG + models/pyhealth.models.SEResNet50ECG models/pyhealth.models.SparcNet + models/pyhealth.models.BiLSTMECG models/pyhealth.models.StageNet models/pyhealth.models.StageAttentionNet models/pyhealth.models.AdaCare diff --git a/docs/api/models/pyhealth.models.BiLSTMECG.rst b/docs/api/models/pyhealth.models.BiLSTMECG.rst new file mode 100644 index 000000000..92acca58b --- /dev/null +++ b/docs/api/models/pyhealth.models.BiLSTMECG.rst @@ -0,0 +1,9 @@ +pyhealth.models.BiLSTMECG +=================================== + +Bidirectional LSTM for 12-lead ECG multi-label classification. + +.. autoclass:: pyhealth.models.BiLSTMECG + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/models/pyhealth.models.LambdaResNet18ECG.rst b/docs/api/models/pyhealth.models.LambdaResNet18ECG.rst new file mode 100644 index 000000000..23714f18e --- /dev/null +++ b/docs/api/models/pyhealth.models.LambdaResNet18ECG.rst @@ -0,0 +1,24 @@ +pyhealth.models.LambdaResNet18ECG +=================================== + +1-D Lambda-ResNet-18 for ECG classification: Nonaka N. & Seita J., In-depth Benchmarking of Deep Neural Network Architectures for ECG Diagnosis. PMLR 149, 2021; lambda layer from Bello I., LambdaNetworks: Modeling Long-Range Interactions Without Attention. ICLR 2021. + +.. autoclass:: pyhealth.models.LambdaConv1d + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: pyhealth.models.LambdaBottleneck1d + :members: + :undoc-members: + :show-inheritance: + +.. 
autoclass:: pyhealth.models.LambdaResNet1d + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: pyhealth.models.LambdaResNet18ECG + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/models/pyhealth.models.ResNet18ECG.rst b/docs/api/models/pyhealth.models.ResNet18ECG.rst new file mode 100644 index 000000000..2a5d65e58 --- /dev/null +++ b/docs/api/models/pyhealth.models.ResNet18ECG.rst @@ -0,0 +1,29 @@ +pyhealth.models.ResNet18ECG +=================================== + +1-D ResNet-18 for ECG classification: Nonaka N. & Seita J., In-depth Benchmarking of Deep Neural Network Architectures for ECG Diagnosis. PMLR 149, 2021. + +.. autoclass:: pyhealth.models.BasicBlock1d + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: pyhealth.models.Bottleneck1d + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: pyhealth.models.ResNet1d + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: pyhealth.models.ECGBackboneModel + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: pyhealth.models.ResNet18ECG + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/models/pyhealth.models.SEResNet50ECG.rst b/docs/api/models/pyhealth.models.SEResNet50ECG.rst new file mode 100644 index 000000000..077da0dc0 --- /dev/null +++ b/docs/api/models/pyhealth.models.SEResNet50ECG.rst @@ -0,0 +1,19 @@ +pyhealth.models.SEResNet50ECG +=================================== + +1-D SE-ResNet-50 for ECG classification: Nonaka N. & Seita J., In-depth Benchmarking of Deep Neural Network Architectures for ECG Diagnosis. PMLR 149, 2021; SE block from Hu J. et al., Squeeze-and-Excitation Networks. CVPR 2018. + +.. autoclass:: pyhealth.models.SEModule1d + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: pyhealth.models.SEResNetBottleneck1d + :members: + :undoc-members: + :show-inheritance: + +.. 
autoclass:: pyhealth.models.SEResNet50ECG + :members: + :undoc-members: + :show-inheritance: diff --git a/examples/ptbxl_superdiagnostic_se_resnet.ipynb b/examples/ptbxl_superdiagnostic_se_resnet.ipynb new file mode 100644 index 000000000..3623acda9 --- /dev/null +++ b/examples/ptbxl_superdiagnostic_se_resnet.ipynb @@ -0,0 +1,31030 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "e06e22e2", + "metadata": {}, + "source": [ + "# PTB-XL Multi-Label ECG Classification — Model & Ablation Study\n", + "\n", + "**Course:** CS-598 Deep Learning for Healthcare \n", + "**Dataset:** PTB-XL (PhysioNet / CinC Challenge 2020, v1.0.2) \n", + "**Models:** ResNet-18 (control), SE-ResNet-50, Lambda-ResNet-18, BiLSTM (Nonaka & Seita 2021)\n", + "\n", + "---\n", + "\n", + "## Background & Motivation\n", + "\n", + "PTB-XL is the largest publicly available clinical 12-lead ECG dataset, containing\n", + "21,837 recordings from 18,885 patients at 500 Hz (≈ 10 s per recording).\n", + "Each recording is annotated with one or more *SNOMED-CT* codes.\n", + "\n", + "We frame ECG diagnosis as **multi-label classification**: given a signal\n", + "$X \\in \\mathbb{R}^{C \\times T}$ ($C=12$ leads, $T$ time-steps), predict a\n", + "binary label vector $y \\in \\{0,1\\}^K$ for $K$ diagnostic classes.\n", + "\n", + "### Mathematical Framing\n", + "\n", + "| Symbol | Meaning |\n", + "|--------|---------|\n", + "| $C = 12$ | ECG leads |\n", + "| $T$ | Time-steps: **1 000** at 100 Hz or **5 000** at 500 Hz |\n", + "| $K$ | Label classes: **5** (superdiagnostic) or **27** (diagnostic) |\n", + "| $f_\\theta$ | Model backbone (ResNet-18, SE-ResNet-50, Lambda-ResNet-18, or BiLSTM) |\n", + "\n", + "**Forward pass:**\n", + "$$\\hat{y} = \\sigma\\!\\left(f_\\theta(X)\\,W^\\top + b\\right) \\in [0,1]^K$$\n", + "\n", + "**Training loss (Binary Cross-Entropy per label):**\n", + "$$\\mathcal{L}_{\\text{BCE}} = -\\frac{1}{K}\\sum_{k=1}^{K}\\left[y_k\\log\\hat{y}_k + (1-y_k)\\log(1-\\hat{y}_k)\\right]$$\n", 
+ "\n", + "**Evaluation — macro-averaged ROC-AUC:**\n", + "$$\\overline{\\text{AUC}} = \\frac{1}{K}\\sum_{k=1}^{K}\\int_0^1 \\text{TPR}_k(t)\\,d\\,\\text{FPR}_k(t)$$\n", + "\n", + "**Evaluation — macro-averaged F1 (threshold = 0.5):**\n", + "$$\\overline{F_1} = \\frac{1}{K}\\sum_{k=1}^{K}\\frac{2\\,\\text{TP}_k}{2\\,\\text{TP}_k + \\text{FP}_k + \\text{FN}_k}$$\n", + "\n", + "---\n", + "\n", + "## Ablation Design\n", + "\n", + "We vary two axes simultaneously (as done in Strodthoff *et al.* 2020) and\n", + "compare **four model architectures** across a $2 \\times 2$ task grid:\n", + "\n", + "| Config | `label_type` | `sampling_rate` | $K$ | $T$ |\n", + "|--------|-------------|-----------------|-----|-----|\n", + "| **A** (baseline) | superdiagnostic | 100 Hz | 5 | 1 000 |\n", + "| **B** | superdiagnostic | 500 Hz | 5 | 5 000 |\n", + "| **C** | diagnostic | 100 Hz | 27 | 1 000 |\n", + "| **D** | diagnostic | 500 Hz | 27 | 5 000 |\n", + "\n", + "Holding the hyper-parameters **constant** across all runs isolates the effect of:\n", + "(a) label granularity, (b) temporal resolution, and (c) model architecture.\n", + "\n", + "**Hypotheses:**\n", + "* Finer label granularity (27 classes) is a harder task → lower absolute AUC.\n", + "* Higher temporal resolution (500 Hz) provides more information → higher AUC\n", + " at the cost of increased input size and training time.\n", + "* **SE-ResNet-50** (CNN + channel attention) is expected to outperform the plain\n", + " **ResNet-18** control by learning which channels to emphasise.\n", + "* **Lambda-ResNet-18** replaces SE attention with Lambda layers (content- and\n", + " position-based linear attention), and may close or exceed SE-ResNet-50 performance\n", + " while using fewer parameters.\n", + "* **BiLSTM** captures global temporal context but may underperform CNN variants on\n", + " short-context morphological features; it is expected to be more competitive at\n", + " 500 Hz where longer sequences provide more temporal 
signal." + ] + }, + { + "cell_type": "markdown", + "id": "89438d66", + "metadata": {}, + "source": [ + "## 0. Environment Setup\n", + "\n", + "Install dependencies if running on a fresh Colab runtime." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "b1dc5e9d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Python 3.9.6 (default, Dec 2 2025, 07:27:58) \n", + "[Clang 17.0.0 (clang-1700.6.3.2)]\n", + "PyTorch 2.8.0 | CUDA available: False\n", + "Apple MPS GPU detected — using Metal backend (~11× faster than CPU)\n", + "Using device: mps\n" + ] + } + ], + "source": [ + "# Uncomment the lines below to install on Colab / a fresh environment\n", + "# !pip install pyhealth scipy wfdb --quiet\n", + "\n", + "import sys\n", + "print(f'Python {sys.version}')\n", + "\n", + "import torch\n", + "print(f'PyTorch {torch.__version__} | CUDA available: {torch.cuda.is_available()}')\n", + "\n", + "if torch.cuda.is_available():\n", + " DEVICE = 'cuda'\n", + "elif torch.backends.mps.is_available():\n", + " DEVICE = 'mps'\n", + " print('Apple MPS GPU detected — using Metal backend (~11× faster than CPU)')\n", + "else:\n", + " DEVICE = 'cpu'\n", + "print(f'Using device: {DEVICE}')" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "d88687c4", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Repo root on path: /Users/anuragd/CS-598_HealthCareAssignment/DLH598_repo/PyHealth\n" + ] + } + ], + "source": [ + "import sys, pathlib\n", + "# Add the repo root so 'pyhealth' is importable from this notebook\n", + "_REPO_ROOT = str(pathlib.Path(__file__).resolve().parents[1]) if \"__file__\" in dir() else \"/Users/anuragd/CS-598_HealthCareAssignment/DLH598_repo/PyHealth\"\n", + "if _REPO_ROOT not in sys.path:\n", + " sys.path.insert(0, _REPO_ROOT)\n", + "print(f\"Repo root on path: {_REPO_ROOT}\")\n" + ] + }, + { + "cell_type": "markdown", + "id": "39342fa9", + 
"metadata": {}, + "source": [ + "## 1. Dataset Path\n", + "\n", + "Point `PTBXL_ROOT` to the `training/ptb-xl/` sub-directory of the\n", + "PhysioNet Challenge 2020 download (v1.0.2). \n", + "It should contain group sub-directories `g1/`, `g2/`, …, `g22/`, each\n", + "holding pairs of WFDB files (`.hea` header + `.mat` signal matrix).\n", + "\n", + "```\n", + "training/ptb-xl/\n", + " g1/\n", + " HR00001.hea\n", + " HR00001.mat\n", + " ...\n", + " g2/ ...\n", + " ...\n", + " g22/\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "5ce43a37", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "PTB-XL root: /Users/anuragd/CS-598_HealthCareAssignment/DLH_Project/WFDB\n", + "ptbxl_database.csv: /Users/anuragd/CS-598_HealthCareAssignment/DLH_Project/WFDB/ptbxl_database.csv\n", + "Found 0 group directories\n" + ] + } + ], + "source": [ + "import os\n", + "from pathlib import Path\n", + "\n", + "# -----------------------------------------------------------------------\n", + "# PTB-XL data root — contains g1/…g22/ sub-dirs AND ptbxl_database.csv\n", + "# -----------------------------------------------------------------------\n", + "PTBXL_ROOT = str(\n", + " Path(\"/Users/anuragd/CS-598_HealthCareAssignment/DLH_Project/WFDB\")\n", + " .resolve()\n", + ")\n", + "\n", + "if not Path(PTBXL_ROOT).exists():\n", + " raise FileNotFoundError(\n", + " f\"PTB-XL root not found: {PTBXL_ROOT}\\n\"\n", + " \"Please set PTBXL_ROOT to the training/ptb-xl/ directory.\"\n", + " )\n", + "\n", + "csv_path = Path(PTBXL_ROOT) / \"ptbxl_database.csv\"\n", + "if not csv_path.exists():\n", + " raise FileNotFoundError(f\"ptbxl_database.csv not found in {PTBXL_ROOT}\")\n", + "\n", + "print(f'PTB-XL root: {PTBXL_ROOT}')\n", + "print(f'ptbxl_database.csv: {csv_path}')\n", + "n_groups = len([d for d in Path(PTBXL_ROOT).iterdir() if d.is_dir() and d.name.startswith('g')])\n", + "print(f'Found {n_groups} group directories')\n" + 
] + }, + { + "cell_type": "markdown", + "id": "d27efc8a", + "metadata": {}, + "source": [ + "## 2. Shared Imports" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "0d594aad", + "metadata": {}, + "outputs": [], + "source": [ + "import time\n", + "import numpy as np\n", + "import pandas as pd\n", + "import matplotlib.pyplot as plt\n", + "import matplotlib.ticker as mticker\n", + "from sklearn.metrics import roc_auc_score, f1_score\n", + "\n", + "from pyhealth.datasets import PTBXLDataset, split_by_patient, split_by_sample, get_dataloader\n", + "from pyhealth.tasks import PTBXLMultilabelClassification\n", + "from pyhealth.models import ResNet18ECG, SEResNet50ECG, LambdaResNet18ECG, BiLSTMECG\n", + "from pyhealth.trainer import Trainer\n", + "from pyhealth.metrics import multilabel_metrics_fn\n" + ] + }, + { + "cell_type": "markdown", + "id": "fdef182a", + "metadata": {}, + "source": [ + "## 3. Hyper-parameters\n", + "\n", + "Following the grid-search described in Nonaka & Seita (2021), we fix the\n", + "best-found hyper-parameters for all four ablation runs so that the only\n", + "difference is the task configuration." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "45a40dcb", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Batch size: 64 | LR: 0.001 | Epochs: 35\n", + "Dev mode: False | Max patients: 7500 | Run phase: ALL\n", + "Models: ['ResNet-18', 'SE-ResNet-50', 'Lambda-ResNet-18', 'BiLSTM']\n" + ] + } + ], + "source": [ + "# Training hyper-parameters (fixed across all model x config combinations)\n", + "BATCH_SIZE = 64\n", + "LEARNING_RATE = 1e-3 # lowered from 0.01 — avoids early overshooting for CNNs\n", + "EPOCHS = 35 # increased from 20 — ResNet needs more epochs to converge\n", + "SPLIT = [0.7, 0.1, 0.2]\n", + "MONITOR = 'roc_auc_macro'\n", + "\n", + "DEV_MODE = False\n", + "RUN_PHASE = 'ALL'\n", + "MAX_PATIENTS = 7500 # increased from 2100 — better rare-class coverage (~4.3 hrs)\n", + "QUICK_MODE = False\n", + "\n", + "MODELS = [\n", + " {'name': 'ResNet-18', 'cls': ResNet18ECG, 'kwargs': {}},\n", + " {'name': 'SE-ResNet-50', 'cls': SEResNet50ECG, 'kwargs': {}},\n", + " {'name': 'Lambda-ResNet-18', 'cls': LambdaResNet18ECG,'kwargs': {}},\n", + " {\n", + " 'name': 'BiLSTM',\n", + " 'cls': BiLSTMECG,\n", + " 'kwargs': {\n", + " 'feature_keys': ['signal'],\n", + " 'label_key': 'labels',\n", + " 'mode': 'multilabel',\n", + " 'hidden_size': 64,\n", + " 'n_layers': 1,\n", + " },\n", + " },\n", + "]\n", + "\n", + "print(f'Batch size: {BATCH_SIZE} | LR: {LEARNING_RATE} | Epochs: {EPOCHS}')\n", + "print(f'Dev mode: {DEV_MODE} | Max patients: {MAX_PATIENTS} | Run phase: {RUN_PHASE}')\n", + "print(f'Models: {[m[\"name\"] for m in MODELS]}')\n" + ] + }, + { + "cell_type": "markdown", + "id": "18e4a2d1", + "metadata": {}, + "source": [ + "## 4. Load the PTBXLDataset (shared)\n", + "\n", + "`PTBXLDataset` calls `load_data()` internally, which does the following:\n", + "\n", + "1. 
**Scans `.hea` files** — iterates over every WFDB header file in the root directory and parses patient metadata (age, sex) and SNOMED-CT diagnosis codes directly from each header.\n", + "2. **Reads `ptbxl_database.csv`** — the CSV you supply is read once via `pd.read_csv()` solely to obtain the `strat_fold` column defined by the PTB-XL authors. Fold values 1–8 map to `\"train\"`, fold 9 to `\"val\"`, and fold 10 to `\"test\"`.\n", + "3. **Writes a parquet cache** — on first run, the parsed event DataFrame is saved to a hash-keyed directory under `~/Library/Caches/pyhealth/` (e.g. `global_event_df.parquet`). Subsequent runs load from this cache automatically, making re-runs fast.\n", + "\n", + "> **Note:** The CSV file must already exist in the same directory as the `.hea` / `.mat` files. The path used here is:\n", + "> ```\n", + "> .../training/ptb-xl/ptbxl_database.csv\n", + "> ```" + ] + }, + { + "cell_type": "markdown", + "id": "382acc8a", + "metadata": {}, + "source": [] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "19663067", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Initializing ptbxl dataset from /Users/anuragd/CS-598_HealthCareAssignment/DLH_Project/WFDB (dev mode: False)\n", + "No cache_dir provided. 
Using default cache dir: /Users/anuragd/Library/Caches/pyhealth/e06c1785-82da-5d10-8959-fb69e8e75b53\n", + "Found cached event dataframe: /Users/anuragd/Library/Caches/pyhealth/e06c1785-82da-5d10-8959-fb69e8e75b53/global_event_df.parquet\n", + "Found 21799 unique patient IDs\n", + "Full-dataset patients: 21799\n", + "Capping to 7500 patients.\n", + "Custom cache already exists: /Users/anuragd/Library/Caches/pyhealth/ptbxl_cap7500/e06c1785-82da-5d10-8959-fb69e8e75b53/global_event_df.parquet\n", + "Initializing ptbxl dataset from /Users/anuragd/CS-598_HealthCareAssignment/DLH_Project/WFDB (dev mode: False)\n", + "Using provided cache_dir: /Users/anuragd/Library/Caches/pyhealth/ptbxl_cap7500/e06c1785-82da-5d10-8959-fb69e8e75b53\n", + "Found cached event dataframe: /Users/anuragd/Library/Caches/pyhealth/ptbxl_cap7500/e06c1785-82da-5d10-8959-fb69e8e75b53/global_event_df.parquet\n", + "Dataset: ptbxl\n", + "Dev mode: False\n", + "Number of patients: 7500\n", + "Number of events: 7500\n" + ] + } + ], + "source": [ + "import pathlib, polars as pl\n", + "\n", + "# Reuse the existing full-dataset parquet (e06c1785) — no rebuild, no deletion.\n", + "_bootstrap_ds = PTBXLDataset(root=PTBXL_ROOT, dev=False)\n", + "_full_uuid = _bootstrap_ds.cache_dir.name\n", + "_full_parquet_dir = _bootstrap_ds.cache_dir / 'global_event_df.parquet'\n", + "print(f'Full-dataset patients: {len(_bootstrap_ds.unique_patient_ids)}')\n", + "\n", + "_pids_cap = sorted(_bootstrap_ds.unique_patient_ids)[:MAX_PATIENTS]\n", + "print(f'Capping to {len(_pids_cap)} patients.')\n", + "\n", + "# Build a new independent cache at ~/Library/Caches/pyhealth/ptbxl_cap2100//\n", + "_custom_root = pathlib.Path.home() / 'Library' / 'Caches' / 'pyhealth' / f'ptbxl_cap{MAX_PATIENTS}'\n", + "_custom_parquet_dir = _custom_root / _full_uuid / 'global_event_df.parquet'\n", + "\n", + "if _custom_parquet_dir.is_dir() and any(_custom_parquet_dir.glob('*.parquet')):\n", + " print(f'Custom cache already exists: 
{_custom_parquet_dir}')\n", + "else:\n", + " print('Building 2100-patient parquet cache...')\n", + " _custom_parquet_dir.mkdir(parents=True, exist_ok=True)\n", + " (\n", + " pl.scan_parquet(_full_parquet_dir)\n", + " .filter(pl.col('patient_id').is_in(_pids_cap))\n", + " .collect()\n", + " .write_parquet(str(_custom_parquet_dir / 'data.parquet'))\n", + " )\n", + " print('Done.')\n", + "\n", + "base_dataset = PTBXLDataset(root=PTBXL_ROOT, dev=False, cache_dir=_custom_root)\n", + "base_dataset.stats()\n" + ] + }, + { + "cell_type": "markdown", + "id": "e018207f", + "metadata": {}, + "source": [ + "## 5. Ablation Configurations\n", + "\n", + "Define all four task variants covering the $2 \\times 2$ ablation grid." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "ae5daef7", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Phase [ALL] -- running 4 config(s):\n", + " A -- superdiagnostic / 100 Hz (baseline) -> K=5, T=1000\n", + " B -- superdiagnostic / 500 Hz -> K=5, T=5000\n", + " C -- diagnostic (27-class) / 100 Hz -> K=27, T=1000\n", + " D -- diagnostic (27-class) / 500 Hz -> K=27, T=5000\n" + ] + } + ], + "source": [ + "_ALL_CONFIGS = [\n", + " {\n", + " 'name': 'A -- superdiagnostic / 100 Hz (baseline)',\n", + " 'label_type': 'superdiagnostic',\n", + " 'sampling_rate': 100,\n", + " 'n_classes': 5,\n", + " 'T': 1000,\n", + " },\n", + " {\n", + " 'name': 'B -- superdiagnostic / 500 Hz',\n", + " 'label_type': 'superdiagnostic',\n", + " 'sampling_rate': 500,\n", + " 'n_classes': 5,\n", + " 'T': 5000,\n", + " },\n", + " {\n", + " 'name': 'C -- diagnostic (27-class) / 100 Hz',\n", + " 'label_type': 'diagnostic',\n", + " 'sampling_rate': 100,\n", + " 'n_classes': 27,\n", + " 'T': 1000,\n", + " },\n", + " {\n", + " 'name': 'D -- diagnostic (27-class) / 500 Hz',\n", + " 'label_type': 'diagnostic',\n", + " 'sampling_rate': 500,\n", + " 'n_classes': 27,\n", + " 'T': 5000,\n", + " },\n", + "]\n", + "\n", + "# 
Filter configs based on RUN_PHASE\n", + "_PHASE_MAP = {\n", + " 'AC': [_ALL_CONFIGS[0], _ALL_CONFIGS[2]], # A + C (100 Hz only)\n", + " 'BD': [_ALL_CONFIGS[1], _ALL_CONFIGS[3]], # B + D (500 Hz only)\n", + " 'ALL': _ALL_CONFIGS,\n", + "}\n", + "ABLATION_CONFIGS = _PHASE_MAP.get(RUN_PHASE, _ALL_CONFIGS)\n", + "\n", + "print(f'Phase [{RUN_PHASE}] -- running {len(ABLATION_CONFIGS)} config(s):')\n", + "for cfg in ABLATION_CONFIGS:\n", + " print(f\" {cfg['name']} -> K={cfg['n_classes']}, T={cfg['T']}\")\n" + ] + }, + { + "cell_type": "markdown", + "id": "1d48882c", + "metadata": {}, + "source": [ + "## 6. Training Loop\n", + "\n", + "For each **model × configuration** pair (4 models × 4 configs = 16 runs) we:\n", + "\n", + "1. **Define task** — `PTBXLMultilabelClassification(label_type, sampling_rate)`\n", + "2. **Apply task** — `base_dataset.set_task(task)` → `SampleDataset`\n", + "3. **Split** — 70 % train / 10 % val / 20 % test (by sample; equivalent to by-patient in dev mode)\n", + "4. **Instantiate model** — `ResNet18ECG`, `SEResNet50ECG`, `LambdaResNet18ECG`, or `BiLSTMECG` from the `MODELS` registry\n", + "5. **Train** with `Trainer`, monitoring macro ROC-AUC on the validation set\n", + "6. 
**Evaluate** on the held-out test set: macro ROC-AUC + macro F1" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "f1c34a69", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "======================================================================\n", + "Model : ResNet-18\n", + "Config: A -- superdiagnostic / 100 Hz (baseline)\n", + " label_type=superdiagnostic, sampling_rate=100 Hz\n", + " K=5 classes, T=1000 time-steps per lead\n", + "======================================================================\n", + "Setting task PTBXLSuperDiagnostic_100Hz for ptbxl base dataset...\n", + "Task cache paths: task_df=/Users/anuragd/Library/Caches/pyhealth/ptbxl_cap7500/e06c1785-82da-5d10-8959-fb69e8e75b53/tasks/PTBXLSuperDiagnostic_100Hz_a435b25e-18d8-5d3b-b000-8457f09e6895/task_df.ld, samples=/Users/anuragd/Library/Caches/pyhealth/ptbxl_cap7500/e06c1785-82da-5d10-8959-fb69e8e75b53/tasks/PTBXLSuperDiagnostic_100Hz_a435b25e-18d8-5d3b-b000-8457f09e6895/samples_cdbbc602-34e2-5a41-8643-4c76b08829f6.ld\n", + "Found cached processed samples at /Users/anuragd/Library/Caches/pyhealth/ptbxl_cap7500/e06c1785-82da-5d10-8959-fb69e8e75b53/tasks/PTBXLSuperDiagnostic_100Hz_a435b25e-18d8-5d3b-b000-8457f09e6895/samples_cdbbc602-34e2-5a41-8643-4c76b08829f6.ld, skipping processing.\n", + " Total ML samples: 7471\n", + " signal shape : (12, 1000)\n", + " labels : tensor([0., 1., 0., 1., 0.])\n", + " Train/Val/Test samples: 5229/747/1495\n", + " Steps per epoch: 81\n", + "ResNet18ECG(\n", + " (backbone): ResNet1d(\n", + " (stem): Sequential(\n", + " (0): Conv1d(12, 64, kernel_size=(7,), stride=(2,), padding=(3,), bias=False)\n", + " (1): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU(inplace=True)\n", + " (3): MaxPool1d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n", + " )\n", + " (stages): ModuleList(\n", + " (0): Sequential(\n", + " (0): 
BasicBlock1d(\n", + " (conv1): Conv1d(64, 64, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)\n", + " (bn1): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv1d(64, 64, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)\n", + " (bn2): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (1): BasicBlock1d(\n", + " (conv1): Conv1d(64, 64, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)\n", + " (bn1): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv1d(64, 64, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)\n", + " (bn2): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): Sequential(\n", + " (0): BasicBlock1d(\n", + " (conv1): Conv1d(64, 128, kernel_size=(3,), stride=(2,), padding=(1,), bias=False)\n", + " (bn1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv1d(128, 128, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)\n", + " (bn2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (downsample): Sequential(\n", + " (0): Conv1d(64, 128, kernel_size=(1,), stride=(2,), bias=False)\n", + " (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): BasicBlock1d(\n", + " (conv1): Conv1d(128, 128, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)\n", + " (bn1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv1d(128, 128, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)\n", + " (bn2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + 
" )\n", + " )\n", + " (2): Sequential(\n", + " (0): BasicBlock1d(\n", + " (conv1): Conv1d(128, 256, kernel_size=(3,), stride=(2,), padding=(1,), bias=False)\n", + " (bn1): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv1d(256, 256, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)\n", + " (bn2): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (downsample): Sequential(\n", + " (0): Conv1d(128, 256, kernel_size=(1,), stride=(2,), bias=False)\n", + " (1): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): BasicBlock1d(\n", + " (conv1): Conv1d(256, 256, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)\n", + " (bn1): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv1d(256, 256, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)\n", + " (bn2): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (3): Sequential(\n", + " (0): BasicBlock1d(\n", + " (conv1): Conv1d(256, 512, kernel_size=(3,), stride=(2,), padding=(1,), bias=False)\n", + " (bn1): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv1d(512, 512, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)\n", + " (bn2): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (downsample): Sequential(\n", + " (0): Conv1d(256, 512, kernel_size=(1,), stride=(2,), bias=False)\n", + " (1): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): BasicBlock1d(\n", + " (conv1): Conv1d(512, 512, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)\n", + " (bn1): BatchNorm1d(512, eps=1e-05, 
momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv1d(512, 512, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)\n", + " (bn2): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " )\n", + " (gap): AdaptiveAvgPool1d(output_size=1)\n", + " (proj): Linear(in_features=512, out_features=256, bias=True)\n", + " )\n", + " (head): Sequential(\n", + " (0): Linear(in_features=256, out_features=128, bias=True)\n", + " (1): ReLU(inplace=True)\n", + " (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (3): Dropout(p=0.25, inplace=False)\n", + " (4): Linear(in_features=128, out_features=5, bias=True)\n", + " )\n", + ")\n", + "Metrics: ['roc_auc_macro', 'f1_macro']\n", + "Device: mps\n", + "\n", + "Training:\n", + "Batch size: 64\n", + "Optimizer: \n", + "Optimizer params: {'lr': 0.001}\n", + "Weight decay: 0.0001\n", + "Max grad norm: None\n", + "Val dataloader: \n", + "Monitor: roc_auc_macro\n", + "Monitor criterion: max\n", + "Epochs: 35\n", + "Patience: None\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/anuragd/CS-598_HealthCareAssignment/DLH598_repo/PyHealth/pyhealth/datasets/base_dataset.py:1077: UserWarning: A newer version of litdata is available (0.2.61). Please consider upgrading with `pip install -U litdata`. 
Not all functionalities of the platform can be guaranteed to work with the current version.\n", + " return SampleDataset(\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "91c91b004cd44ba69a957f5a9bc8654e", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Epoch 0 / 35: 0%| | 0/81 [00:00\n", + "Optimizer params: {'lr': 0.001}\n", + "Weight decay: 0.0001\n", + "Max grad norm: None\n", + "Val dataloader: \n", + "Monitor: roc_auc_macro\n", + "Monitor criterion: max\n", + "Epochs: 35\n", + "Patience: None\n", + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "c05c04f3af2942948e936529651cc1ad", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Epoch 0 / 35: 0%| | 0/81 [00:00\n", + "Optimizer params: {'lr': 0.001}\n", + "Weight decay: 0.0001\n", + "Max grad norm: None\n", + "Val dataloader: \n", + "Monitor: roc_auc_macro\n", + "Monitor criterion: max\n", + "Epochs: 35\n", + "Patience: None\n", + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "2a2bfea6840f446fa0ab557edf3c886d", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Epoch 0 / 35: 0%| | 0/80 [00:00\n", + "Optimizer params: {'lr': 0.001}\n", + "Weight decay: 0.0001\n", + "Max grad norm: None\n", + "Val dataloader: \n", + "Monitor: roc_auc_macro\n", + "Monitor criterion: max\n", + "Epochs: 35\n", + "Patience: None\n", + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "08e30779960947469ef34b96141a4c86", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Epoch 0 / 35: 0%| | 0/80 [00:00\n", + "Optimizer params: {'lr': 0.001}\n", + "Weight decay: 0.0001\n", + "Max grad norm: None\n", + "Val dataloader: \n", + "Monitor: roc_auc_macro\n", + "Monitor criterion: max\n", + "Epochs: 35\n", + "Patience: None\n", + "\n" + ] + }, + { + "data": { + 
"application/vnd.jupyter.widget-view+json": { + "model_id": "98353d05c8b644bcaac34038b10314e2", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Epoch 0 / 35: 0%| | 0/81 [00:00\n", + "Optimizer params: {'lr': 0.001}\n", + "Weight decay: 0.0001\n", + "Max grad norm: None\n", + "Val dataloader: \n", + "Monitor: roc_auc_macro\n", + "Monitor criterion: max\n", + "Epochs: 35\n", + "Patience: None\n", + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "4d8610a55c4848e097c2ab41bd4771e7", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Epoch 0 / 35: 0%| | 0/81 [00:00\n", + "Optimizer params: {'lr': 0.001}\n", + "Weight decay: 0.0001\n", + "Max grad norm: None\n", + "Val dataloader: \n", + "Monitor: roc_auc_macro\n", + "Monitor criterion: max\n", + "Epochs: 35\n", + "Patience: None\n", + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "b875fe5abb104f789dfc7c95be1716f6", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Epoch 0 / 35: 0%| | 0/80 [00:00\n", + "Optimizer params: {'lr': 0.001}\n", + "Weight decay: 0.0001\n", + "Max grad norm: None\n", + "Val dataloader: \n", + "Monitor: roc_auc_macro\n", + "Monitor criterion: max\n", + "Epochs: 35\n", + "Patience: None\n", + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "9731e7d9cb2d4558ac15a6b2e36b9321", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Epoch 0 / 35: 0%| | 0/80 [00:00\n", + "Optimizer params: {'lr': 0.001}\n", + "Weight decay: 0.0001\n", + "Max grad norm: None\n", + "Val dataloader: \n", + "Monitor: roc_auc_macro\n", + "Monitor criterion: max\n", + "Epochs: 35\n", + "Patience: None\n", + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "70f5cac4cb0d47b8a959945203fbaa5a", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Epoch 0 / 35: 0%| | 
0/81 [00:00\n", + "Optimizer params: {'lr': 0.001}\n", + "Weight decay: 0.0001\n", + "Max grad norm: None\n", + "Val dataloader: \n", + "Monitor: roc_auc_macro\n", + "Monitor criterion: max\n", + "Epochs: 35\n", + "Patience: None\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "fd3c461f9a534d818ca17881569d0a77", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Epoch 0 / 35: 0%| | 0/81 [00:00\n", + "Optimizer params: {'lr': 0.001}\n", + "Weight decay: 0.0001\n", + "Max grad norm: None\n", + "Val dataloader: \n", + "Monitor: roc_auc_macro\n", + "Monitor criterion: max\n", + "Epochs: 35\n", + "Patience: None\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "a77713eabfc74992938d7cfa5801ead8", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Epoch 0 / 35: 0%| | 0/80 [00:00\n", + "Optimizer params: {'lr': 0.001}\n", + "Weight decay: 0.0001\n", + "Max grad norm: None\n", + "Val dataloader: \n", + "Monitor: roc_auc_macro\n", + "Monitor criterion: max\n", + "Epochs: 35\n", + "Patience: None\n", + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "2c07844b1cf94065afab8dda9ea4ff07", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Epoch 0 / 35: 0%| | 0/80 [00:00\n", + "Optimizer params: {'lr': 0.001}\n", + "Weight decay: 0.0001\n", + "Max grad norm: None\n", + "Val dataloader: \n", + "Monitor: roc_auc_macro\n", + "Monitor criterion: max\n", + "Epochs: 35\n", + "Patience: None\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "b0a56472bec24042b469f781835bcd4d", + "version_major": 2, + 
"version_minor": 0 + }, + "text/plain": [ + "Epoch 0 / 35: 0%| | 0/81 [00:00\n", + "Optimizer params: {'lr': 0.001}\n", + "Weight decay: 0.0001\n", + "Max grad norm: None\n", + "Val dataloader: \n", + "Monitor: roc_auc_macro\n", + "Monitor criterion: max\n", + "Epochs: 35\n", + "Patience: None\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "32ad74d053ba4cbd8017177e4b6b3b7b", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Epoch 0 / 35: 0%| | 0/81 [00:00\n", + "Optimizer params: {'lr': 0.001}\n", + "Weight decay: 0.0001\n", + "Max grad norm: None\n", + "Val dataloader: \n", + "Monitor: roc_auc_macro\n", + "Monitor criterion: max\n", + "Epochs: 35\n", + "Patience: None\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "3dd8e773ca764945b9c2d3e65b98ec7a", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Epoch 0 / 35: 0%| | 0/80 [00:00\n", + "Optimizer params: {'lr': 0.001}\n", + "Weight decay: 0.0001\n", + "Max grad norm: None\n", + "Val dataloader: \n", + "Monitor: roc_auc_macro\n", + "Monitor criterion: max\n", + "Epochs: 35\n", + "Patience: None\n", + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "5394fdc3fd854915a771dd5f9eefccbb", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Epoch 0 / 35: 0%| | 0/80 [00:00 examples/ptbxl_results_phase_ALL.csv\n", + " model config roc_auc_macro f1_macro train_time_s\n", + " ResNet-18 A -- superdiagnostic / 100 Hz (baseline) 0.846381 0.686147 134.944618\n", + " ResNet-18 B -- superdiagnostic / 500 Hz 0.879256 0.698460 575.779286\n", + " ResNet-18 C -- diagnostic (27-class) / 100 Hz 0.848717 0.382102 1213.200153\n", + " ResNet-18 D -- diagnostic (27-class) / 500 Hz 0.892381 
0.383594 835.039841\n", + " SE-ResNet-50 A -- superdiagnostic / 100 Hz (baseline) 0.863307 0.684056 10027.422704\n", + " SE-ResNet-50 B -- superdiagnostic / 500 Hz 0.884526 0.721236 14136.546827\n", + " SE-ResNet-50 C -- diagnostic (27-class) / 100 Hz 0.845416 0.325626 778.648032\n", + " SE-ResNet-50 D -- diagnostic (27-class) / 500 Hz 0.837064 0.274920 4718.397009\n", + "Lambda-ResNet-18 A -- superdiagnostic / 100 Hz (baseline) 0.796305 0.616217 1035.476037\n", + "Lambda-ResNet-18 B -- superdiagnostic / 500 Hz 0.786086 0.615013 5308.669516\n", + "Lambda-ResNet-18 C -- diagnostic (27-class) / 100 Hz 0.795265 0.262887 1034.279625\n", + "Lambda-ResNet-18 D -- diagnostic (27-class) / 500 Hz 0.783410 0.199866 5246.245391\n", + " BiLSTM A -- superdiagnostic / 100 Hz (baseline) 0.818348 0.654491 281.082750\n", + " BiLSTM B -- superdiagnostic / 500 Hz 0.839531 0.672561 1426.656572\n", + " BiLSTM C -- diagnostic (27-class) / 100 Hz 0.814146 0.196637 277.374086\n", + " BiLSTM D -- diagnostic (27-class) / 500 Hz 0.799358 0.195175 1379.068789\n" + ] + } + ], + "source": [ + "# -- Stash results for this phase to CSV -----------------------------------\n", + "# Saves after each phase so results survive kernel restarts / interrupts.\n", + "# Phase 1 (AC) -> examples/ptbxl_results_phase_AC.csv\n", + "# Phase 2 (BD) -> examples/ptbxl_results_phase_BD.csv\n", + "import pathlib, pandas as pd\n", + "\n", + "_csv_dir = pathlib.Path('examples')\n", + "_csv_dir.mkdir(exist_ok=True)\n", + "_phase_csv = _csv_dir / f'ptbxl_results_phase_{RUN_PHASE}.csv'\n", + "\n", + "_phase_df = pd.DataFrame(results)\n", + "_phase_df.to_csv(_phase_csv, index=False)\n", + "print(f'Stashed {len(_phase_df)} results -> {_phase_csv}')\n", + "print(_phase_df[['model', 'config', 'roc_auc_macro', 'f1_macro', 'train_time_s']].to_string(index=False))\n" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "stash001", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", 
+ "text": [ + "Stashed 16 results → examples/ptbxl_results_phase_ALL.csv\n", + " model config roc_auc_macro f1_macro train_time_s\n", + " ResNet-18 A -- superdiagnostic / 100 Hz (baseline) 0.846381 0.686147 134.944618\n", + " ResNet-18 B -- superdiagnostic / 500 Hz 0.879256 0.698460 575.779286\n", + " ResNet-18 C -- diagnostic (27-class) / 100 Hz 0.848717 0.382102 1213.200153\n", + " ResNet-18 D -- diagnostic (27-class) / 500 Hz 0.892381 0.383594 835.039841\n", + " SE-ResNet-50 A -- superdiagnostic / 100 Hz (baseline) 0.863307 0.684056 10027.422704\n", + " SE-ResNet-50 B -- superdiagnostic / 500 Hz 0.884526 0.721236 14136.546827\n", + " SE-ResNet-50 C -- diagnostic (27-class) / 100 Hz 0.845416 0.325626 778.648032\n", + " SE-ResNet-50 D -- diagnostic (27-class) / 500 Hz 0.837064 0.274920 4718.397009\n", + "Lambda-ResNet-18 A -- superdiagnostic / 100 Hz (baseline) 0.796305 0.616217 1035.476037\n", + "Lambda-ResNet-18 B -- superdiagnostic / 500 Hz 0.786086 0.615013 5308.669516\n", + "Lambda-ResNet-18 C -- diagnostic (27-class) / 100 Hz 0.795265 0.262887 1034.279625\n", + "Lambda-ResNet-18 D -- diagnostic (27-class) / 500 Hz 0.783410 0.199866 5246.245391\n", + " BiLSTM A -- superdiagnostic / 100 Hz (baseline) 0.818348 0.654491 281.082750\n", + " BiLSTM B -- superdiagnostic / 500 Hz 0.839531 0.672561 1426.656572\n", + " BiLSTM C -- diagnostic (27-class) / 100 Hz 0.814146 0.196637 277.374086\n", + " BiLSTM D -- diagnostic (27-class) / 500 Hz 0.799358 0.195175 1379.068789\n" + ] + } + ], + "source": [ + "# ── Stash results for this phase to CSV ──────────────────────────────\n", + "# Saves after each phase so results survive kernel restarts / interrupts.\n", + "# Phase 1 (AC) → ptbxl_results_phase_AC.csv\n", + "# Phase 2 (BD) → ptbxl_results_phase_BD.csv\n", + "import pathlib, pandas as pd\n", + "\n", + "_csv_dir = pathlib.Path('examples')\n", + "_csv_dir.mkdir(exist_ok=True)\n", + "_phase_csv = _csv_dir / f'ptbxl_results_phase_{RUN_PHASE}.csv'\n", + "\n", + "_phase_df = 
pd.DataFrame(results)\n", + "_phase_df.to_csv(_phase_csv, index=False)\n", + "print(f'Stashed {len(_phase_df)} results → {_phase_csv}')\n", + "print(_phase_df[['model', 'config', 'roc_auc_macro', 'f1_macro', 'train_time_s']].to_string(index=False))\n" + ] + }, + { + "cell_type": "markdown", + "id": "b2a47542", + "metadata": {}, + "source": [ + "## 7. Results Summary" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "1eb2e622", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loaded 28 results from ['ptbxl_results_phase_AC.csv', 'ptbxl_results_phase_ALL.csv', 'ptbxl_results_phase_BD.csv']\n", + "\n", + " model config K T roc_auc_macro f1_macro train_time_s\n", + " ResNet-18 A -- superdiagnostic / 100 Hz (baseline) 5 1000 0.882280 0.577331 245.333493\n", + " ResNet-18 C -- diagnostic (27-class) / 100 Hz 27 1000 0.797653 0.173184 249.207059\n", + " SE-ResNet-50 A -- superdiagnostic / 100 Hz (baseline) 5 1000 0.875632 0.675094 1258.272661\n", + " SE-ResNet-50 C -- diagnostic (27-class) / 100 Hz 27 1000 0.717975 0.125620 1236.129843\n", + "Lambda-ResNet-18 A -- superdiagnostic / 100 Hz (baseline) 5 1000 0.871029 0.671461 1764.986753\n", + "Lambda-ResNet-18 C -- diagnostic (27-class) / 100 Hz 27 1000 0.719476 0.126444 1827.354280\n", + " BiLSTM A -- superdiagnostic / 100 Hz (baseline) 5 1000 0.838571 0.672886 515.175012\n", + " BiLSTM C -- diagnostic (27-class) / 100 Hz 27 1000 0.820281 0.227032 621.552368\n", + " ResNet-18 A -- superdiagnostic / 100 Hz (baseline) 5 1000 0.846381 0.686147 134.944618\n", + " ResNet-18 B -- superdiagnostic / 500 Hz 5 5000 0.879256 0.698460 575.779286\n", + " ResNet-18 C -- diagnostic (27-class) / 100 Hz 27 1000 0.848717 0.382102 1213.200153\n", + " ResNet-18 D -- diagnostic (27-class) / 500 Hz 27 5000 0.892381 0.383594 835.039841\n", + " SE-ResNet-50 A -- superdiagnostic / 100 Hz (baseline) 5 1000 0.863307 0.684056 10027.422704\n", + " SE-ResNet-50 B -- superdiagnostic / 
500 Hz 5 5000 0.884526 0.721236 14136.546827\n", + " SE-ResNet-50 C -- diagnostic (27-class) / 100 Hz 27 1000 0.845416 0.325626 778.648032\n", + " SE-ResNet-50 D -- diagnostic (27-class) / 500 Hz 27 5000 0.837064 0.274920 4718.397009\n", + "Lambda-ResNet-18 A -- superdiagnostic / 100 Hz (baseline) 5 1000 0.796305 0.616217 1035.476037\n", + "Lambda-ResNet-18 B -- superdiagnostic / 500 Hz 5 5000 0.786086 0.615013 5308.669516\n", + "Lambda-ResNet-18 C -- diagnostic (27-class) / 100 Hz 27 1000 0.795265 0.262887 1034.279625\n", + "Lambda-ResNet-18 D -- diagnostic (27-class) / 500 Hz 27 5000 0.783410 0.199866 5246.245391\n", + " BiLSTM A -- superdiagnostic / 100 Hz (baseline) 5 1000 0.818348 0.654491 281.082750\n", + " BiLSTM B -- superdiagnostic / 500 Hz 5 5000 0.839531 0.672561 1426.656572\n", + " BiLSTM C -- diagnostic (27-class) / 100 Hz 27 1000 0.814146 0.196637 277.374086\n", + " BiLSTM D -- diagnostic (27-class) / 500 Hz 27 5000 0.799358 0.195175 1379.068789\n", + " ResNet-18 B -- superdiagnostic / 500 Hz 5 5000 0.791043 0.528033 80.286776\n", + " ResNet-18 D -- diagnostic (27-class) / 500 Hz 27 5000 NaN 0.044667 74.486725\n", + " BiLSTM B -- superdiagnostic / 500 Hz 5 5000 0.793496 0.631577 262.473322\n", + " BiLSTM D -- diagnostic (27-class) / 500 Hz 27 5000 NaN 0.116216 261.981205\n", + "\n", + "Combined results saved -> examples/ptbxl_ablation_results.csv\n" + ] + } + ], + "source": [ + "# -- Combine results from all completed phases ----------------------------\n", + "import pathlib, pandas as pd\n", + "\n", + "_csv_dir = pathlib.Path('examples')\n", + "_phase_files = sorted(_csv_dir.glob('ptbxl_results_phase_*.csv'))\n", + "\n", + "if _phase_files:\n", + " results_df = pd.concat([pd.read_csv(f) for f in _phase_files], ignore_index=True)\n", + " print(f'Loaded {len(results_df)} results from {[f.name for f in _phase_files]}')\n", + "else:\n", + " # Fall back to in-memory results if no CSVs saved yet\n", + " results_df = pd.DataFrame(results)\n", + " 
print(f'Using in-memory results ({len(results_df)} rows)')\n", + "\n", + "display_cols = ['model', 'config', 'K', 'T', 'roc_auc_macro', 'f1_macro', 'train_time_s']\n", + "print()\n", + "print(results_df[display_cols].to_string(index=False))\n", + "\n", + "# Also save the combined CSV for git / report\n", + "_combined_csv = _csv_dir / 'ptbxl_ablation_results.csv'\n", + "results_df.to_csv(_combined_csv, index=False)\n", + "print(f'\\nCombined results saved -> {_combined_csv}')\n" + ] + }, + { + "cell_type": "markdown", + "id": "2ba48914", + "metadata": {}, + "source": [ + "## 8. Visualisation — Ablation Results\n", + "\n", + "Bar charts comparing macro ROC-AUC and macro F1 across the four configs." + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "88814df1", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABv4AAAIDCAYAAADMsGn8AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8ekN5oAAAACXBIWXMAAA9hAAAPYQGoP6dpAADHaklEQVR4nOzdB5gT1frH8XfpHZQiVUBFEKQ3GyCKICBFRVCqDZWLDbCAYkVELlKuIooi2C4qKkXpSlFs9KqAoDQpUlSq9Pyf3/k/k5tks5Vdks1+P88T2MxMJtMzc95z3hPn8/l8BgAAAAAAAAAAACBDyxLpBQAAAAAAAAAAAABw5gj8AQAAAAAAAAAAADGAwB8AAAAAAAAAAAAQAwj8AQAAAAAAAAAAADGAwB8AAAAAAAAAAAAQAwj8AQAAAAAAAAAAADGAwB8AAAAAAAAAAAAQAwj8AQAAAAAAAAAAADGAwB8AAAAAAAAAAAAQAwj8AQAAAEgXV199tV166aWp/vz8+fMtLi7O/Q+cKR1Lzz77bKQXI+bdfvvtVq5cuVRfM/SKRTr2dAzu3bs3Xb9n8+bN7ntefvnlZC9TWuK6DQAAEHkE/gAAiCHvvPOOK2zxXtmyZbNSpUq5Qrjt27en2/d6BUfnnXeeHTlyJN54FQDecMMNqZr3qFGj3Hol16FDh+yZZ56x66+/3s4991y3XIl9fsKECXbZZZdZoUKFrHDhwtaoUSObNm2aRYM9e/bYQw89ZJUqVbLcuXNbsWLFrF69evb444+79fRo/wbu98BXrly5kvwejpv/FVSGe/3444/xpv/+++/tqquusjx58ljx4sXtwQcfDNonkeKtx6effhrpRYlaoedLzpw57eKLL7ann37ajh49mm7fq2CGvq9Vq1ZnVFAfSueOzqWUFLKvX7/eevXqZVdccYW7Rui7tQzhaJsMGjTIKleu7I53XRtuueUW++mnnyzSQq9deuk62bhxY5sxY0ayP79kyZIzuhYndv0IfYUu97fffhvv+3w+n5UpU8aNT+01ULzvuPvuu8OOf/LJJ/3TpHcwKhqFO37CvVIbxEwv+n3TctWvXz/iy5GS39lYomMi9LrToEEDmzRpUqqOLe9+yHtlyZLFSpQo4c7/cPcgocaMGeM+
9+6778Yb98MPP7j5PfLII2lWKQcAAES/bJFeAAAAkPaef/55K1++vCuwVYGBCiBUuLhmzZpkBYJSa/fu3fb6669bnz590rRgqUiRIq6wPjlUeKn1P//886169eqJFoa/+uqrLmDTsmVLe+mll9z20rZSQctnn31mN910k0XKn3/+aXXq1LEDBw7YnXfe6Qqc9+3bZ6tWrXLbuEePHpYvXz7/9ApeqOAnVNasWZP9nZn5uPHoeKhbt27QsIsuuijo/YoVK+zaa6+1Sy65xIYNG2a///67C9hs2LAhWcEGRF7g+bJ//36bMmWKDRgwwH799Vf773//m67fPXXqVFu6dKnVrl07TeanwN9zzz3n/k5uSykVBL/yyisumKfjWMd0Qjp16mSff/65de/e3WrVqmU7duyw1157zS6//HJbvXq1lS1b1iLNu3YpYPbHH3+4a1eLFi3siy++CAqc/fPPP65iQ1pfi7UN33///aDP9evXz12jFVxLiK6r48ePd5UIAn399dfuuqLj9EzpO/R7pmtijhw5gsZ9+OGHbnx6BryjWcOGDePtNwVJFdS95557/MMCf2ujga5RChgtWrTINm7cGO836mxJ6HdW21XnWujxFmtq1Kjhv2/RdXH06NHuvlHXhaZNm6bq2NJnNez06dO2bds2e+utt9z21L7W9yXkrrvuckE/Bfd0zVNFNjlx4oT7PlUk8H4nAABA5kDgDwCAGNS8eXNXUOkVNKhgZvDgwa7wtn379un2vSqUGDJkiP3rX/9yrSIiQTWkd+7c6VphqRVHaBAnNPCn8Soc9lpiqGBXLVpUgBLJwN/bb79tW7dute+++861ygmkAujQAjUVZnfu3PmMvjMzHzce1dhv165dotM88cQTds4557igcoECBdwwFcIqMDJ79mxX4IfoFnq+6NjTeaZAiIK5aoWaHlQh4eDBg64AVudVpLRu3dr+/vtvy58/vwtaJxT4U4vfiRMnusJknaOB58k111zjxqnlYKQFXru8QnDtQ+3PwMBfaiowJOdarPmGXn9VmUTX0MSuywpOfvLJJy4IGxiQVDBQgeG0aIWn1u861lQpoU2bNkGtljdt2mQ333yzCwxmRGrZ3KRJE9diP9SuXbvcPUBiLSYvuOAC9wp03333uWFn+nuaXrTPtO907t17770uCKgsB9FErcvSs7JQtNC9YuBx0rVrVxeEHT58uP84SumxpfsPXTc8bdu2dS3zdJ1ILPCne1gFHjWNrtfjxo1zw4cOHeoqb+kakDdv3jNcYwAAkJGQ6hMAgExAhbSi1iyB1q1b5woZlBJThTQqOA0tjFZtYRVSV6hQwU2jWsRqnfDll1/G+x6lylNrC9VYTopqM48YMcKqVKni5qtCWhVi/fXXX/5pFExROjm1fvDSHyXVokUtJBT0Sw4V2io9U2D/NgrkqLZ1YgEobRNtszvuuCPsPLU+gSmVFGDUeipNngJG2s4q2E2M9pVa6ykNaSgt49koVMtMx00gBWZOnjwZdpz2r9ZBBXde0M8r8NNxo9SxCdE6qnA/XK17pV7Uco4cOTLF2y81FOxREEPz1bGuIEO49KBapvvvv98VOqp1mKb1WnqJChpV0Kll1DZOKF2kWrjp+/R5tcx644034k2jFk4q5FThpM5LBZSOHTsWb7oFCxa4VJMKoul8V0sGTasWJqml9dT2VYux3377LWicAiY6F7RcCpSphXBomksFGXQ9KF26tFsmVUBQkCV0e+jzWlZVNli2bFmSy6Xg3MMPP+zWUfPVtlYwXueBaP5FixZ1f+t48Y73pPqx07mrZUnOuSChgVCtnyR2nVTQJaHUc7NmzXLj1PrR+x6tp85draf2/3XXXZesbRSOAkFattDWfanp4y89r8W33Xabaz0YeF4fP37cnYsdO3YM+xlVbNE1WNeI5AYn1GIo9DdHAaOqVasmmO5P57yuC9qOXgAzXOrnyZMnu3loO+h/L9Vhaq7dKaFjRi2ZFNgL
TRWt86ZZs2Yu0KLteSbU4lO/59pWusZrnyvQvHLlynjTpua3fsuWLe681rbTb0RStN80b12H9DucVAtlBaHUKlf7UanMFQRKioJGCuzrPNT5qGt/6O9zYr+zCfXxl5xjSq0HtZ01XL8H+lvXOO2DU6dOWXJbImo/aNlLlixpPXv2dMdEIC/V5c8//+xSA3tpjP/9739bauneU61/FZxNK979bHJaKms/Pfroo67Fs/aLlkOtoVWJLVyKaQAAENsI/AEAkAl4hc8qLPKowEYFmWvXrrW+ffu6WsEq2FZBS2DBnQpJVaCsghEFJZS2TAXu4QpkvVYgKjhJqhBeBX4qoLjyyivtP//5jys0VwGWCuu8Ak0VEqogXanVlDJJr8TSpqWUCn5mzpzpCuu0jVSYqgIipf5Tf04JyZ49u914442uwDO0UFHDFKy49dZb3XulaVL6SBXIaH20LVUje+HChYkumwrqVMgVmioqMWodEvpSoCq1MuNxo/l5hfla9tC+vxTwUlAwsHWRqNWP9uvy5csTnLcKulXwGi44+PHHH7vgggJaKd1+qaFtV7NmTVco+OKLL7pCRX13uP4tFWhTOrNu3bq55dK+V2G70j2qpZJay2mfKH2kWsyGUsG+WjapwFf7WPtG6RHHjh3rn0b7XelTFRBSoFHrq+997LHH4s1Phccq6Nc8dO5q3+t/BV/PRLjjXceOCthV+KyA21NPPeUKihUkDAzqqdWUjn8dPyp01jmvwIRaioXStUXfkVQASuuo4+WDDz5w66ZtreNeKSR79+7tplGBuFcgr2uSd7ynVWvlCy+80O0vnecKVio4q5RzCqgogOtd58LROaLWLQkd79oG2nei+Wk9tB21/VTIr+CAjrXk0DVb1zv1xadrlI4N9b2XFq22UnMtTi4FTxRIV8vEwECz1iehbav9r+BCSvpfVRBR+8/rh1TXMJ1HCQUXFThQK29dk9S/o1ozq4WZjvvAAIpaOGufKcij6fQ7oHMgXJ+Jybl2p4QC1/q9VaUCBcC8eei80fVJ56cqpJxpuklVBND3aJ5qDax10O+Azk2ld/Sk5rdeQWUFZbUuCpIlp6WxtpnOb62XAsdKMb148eKw07733nvuuqH7Gh03CvrptzapAKPORR33at2uc18VD3Sd1zXfk9Lf2eQeU6LzTceFKqaokoq2tZbjzTffTHL76Lqq9VXAT5/R8akKKmqJH3qc6bdJLWKVFl7Tal3Ub2dqU3Zr/krP6aXZTG2gWdcypUDX/YS2k+5Hkpt1oX///u66q/NN11X9tusYAAAAmZAPAADEjHHjxvn08/7VV1/59uzZ49u2bZvv008/9RUtWtSXM2dO995z7bXX+qpWreo7evSof9jp06d9V1xxha9ChQr+YdWrV/e1bNky0e995pln3PfqO7/++mv397Bhw/zjy5YtGzSPBQsWuGn++9//Bs1n5syZ8YZXqVLF16hRo1Rtj8WLF7v5abuE88cff7jtoGm8V5EiRXzff/99kvOeNWuWm/6LL74IGt6iRQvfBRdc4H/fpk0btw4ptWvXLrff9B2VKlXy3Xfffb7x48f7/v7773jTduvWLWgdAl/NmjVL8rs4bny+7777znfzzTf73n77bd+UKVN8gwYN8hUuXNiXK1cu37Jly/zTffLJJ+67vvnmm3jzuOWWW3zFixdP9HtGjx7tPr969eqg4ZUrV/Zdc801Kdp+4cybN8/NX8uZmCNHjgS9P378uO/SSy8NWgbRvHQMbNq0Kd46aF0PHDjgH96vXz83PHBa7QMNGzp0qH/YsWPHfDVq1PAVK1bMfa+MGDHCTTdhwgT/dIcPH/ZddNFFbrjWK6FlF+2vuLg435YtW5LcRjpf8ubN6447vTZu3Oh7+eWX3ee1DXQ8y8GDB32FChXyde/ePd65WbBgQf/wv/76yy3jkCFDEv1ebQvvWvDcc8+5zyxdutS91zYLnceAAQPccv7yyy9B8+nbt68va9as
vq1bt7r3Wgd9VudTaug7Q/dboIULF/ouvPDCoOtK7dq1fTt37kxy3jomsmfP7vvzzz+D9r+265133ukfpu3Zs2fPFC+7d+0KfemYfeedd+JNH7qdvM/rtyItrsWBErsGBX7vyJEjffnz5/cf17qONG7cOOw1MPB6n9D+Cl1fbVdt/xw5cvjef/99N3zatGnueN+8eXPQdVh0Turc1Lnwzz//+Oc1depUN93TTz/tH6bzuESJEkHbYvbs2W46LXtqrt3aZim5dn/++ee+bNmy+Tp06OB+l5o3b+6u2/Pnz/elhs45bWOP5nnq1KmgabTtdYw9//zzKfqtD9zWa9eu9ZUsWdJXt27doPMjMUuWLHGf//LLL917XatKly7te+ihh+Itn6bLnTu37/fffw86lzW8V69e8ZYpULhrrO4lAu9tEjvGvd8h77qdkmPKO74Dt63UrFnTXXcSs3v3bnecN23aNGif6RzTPMeOHRvvt+m9994Lujbpd033AknR8a3v8X5HVq5c6bv11lvdPB944IFkHVuBvP0Q+tK1UudJSnj3p3rptzWp3yMAABCbaPEHAEAMUp83agmiWtqqCa8WWar5rtrZXo3iuXPnuhrEapXitQ5TyjHVslYNcq81gVKmqQWFhiWHaq+rlVJirbfU0qBgwYIulVtg6zS1CFLLmnnz5tnZoNROFStWdC2ZtExqgaQUdqpNv3HjxkQ/q1rzSlWlliuBtceVsq1Dhw7+Ydp+aiWTUI38hKjmv1KJqca25qvUiGqdodRbAwYMcCkJA6lGuL479KV+ppIrMx83SkOp9Hpqsab+z9Sa8ccff3QtWdRSwuMtm1KIhdI+SKrFoo4t1cAPPG7UCkOtyEKPm5Rsv5QKTNGo40stjNTyMlyLQrXEU8skT/369d3/akkRmC7SGx6aKlPrq9YHHrVU0Xu1aFBrHZk+fbo79wL7V9T5qVR+iS374cOH3TGg/adzIrEWl4H0OR3reinNnlqYqSXSlClT/Kl/df6oJYpa1QQeb2qxonX1jjctj9ZJLXaSm7bQa/UXLu1r4PGufaLpAr9f56laxHzzzTd2Nuj71XJJ54RaPqkFjlpTqYXo0aNHE/2sjmm1glHLnsBWYtquoce7WkYFtqBKCbVE8q55aiGpa4n6KQ383tRK6bU4pXQ91XVDaU91XdX/CbXE81pO6TsDz8nk7EO1bPJaFir9pM4ZteoKpdZ6OjfVwiswjalavqpFlNcqWClH1Tekfj91Xfbo+qxWb2fr2q0UhvrtVsvSiy++2B0Dur6qlVha0LVefdaJzjv93mmZde8QeL1MyW+9rvlaPu3Dr776KqiVcVKt/XQ86vgWXat0Hn300Udh02CqBabSV3rq1avnrl263iYm8BrrtabV8urarvcpldxjKpDOt0C6Fob+toTStlQWBqUN9vaZqNWcWvKHfo/2Y2CrYF3HtY2S+p7Aa5n3O6JWgzrOu3Tp4lqHp5b629QxrHkr5aqOaf3Wql/H5FIqZ2/96XMYAIDMi8AfAAAxyCsEVSBDKfZUaBMYqFBQSwWHSlvnFVp4r2eeecZNo0IaUSpAFdKq8EF93CjN1apVq5JMtaQ+r8L14yUKZqjwSAWnod+vVGTedydEBVyaf+ArNf34qOBaqfhUkKqAg1KPqfBe80oqNaSCGSqMUaDA64dMhcwq5A4s0FbaKBUuqTBJ/bUpBdV3332XrOVTIEQpt1TAqj7glK5J20h94r399ttB0yoYoYBA6EsF9sndZhw3wRQQUj9tKpT2ClW9AtFwfc8pCJJYn2eiYLECaYHpD1VIreMpMD1jarZfSii4oJStKoRVIaGXMjJcoa5SjAbyCvkVIA43PDT4pZRrCiIH0nqJly7T6+cqsL9NUeF6KJ2z6gdKy+31/+QV8nvLr0BK6L5OKFCuwlWlTtTxE7j/vKCrgvyhx5sKZb3jTeeICnqVHk6F8gpiK4Ad+p2h20qF0wqsJxSs1PcrFXHod+u8
lqSO96S2QXJ4AWGlo1R6Pp0PSvuqwulvv/3WbbvEqDBcBfuBgW79rfNA29Wj7aVgiI4pXSt1LUhu4bvoM941r1OnTq6AX8EnpY090z7eUnotTilvnyoYp98QXWsCA+BpRcFEHe86fxTATSi4qHMxoXNP+9Ib7/2v37VQoZ8902t3UhRs0XGqddNvlypvpBX1Tah+8rSeOtd17Gq5dT0OvF6m5LdewUpVmlBq48C+YhOj40IBPgX91Hebfo/1UiBPqTvnzJkT7zPh9o2uvQn1xerRcuuY1HVbAU2tr9J+SmoCf8k9pgKvz17fpR4FR5OqWJHQ9yigp/SXod+jSk2hvznJ+R6Ptr3OKQUcFZjTPZPSqyZ1H5AY/X5o2ytIrt857VcdKw888IB/mtDremCFIx0nqjCj313tO6WfBQAAmVPSPQQDAIAMRwVPXh9kqvGtPlRUyKcCSxVMqSBL1MrF62MplArhvUII9UOjAJcKu8eMGeMKwRScUYuKcPQZ9Z+nwtzQWtui71cBoGqvhxNa4BNKfaiof6lACs7oO5NLhcoqVA/tM0bBBG2v5ATn1AeT+o5Rgb+2s4I5KsRSYbdHAQVtdwVa9H0qMFcfViowTqy1TyAVTKmwTi/VkFdhnrZdQts/tduM4yY+BSIUOFALMRXQKgAgCgCE0jAVtiXnuFGQWa1lFJjVcaNgoAqUPanZfsmlvvNUMK7v0LGodVK/lQriKPgQSkHlcBIafqYtoBKjQk0ViKr1qQradb6pcFotTVVI6h2jCi5pGye0XF6g3KPjWfNSS0QF48Sbl/quKl68eLxlUbDWoyCeCvMVUFFhvoLjCpSphaz6Ukyo1Z/2qa4D6i8rlL5f6xqun8PA4GlCktoGyaHrlYIKoYEUBVp1Pug6qf70EqOKEAMHDnSF4irA1vZVK8rA7adWbwrcqJ9EHe9DhgxxwVQFwpo3b24ppdYuCpCoLzkFnapUqWJpIS2uxeHoOqtWSSrE1/qqwD6taR8qcKUWeqq4kNw+w9LCmV67k6JrgVrAqnWTji+1dFcL1bSgPlB1Pqs1uFp4eq2pdM5714iU/tar0tC7777rtkdga+jE6Fqi3xgF//QKpXmlResu/e7o90jXQ/VpqN9ABc7USlDXq8B1Ti8J/bacre9J7nVSv9mBvyPpQfdeCjDqXkD3Ifq98+5DPPrt1u+f6JqnyiT6LdLvogLQ+l1PrBUxAACITQT+AACIcSrYUAG0CkFHjhzpCsNU81lU2J+cQgsVdKkAWS/VzlfAQC0yEivs1HgFVBQYC3XhhRe6GtJKrZdUzejQ2tiiQnjVsg4UGGxLDhVmS7j0WGq1d/LkySTnoe2gAhgVsCtIpoK5cC0FVVCjwm+9FERSyy4VhCuFZGDaq+TQvlON9HCBp8SkdJtx3PwvQKx9pMI3ufTSS13AQqnLAgvOtV8VyEtOYbqCqirs9VpB/fLLL0HpRM9k+yWHCqS1TgpQBbboTKr1VmopfaNXYOnROouXrlApB9XiSwWugftOBemBVq9e7T6rQvOuXbv6h4fuVwXyQoclRudxr169XAG9UryqNaSON1HAIjnHu6ZXazi9FGxSUHfo0KEu9WRirf60TxWMCTc/7fekvjvcsZ6abZCS66T2k4Yl5zqp6562q447tYg8cOCAC36H2wdKBaiXWoDVqlXLXSdTE/gTb9m0DdNDaq/F4dx4443umqBjL7B1ZFrSNVPXHh2P2qaBFQ0Ceek/de4Ftsr0hnnjvf/DpSMOPW9Tcu1OKQWIVVlEraQVoNN5rOuprp/hUgWnlFrA63cwtGWnWmSHbsPk/tYrsK3fER3rCoYnJyijwJ6uRWqZH0oBcgXNVTEkXKvlQLp+JpYm9osvvnCBYQVQA1t7h0vHmtC1J7XH1JkK/B7vfkW0L9RKMr2DdOkl8FqmYyz0uu5VbFDlJmVfUMtsvRSk1W9l7969XUWFwJS8AAAg9pHqEwCATECBFLXmUqsSpSNU4ZEX
XAlXaLlnzx7/3+rPJpACIGrVFS7VYWiLEH2HCuVC+4FScESFxqo9H66AQwVqHhVyBL4XFaCFprRMbh85Hq2Dau2rkDWwdrf66FGLqIRa6QTS55WSTQVlahWkZQ9M8xlu+6nmvFLQ6TsVYEyI+rtSsCTUokWL3DzDpcxKTGq2WWY6bgKX3aN+vVT4qVYUXn85KjjT51R4rv64PNr/KpRT+tikqDWPgjJq6aeWGzomVCAfKLXbLzkU1FWBbWAwR6nf1EIgPWjfBAZyVQir92rloz6+ROn5FCBUIbvnyJEj8Vrkei00As9Z/a1WDqFBpNB9nRSlUlO/gl6/mNpHatWmFj/hzlXvmNFyhh6rCnSoQD+p/aXAn44HBS1C6Xj/4YcfXIA2lI5trzBYy+wNO9NtkFCrwtAWRjovdH1KznVSLaGUrlbXWr20XApie3QchqYP1LVGrWdTe7xrf6nloM4tff+ZSOtrcTg6v5VKVEFgtRxNjK6969atS/T3IyFqra3AgAJkCVGrb21/BZECt79atq9du9YFEET7UcFtBRYC95+CEuqzNLXX7pR46623XKUUBfu8dVIrNbV+UkvUwJTKqaVrTmgLMPXl5vVnm5rfel1/dW3T/YOC/l4r44QolaOCezfccIP7TOhLKW31exQ6H6/VV+Axq+M5sWB6uGus9m+4iiHhfmfP5Jg6U7rGabsrFW/g8itoq3VIq+85m9S6XWlEVXFJ21BCr+teC0D9hmm9X331Vfde9y3a5mpt7aVqBQAAmQct/gAAyCTUR5iCEurPTmkUVWtcrdRUIKsUY6odrdYdKmhW8EtBD1HBlQIxKqBXDXq1dFLhvAqakqICRtWUDxfcUesGtShTKykFVtSKTLXTVaCmQnyvjyN9rwpEX3jhBRf4UMFHaI3xUGqhpsIoBRJEgTmtk1cwouCNgg5K3aUUikprpZr5KjhTai4VsoVrgRWOAn0qZNG6aluGFjJr3VRgo5YOau2iQi4tnwqgFBhIiAJJquGvliDaBirM0mfHjh3rAlihhTgqPE2oZZHmEdrHWnJlluNG+1EtJa644go3rQquVTAbGAjyqAWHptPyqEWJ1lstu7Q8119/fbK2q76vc+fO7nhTgCk0td+ZbD9R6yoFB0KpkFnHngrHtaxqaaLWVdqv2k5p2Y+gRwEcBXIVXFQgScEf7T9tX+0/0bGk80Kt+JYuXeoKMnUOeEEtj9LPKaimAIYKtBWY07omt0+mxBQuXNi1rtQ+0bmmc1nHkPoPU+sztVLTdUP9iKkPOZ3TWma1oNE1RMEN7Te15FHrG50X4Vq2BdK1SCk/w6X91bmngnwV9iuQoWNBASi1etSxoO2pFkc6bvW92q7avjpe1DJVr4SoENwrHPbSGmtddBzq5R1nCkKpNYkCk+ofSy0h1a+YptU+uuuuu5J9vCvloa5d+owXSBddd9XXls5dtcBVEEytwxYvXuzOq+RQAME73nU8K7WdrgsKCiWnDzVdV5WeMZT2TUqvxakVrtVnOPptUrBNLZgSa7kVjrZvUq2cdU7qfNW5oGuc0rLqWNb1Vd+nIJtH12JdT/SboN9TBSl0XOmYCWxpmZJrd3LpuFGKTy1n4HGioJp+13UPoNa3qlShfZZaOv90/Ot7dN3X+afjIbBFWWp+63UO6Ddby6drh1JpJvQbpeuA1jehvgt1XurapOUKrHyka7r2jYKgCripEo+ucwmlD/bWQ9tL5772mfajAqz6XQyt8JPc39mUHFNnQttA54eup/p90/ZS6z9d0+vWret+c6Odru26BiqAp3tYBS31+6YAXmItLPWbo3SgOhcC+99V5Qyl+9SxqN8RbYfAyivad6GUEl19pQIAgAzOBwAAYsa4ceNUxdm3ePHieONOnTrlu/DCC93r5MmTbtivv/7q69q1q6948eK+7Nmz+0qVKuW74YYbfJ9++qn/cy+88IKvXr16vkKFCvly587tq1Spkm/gwIG+48eP+6d55pln
3Pfu2bMn3vc2atTIjWvZsmW8cW+++aavdu3abr758+f3Va1a1ffYY4/5duzY4Z9m165d7rMar/lofkkpW7asmzbca9OmTf7pTpw44Xv11Vd9NWrU8OXLl8+9Gjdu7Js7d64vuU6fPu0rU6aMm7e2VajRo0f7GjZs6CtcuLAvZ86cbvs/+uijvv379yc631WrVrnpatWq5Tv33HN92bJl85UoUcJ3yy23+JYtWxY0bbdu3RJc39B1Dofjxuf7z3/+45Y3cFt37tzZt2HDhrDTL1iwwHfFFVf4cuXK5StatKivZ8+evgMHDviSS9Nq+bVsH3zwQbzxydl+4cybNy/RY0HLLW+//bavQoUK7pjUvHUMePsjkN5r3QLpeNLwIUOGhP3uTz75xD9M271KlSq+JUuW+C6//HK3vXR+jhw5Mt6yb9myxde6dWtfnjx5fEWKFPE99NBDvpkzZ7p5at6en3/+2dekSRN3vmq67t27+1auXOmm03okRedL3rx5w47TsZ01a1Y3TeB6NWvWzFewYEG3/DoXbr/9drdOsnfvXreNtB01X01Xv35934QJE4Lm7W2LUH/99Zf7TLhtevDgQV+/fv18F110kS9HjhxufXXcvfzyy0HHwvfff+/OCU2j+WhfJsbbh+Fe2j+B/vzzT1+vXr18F198sTtetAy33nqr77fffvMll84jb/7ffvtt0Lhjx465a1316tXd+aptqL9HjRqV5Hy9a1fgS/tI1/TXX3/dXZ8DhW6bcJ8PfG3bti1F1+JA2tcJXXcSu+YG0r4IvQZ61/ukrusJnb+hEroOf/zxx76aNWu6fa717tSpk+/333+P9/nPPvvMd8kll7jpKleu7Js4caJbxtDjKLnXbm2z5PzOy+rVq/2/S6GOHj3qW7t2rS+ldPwFnv+aT58+fdw+13JfeeWVvh9++CHecibntz7ctj5y5Iibj65nP/74Y9hlatWqlTuuDx8+nOBy65qk32NdjwKv0UOHDnX3KFqmBg0auGtloHDX/c8//9xXrVo1953lypXzDR482Dd27Nh4x11Cv7Peb0HgdTu5x1RC1+dwy5kQ/b7oeqztcd555/l69OjhrrPJuR4ndOwm59xM6bEVbv0CX5pev5uhvyWh9DtRunRpd90Ldz7ofqNkyZLuGuaN9+6xwr2uvfbaFK0XAACITnH6J9LBRwAAAAAAAAAAAABnhj7+AAAAAAAAAAAAgBhA4A8AAAAAAAAAAACIAQT+AAAAAAAAAAAAgBhA4A8AAAAAAAAAAACIAQT+AAAAAAAAAAAAgBhA4A8AAAAAAAAAAACIAQT+AAAAAAAAAAAAgBhA4A8AAAAAAAAAAACIAQT+AAAAAAAAAAAAgBhA4A8AAAAAAAAAAACIAQT+AAAAAAAAAAAAgBhA4A8AAAAAAAAAAACIAQT+AAAAAAAAAAAAgBhA4A8AAAAAAAAAAACIAQT+AAAAAAAAAAAAgBhA4A8AAAAAAAAAAACIAQT+AAAAAAAAAAAAgBhA4A8AAAAAAAAAAACIAQT+AAAAAAAAAAAAgBhA4A8AAAAAAAAAAACIAQT+AAAAAAAAAAAAgBhA4A8AAAAAAAAAAACIAQT+AAAAAAAAAAAAgBhA4A8AAAAAAAAAAACIAQT+AAAAAAAAAAAAgBhA4A8AAAAAAAAAAACIAQT+AAAAAAAAAAAAgBhA4A8AAAAAAAAAAACIAQT+AAAAAAAAAAAAgBhA4A8AAAAAAAAAAACIAQT+AAAAAAAAAAAAgBhA4A8AAAAAAAAAAACIAQT+AAAAAAAAAAAAgBhA4A8AAAAAAAAAAACIAQT+AAAAAAAAAAAAgBhA4A8AAAAAAAAAAACIAQT+AAAAAAAAAAAAgBhA4A8AAAAAAAAAAACIAQT+AAAAAAAAAAAAgBhA4A8AAAAAAAAAAACIAQT+AAAAAAAAAAAAgBhA4A8AAAAAAAAAAACIAQT+AAAAAAAAAAAAgBhA4A8AcMaeffZZi4uL
s7179yY5bbly5ez2229P0+/X/DRfIJwJEybYueeea4cOHbJYddlll9ljjz0W6cUAAAAA0gTPmBmXnruKFStm//3vfy1WvfHGG3b++efbsWPHIr0oABAWgT8ASMQ777zjHja8V65cueziiy+2+++/3/744w83jR4GAqdJ6KV5SejwvHnzWuXKle2FF16wI0eOJLo8R48etYsuusgqVapkx48fjze+efPmVrBgQduxY4d7P3/+fPcdn3766Rlth3r16rn5vP766xYpWic9/K1YscJinbffvFfWrFndg1O7du1s7dq1CX5u6tSpdv3111vhwoX9x+ojjzxi+/btS/S7brrpJitevLjlyJHDfU+rVq1s4sSJKVrm9u3bu2V9/PHHEz2XlixZEnb8DTfcEPbBWsf88OHDrX79+u7YDjwHf/nllySX69SpU/bMM8/YAw88YPny5bNYpe3+2muv2a5duyK9KAAAAEgEz5j/j2fMyD5jBr5uvfVW/3SLFi2yf/3rX1a7dm3Lnj27G59S//nPfyx//vxB8401CgzrfBk9enSkFwUAwsoWfjAAINDzzz9v5cuXdw9F3377rXs4mT59uq1Zs8ZGjBgR1JJIwz/88EMXrChSpIh/+BVXXOH/+7rrrrOuXbu6v/XZBQsW2FNPPWUrV660Tz75JMHl0EOhvrtp06Y2aNAgF9DwfPTRRzZz5kx79dVXrWTJkmm27hs2bLDFixe7h0/V2OvRo4dF6qHsueeec8tRo0aNoHFvvfWWnT592mLNgw8+aHXr1rUTJ07YqlWrXK1CPbDpuFOgLpACfEOHDrXq1au7IJBauC1btsxGjhzpjo05c+ZYxYoVgz6j40fHdoUKFezee++1smXLuiChjuGbb77Z7e+OHTsmuZwHDhywL774wu0bHfsvvfRSqh4QQ6l2rwKZS5cudYFBLYuCd+vXr3fr9Oabb4YtnAik5dL099xzj8WyNm3aWIECBWzUqFFunwIAACC68YzJM2YknzEDBVbA1LE2ZswYq1atml1wwQXJqmwZSM+uCvz16tXLVWCNVTpvunXrZsOGDXOVTNPi+RcA0pQPAJCgcePG+XSpXLx4cdDw3r17u+Hjx4+P95khQ4a4cZs2bQo7T43r2bNnvOHt2rXzZcmSxffPP/8kuVwdO3b05cyZ07d+/Xr3/q+//vIVL17cV7duXd+pU6f8082bN8993yeffOJLraefftpXrFgx32effeaLi4sLu17PPPOM+549e/YkOb+yZcv6unXrluLl0D7Qd2ifxLqE9tvrr7/uhg8ePDhouI5DDe/QoYPv5MmTQeMWLlzoy5Mnj69q1aq+EydO+Idr3vqMjrvjx4/HW4aZM2f6vvjii2Qt79ixY33Zs2f3zZ07181z/vz5yT6XPC1btnTHRugwnROffvppvOmPHj3q69OnT5LL1rp1a99VV13ly2gOHTqU4s/cf//9bhuePn06XZYJAAAAZ45nTJ4xIyG5+23Xrl2+I0eOuL91TKW06HjixInuMxs3bvRlJHpWPnbsWIo+s2TJEreuc+bMSbflAoDUItUnAKTCNddc4/7ftGlTms1TLbhUSyxbtqQbY6umZ548eey+++5z7/v27Wt79uxxaSayZEnbS/v48eNdikm1uFKKF71PrIWWUj6q5ZHSTT700EOuBmti/vzzT9darWrVqq41lz6rdDKqmepRKzevVuIdd9wRL7VNuP4XDh8+bH369LEyZcpYzpw5XWu3l19+WU8tQdNpPkqrM3nyZLv00kvdtFWqVHE1W6NNgwYN3P+//vpr0HDVUj3nnHNcC7jQWpVKoaMWgKtXrw5Kx6Pav2oVOHbsWJfCJVSzZs3cPk8O1dJVDePGjRvbJZdckiZ9OSxcuNCmTZtmd911l2t9GEr7SfszMTr2tB+bNGkSb5y331X7WWmQcufObZdffrnbTqJzSSmPVJPz6quvts2bNwd9XjWob7nlFtevg5ZFx5lqtf7zzz/x
vmvdunXuvChatKj7Hh2LTz75ZLz+S37++WfXqlH78qqrrnLjTp48aQMGDLALL7zQfY+O8yeeeCJsXxLaB1u2bMkUqYoAAABiDc+Y4fGMeXadd9557pkltbTO2m56fgmk7al9sXXrVrff9XepUqVcdwWi5zCdA0pTq0w0ocdEcvapR8eHnrGUQlfPcyVKlHDdW3jP0Xq20z7SvlPrWu9ZS89jMnfuXPfsrWUpVKiQy64SrssNpUPVM/WUKVNSvb0AIL0Q+AOAVPBuGPXgkRq6EdUDjF4qqNdN7bvvvusK/ZPzUKZ+2JROcd68eS6thAI+StlRs2ZNS0sKvmzcuNFuu+021/+bbpYTC+rogUzrphQxLVq0sFdeeSXJFIu//fabezjQzb/SZDz66KPupr9Ro0b+fiQUTPLSF2p+77//vns1bNgw7Dz14NW6dWv38KpUkZqvHso07969e8ebXql11I+B+iD497//7dZBwabE+saLBC/4pMBQYJocpbL0Uj2G46X8UR+A3mcUjGrbtq3re+FMaB/pONQxIvpfAcakUnAm5fPPP3f/d+nSJdXzUIpQLUetWrXCjlfwTg/uStGiB0M9zOk41MOnjl0dEzpmfvjhB7vzzjuDPquAofpLUVoipT5SoFT/e9vaoxSt6p9QD4/du3d3aW+03ZWCNJQCiZrniy++6KaVu+++255++mm3DjqedV7o/ArXX4YePOW7775L9TYDAABAZPCMGR7PmGnr4MGD/uPEe6VlStPvv/8+wecv9b+uYJ0Cp9omChAqQKpgq7ZpnTp1bPDgwe4ZVc9VgUHw5OxT7zs0jSrH6vlI3WEoWLx//36XRjfQuHHj3DOc9r+mUxDvq6++cs92u3fvds+I2rdapyuvvDJeZVDRuvL8BSAqpbqtIABkojQsX331lUsxsm3bNt9HH33kK1y4sC937ty+33//PVVpWMK92rZt69IXJpfS+V155ZXus2XKlPEdPHgw3jRnmoZFqQM1by914OzZs938li9fHjYNi9IqBvrXv/7lhq9cuTLBNCxa58DUMaJtpzQzzz//fLLSsGh+gWkiJ0+e7KZ94YUX4qW6USqZwLQjmi5HjhxBw7S8Gv7qq6/6IsHbb0qhqeNux44dLvXmRRdd5JZ/0aJF8dZ1+PDhic6zQIECvlq1arm/p0yZkqzPJMfLL7/szoUDBw6497/88oub96RJk84o1eeNN97opleKodQaM2aMm8fq1avjjdNwHWOB5+no0aPdcKU08tZH+vXrF++c9tLfBBo0aJDbP1u2bPEPa9iwoS9//vxBwyQwHad3/tx2221B06xYscINv/vuu4OGP/LII264UquG0rHco0ePRLcLAAAAIodnTJ4xI8Hbb+FeCR1XKU31qXSZ2hbhumTQ9tS8XnzxRf8wPevpmNdndA541q1b56bVMZDSfapnaH122LBh8ZbBO+b0OU2jZ+Tdu3cHTVOjRg2Xhnbfvn1B+04pc7t27Rpvnvfcc49bBwCINrT4A4BkUKpApelTzTTV2FNqiUmTJrnUFKmh1llffvmleyktRL9+/VzaD9XGDE0TkhClplCNNFGKQi1TWlKKwY8//tg6dOjg76haqTdUEzShGpk9e/YMeq+aol4H4QlRSg0vdYxq56kGpNZFtSeXLVuWqmXX9ynlpWqoBlLrLm3fGTNmxNu/galI1JG5Ws+pVmEkqZWZjruSJUu6GpCqpahaqIGdsavGpiTVck/jDxw44P72/j/T1n6iY6Fly5b+eVWoUMHVrDzTdJ9psYxebdrAFpKBrr322qD0PWqZJ6qJG/i93vDA4yEw/Y1S/qim7BVXXOGOr+XLl7vhSo30zTffuP2olKCBwnX+7qVV8njnTWgNYh3HolSoobSuWhYAAABEN54xecaMBGUT8Y4T76WUsGlB6Ti1LRJ6/vIymniURlP7RCk11bLTo2EaF7itkrtPP/vsMytSpIj/OEnsGUzPfToHPTt37nTdJigt
qXceePtO3SqEO+a0ruruQZlbACCaJN3WHwDgUv8pP7xSpCjnvW4uz6Sfg9KlSwf1O6aUIUrpopz1SsfYqlUrO3TokHt59JAReFM6ceJEly5QfQYo7aBSZHh9wKWF2bNnu8CF+ohTKhaP+nH78MMPXQqO0G2goE8gPehomnApMTxKK6L0h6NGjXKpPHQT70ltmhultlGwLDRopHQu3vhAoUEZ7wb+r7/+SvR7du3aZamVnIcrPZRpn+o4UCHARx99FG+be+voBQATovF6oBYvJWhSn0loPdUPhwJfSo2pIJfSsAQeI+oTT+eMgncJpR8NJ/BBLHAZ9dB3JhIq6Ajd71ovUeFLuOGBx4P6ptD+UUrS0ONEAVrxHlR1jiZH+fLlg97rONX+Vl+DoceOtknoceyta7igIgAAAKILz5g8Y0biGVN95IXrAz0tJfT8pf72Ao8371lLx27oM4yGB26r5O5TpczVuZSc9Lbhnr9Enw+l/Txr1ixX6VOBytB15RkMQLQh8AcAyaAHE+WbT09qfSRqIaSHMnU0rbz0HnVw7T3cKBiimoZqWaU+GFQDTX2NKQiTPXv2NFker8ZlYM27QF9//bV7QEtMcm5+1Z/ZU0895VpFDRgwwNWs04Pcww8/nKZ9DSRGD7zhJFUzVp2Ep1Zyat0GPpSpXzjVIlTfb1dddZU/OOU9aKovuYToAUZBuMqVK7v3lSpVcv+rT4TkCF1P9YWgWpAffPCBe9+rVy/3CqXalnfccYf/IU9UGzIcrZs3TegyprawwXsA1AOjHiaTu9+TOh70kKkan6rR+vjjj7tl1cPf9u3b3XZJ7XEb2IowUEoeIv/++29XwxUAAADRjWfM+HjGTP9nzPSk7az9k1BwM7XPX+m1TxN6/koJrWuePHnSZF4AkJYI/AFAlFDaE/FqYKoVlQI8nsAbyf79+7s0FErhohqH6pBaD3LqkLpv375nvCyqxaZ5KwVLu3bt4o3XA6Ee2kIfyjZs2BBUa061OHUTHphOMdSnn37q5vP2228nGsBISfBDD7DqlFsPr4E1MtetW+cfnxaUFuVseumll1zLv4EDB9obb7zhhqmWsF7q6Fw1IMOlxnzvvffc/+rk3PuMajFqH+szSaXwCV3PKlWquIew8ePHu32nTutD6WFMx4gX+PO2+fr168MG8n755ZeglnE6ngcNGuSCi6kN/HnBQ9UIVRA1rSgYqeV999133Xma0Ha64IIL3P+hncgnl7aZzh+dV16AV/744w93foQexwo8Hj9+PGhaAAAAZF48Y/KMeTaplZ1aZOr5K60ld5/q+xcuXGgnTpxIccA68Jk1lPazviewtZ9oXXn+AhCNCPwBQJRQShWpXr26P2jgBQ4CLV261KWFUdoV1cb0Ajo33nijC7bcdtttZ/zQoeCSHszUn0K4oItStCj1i5ZDufY9et+0aVP/ez0sSvPmzRP8LtXuC62ZqHkriBGY4tC7wdaNfVJatGhhb775po0cOdL1beEZPny4e7hLbHlSIr1TpITSQ4z6IXjnnXfs2Wef9adyUcrJzp07uz7iFOQLrDGp40UpcxRU02c9qumrvkTUx4KCa6GpULSPFUTSsRVuPb/99ltXO/j5558P++CuwJhqZO7YscOlxNGxqlSjY8aMsS5dugQdNwpaan8/+uij/mHqU0T9Gmp67S+1eAykZXviiSdcreWE6Dtz5MhhS5YscamO0oq3fQOPW/2tIGogpbFp2LChjR071vXTF5juJzkpOXUcax1HjBhho0eP9g8fNmyY+199KwbSvhb1NQgAAADwjPk/PGOeHXqOmz9/fprPN7n7VM+86gtd+yk0K01Sz2BqbVmjRg1XwVP72OtyQhU5dXzqmTuU+hfs1KlTGqwhAKQtAn8AEAEKinhpEpXi8Mcff3Q3l7phVVAkIUoxeM8997iAzwsvvBA0TkEHpXJUJ9bqdyw05aJXEzFQt27d4vVnJqppqTSJCQUQFER566233A31
TTfdFFTbTeMUsPnhhx/cOqozee9BMxw9UCp4pJZh+j61ptL3hz6QKuilG2+1dFMNSz2k1a9fP15eflHNVNUGfPLJJ11wSt+vG3XVMFUqkMBO1jMaBccmTJjggkFqASh60Fi8eLE7Bn7++Wf3Xv1H6CFEQSftS9WQDKzxqJq22tZqPaj0Pd7DvDpJnzlzps2ZM8e16EuI9pEevkKDTx4dB9r+6pdQQS8F4BSk0zFXt25d9/1aLn23llGphHRsB1IQUw/5Osa0T5WqSPtdtX41X9VITizwp9Sh+rxq5uoYSytqSahjSP2l6EFT/RHqHAuX0uaVV15xtapr1arl1k/Hq45JnTvqOD4xOm61vVTAoMKIRo0a2aJFi9y1QoHQ0NrQqh2s4GLNmjXTbF0BAACQMfCM+T88Y6aeuol4//333d+qQCnecaHnxcSOJWnTpo37vI5HZZpJK8ndp2rVqudIPYPq2UlBZgWc9UyoTDVavsQMGTLEBXEVwLzrrrtcVxUKNqvPQVW+DQ2Yq/uHpOYJABHhAwAkaNy4capS5lu8eHGyPzNkyBD3mU2bNoUdr3GBr6xZs/pKly7tu+eee3x//PFHovMePny4+8ynn34advzLL7/sxk+cONG9nzdvXrzvC3wtWLAg3jy0DNmyZfN16dIlweU4cuSIL0+ePL4bb7zRvX/mmWfc/H7++Wdfu3btfPnz5/edc845vvvvv9/3zz//BH22bNmyvm7duvnfHz161NenTx9fiRIlfLlz5/ZdeeWVvh9++MHXqFEj9wo0ZcoUX+XKld3y6fu0f0Tz03wDHTx40NerVy9fyZIlfdmzZ/dVqFDB7ZvTp08HTaf59OzZM946hi7n2eTtt08++STs+KuvvtpXoEAB399//x00fPLkyb7rrrvObfucOXP6LrroIrdt9+zZk+B3zZkzx9emTRtfsWLF3HYtWrSor1WrVm5bJ+T48eO+woUL+xo0aJDoepQvX95Xs2bNoGEzZszwNW7c2C2/9oum6d27t++vv/5K8FjTcV23bl1fvnz5fDly5HD78oEHHvBt3LjRlxSdC3Fxcb6tW7cmud91zmq4jpOk9oeO9SZNmrhlKlKkiK979+6+lStXBh2XnjVr1rhzpVChQr5cuXL5Klas6Hvqqaf8473zJ9x+OnHihO+5555z20nbq0yZMr5+/fq58ybQqVOn3DnUv3//JLcJAAAAIodnzPB4xozsM2bodOFeodsunGPHjrnnowEDBgQN13rnzZs33vSaZ5UqVcJuq5YtW6Zqn+pYevLJJ/3PUMWLF3fH0K+//proc5/nq6++cvPX9+i5Vc/HOg5DPf74477zzz8/3v4HgGgQp38iE3IEAABIX6rBrFrK7du3d2mKYpXSparm86+//upS1AAAAABAJOi5a9y4cS5TS2A3FLHk2LFjrp9J9X/50EMPRXpxACCeLPEHAQAAxAY9aColjPoGOXTokMUq9eOoPlkI+gEAAACIJPWtp2cvdc8QqxTYVFca9913X6QXBQDCosUfAAAAAAAAAAAAEANo8QcAAAAAAAAAAADEAAJ/AAAAAAAAAAAAQAwg8AcAAAAAAAAAAADEAAJ/AAAAAAAAAAAAQAzIFukFyAhOnz5tO3bssPz581tcXFykFwcAAAAA4vH5fHbw4EErWbKkZclCHc+U4JkPAAAAQKw87xH4SwY9AJYpUybSiwEAAAAASdq2bZuVLl060ouRofDMBwAAACBWnvcI/CWDan16G7RAgQKRXhwAAAAAiOfAgQMueOU9vyD5eOYDAAAAECvPewT+ksFL9aIHQB4CAQAAAEQzUlWmHM98AAAAAGLleY+OHwAAAAAAAAAAAIAYQOAPAAAAAAAAAAAAiAEE/gAAAAAAAAAAAIAYQB9/AIAU8/l8dvLkSTt16lSkFwWZXPbs2S1r1qyRXgwAAAAAAJCBqYzrxIkTkV4MZHJZs2a1bNmynXG/7QT+AAApcvz4cdu5c6cdOXIk
0osCuBuh0qVLW758+SK9KAAAAAAAIAM6dOiQ/f77766iOxBpefLksRIlSliOHDlSPQ8CfwCAZDt9+rRt2rTJ1T4pWbKk+wE60xooQGrphnzPnj3u5rxChQq0/AMAAAAAAClu6adyBQVbihYtSjkXIlrOpQYXKutS+avKurJkSV1vfQT+AADJph8fBf/KlCnjboiASNNN+ebNm106DgJ/AAAAAAAgJVSeoICLyhdy584d6cVBJpc7d27Xrc2WLVtcOWyuXLlSNZ/UhQsBAJlaamubAGmNmngAAAAAAOBMUb6AWCp3peQWAAAAAAAAAAAAiAEE/gAAAAAAAAAAAIAYQB9/AIAz1mzAtHT/jllPtUzR9FdffbX98MMPLi92jhw5rGrVqjZ06FCrU6dOqpdh/vz51rhxY7v55pvt008/9Q9/+OGH7e+//7Z33nknWfNo27atmz4xI0eOdPNbvXq1NW/e3CZPnhw0/ueff7YHHnjAli1bZjlz5rTWrVvbiBEj6HsRAAAAAAAgxsq6KOdCStDiDwAQswYPHmyHDh2yXbt2Wf369e2mm24643nq5mPWrFm2aNEiS08lS5a0/v37W/fu3cOO79ixo1WsWNH++OMPd9O0cuVKGzBgQLouEwAAAAAAACKDci4kF4E/AEDMU02obt262bZt22zPnj3m8/nslVdesUqVKlmhQoVcram1a9f6px82bJidf/75lj9/fitXrpyNGTPGPy5XrlzWq1cv69u3b4Lft3v3buvUqZOVKFHC3dioptSxY8ds3759rlbT/v37LV++fO61YMGCsPPQzZtqTBUpUiTs+N9++806d+7s1q1o0aKuJpRujAAAAAAAABC7KOdCUgj8AQBi3j///GNvv/22u7k455xz7PXXX3fvv/jiC9u7d6+7+WjVqpUdP37cfvnlF1cDafbs2Xbw4EFbuHCh1atXL2h+jzzyiLv5UI2oULrZ0s1J8eLF7ddff/XXUnrhhRescOHCNmPGDCtYsKCroaVXgwYNUrVOWob33nvPrZtqek2aNMmtAwAAAAAAAGIX5VxICoE/AEDM6tevn6vplDdvXhs/frxNnDjRsmXLZq+99po9//zzVqFCBff+wQcfdDcWuvnJmjWru6n56aef3LDzzjvPqlWrFjTfAgUKuJsmzV/TBlqyZIlt2LDBhgwZ4vKQ6yboiSeecN+fllSj6ttvv3W1tVTjqkyZMnbnnXem6XcAAAAAAAAgOlDOheQi8AcAiFmDBg1ynQsr9UGpUqVs1apVbvjmzZtd+gDdLHmvv/76y37//Xe78MIL7d1333WdDutmqGnTprZixYp48+7Ro4f7zEcffRQ0XPPWd5577rn+ebdr187lKE9IlSpV/CkR/vvf/ya5XvreJk2auLzoR44csT///NPd9GmdAAAAAAAAEHso50JyZUv2lAAAZFC6GXrrrbesYcOGduONN7paQyNGjLDrr78+7PTt27d3L9WEevrpp61Lly7x8oor57g6GX7qqaesWbNm/uGad7FixWznzp1h550lS/w6N6p1lRJKraBlUw2uuLg4tyz33nuvqx0FAAAAAACA2EU5F5JCiz8AQKZQq1Yt17nxiy++aD179nQ3OuvXr3fjDhw4YFOmTHG5zjXsyy+/dDccutFQ7SSlSQinY8eOrgbSxx9/7B9Wt25dd1OkFAman1IkbNmyxeU8F9Wu0nB1jJyYkydP2tGjR93/p0+fdn8rN7uos2Yt16hRo9x4zU83fDVr1kzDLQYAAAAAAIBoRDkXEkPgDwCQaTz55JM2ZswYa9u2rd1+++2us2PlMb/kkkv8ucl106HaTbpxUd7yuXPn2jvvvJNgraaXXnrJ9u3b5x+m3OlTp0617du3u/mqg+OWLVvaxo0b3fiKFSvaXXfdZZUrV3bpEZS/PBx1kpw7d24bOHCg65xZfysdg+hmSMM+/PBD15FzuXLlXNoFpW4AAAAAAABA7KOcCwmJ84X21oh4FCHX
Ab1//3534gBAZqXaOJs2bbLy5ctbrly5Ir04AMckAATguSX12HYAAACZE+UKyCjHZEqeWWjxBwAAAAAAAAAAAMQAAn8AAAAAAAAAAABADCDwBwAAAAAAAAAAAMQAAn8AAAAAAAAAAABADCDwBwAAAAAAAAAAAMQAAn8AAAAAAAAAAABADCDwBwAAAAAAAAAAAMQAAn8AAAAAAAAAAABADCDwBwAAAAAAAAAAAMSAbJFeAABADHj2xrPwHZNSNPn69evtkUcesR9++MGOHz9uJUuWtDvuuMMef/xxu/rqq93w7Nmz+6fPlSuX7d27N+y83nnnHbvrrrssd+7cFhcXZ+edd5717NnTevXqdcarVa5cOfe9v/76q5uvrFixwmrWrGk+ny/Z8xgxYoS1bds2wWnmz59vjRs3trx58/qH3X777TZy5Ej/+8mTJ9ujjz5q27dvt1q1atmYMWOsUqVKZ7R+AAAAAAAAGQ5lXalGWVfk0eIPABCTWrZsadWrV7etW7faX3/9ZZ999pldcMEF/vGDBw+2Q4cO+V8J3Qh5qlat6qY7ePCgvffee/bkk0/a3Llz02RZdSP2/PPPW3orWLBg0DoH3gjp5rFTp042fPhw+/PPP+2aa66xNm3a2MmTJ9N9uQAAAAAAAJA4yrrio6wrPAJ/AICY49Uquvfeey1PnjyWNWtWq1Klit1yyy1pMv8rrrjCzW/p0qX+YcuWLXO1jM4991y76KKL7K233goad9lll1mBAgWsSJEi1qpVq6D5PfbYYzZu3Di3zOGoNtQrr7ziaiQVKlTI1eJau3atG6d10g3fbbfdZvny5bP77rsvVev0wQcfuOW/4YYb3M3ZU089Zbt377YFCxakan4AAAAAAABIG5R1pdwHmbisi8AfACDmFC5c2CpWrOjSHUyYMMG2bNmSZvPWjck333xja9assYsvvtgN27Vrl1133XXWo0cP27Nnj0sj8Mwzz9icOXPc+Pvvv9/dAP39998utYBSDATSfLp06WL9+/cP+52vv/66vf322/bFF1+4G72bbrrJzU9pHT755BM7//zz7cMPP3Q1m954440El13jlQaidOnSrsaTlsWzatUqq1Gjhv+9UkNUrlzZDQcAAAAAAEDkUNYVHmVd4RH4AwDEHOUmV55vpT947rnnXNoD/bB/+eWX/mn69evnahR5L93MJGb16tVuOtUQatSokfXp08dat27txr3//vvWsGFDa9++vatxdemll7obsfHjx/tvLHRDtmPHDsuZM6ebNtSzzz7rbnaWL18eb9xrr73m0iNUqFDBsmXLZg8++KD9888/tnDhwmRvE9WgUj71bdu22ZIlS9xNnW6oTp8+7b9R0voF0nulewAAAAAAAEDkUNYVH2VdCSPwBwCIScWLF7ehQ4faTz/95GomNW/e3G688UaX01sGDRrkaiV5L+9G6cUXX3RpBPTSZwLznms63RwoNYBynns5wTdv3mzTp08PurlSuoKdO3e68WPHjrWjR49a7dq13U1JYL5xT4kSJdxNTt++feON0/w7d+4cNH/lcv/999/Drnu4ddD20E2abtb095tvvmkrV660X375xY3XtPv37w+aj97nz58/1fsAAAAAAAAAaYOyLsq6kovAHwAg5ikXuWoZHT582DZt2pTotE888YS/Q+AZM2bEG58jRw5Xs0q1kEaNGuWGlSlTxt1oBd5c6aZJN0hy4YUXuk6SlSZhzJgx9sgjjwTlTPc8/vjjroZSaEfKmr/SHATO/8iRIy7XuWTJkiVF6+DVFAtUrVo1V0vKc+LECfv555/dTSAAAAAAAACiB2Vd8VHW9T8E/gAAMUc1hJRDfN26dXbq1Cl34zBs2DB3U6RaSGdKNxJPPvmkq22keStnuW5gPvvsM3cToZduLBYvXuym143QH3/84T6nGky6eVFtpFAFCxZ0NzKab6CePXva008/bevXr3fvDxw4YFOmTPGnJjjvvPMS7CzZM2/ePHcj
qLQH+/btczna1WmzUiqIallpHXQDd+zYMRs4cKDrnDlcqgYAAAAAAACcPZR1xUdZV8II/AEAYo5qKqkz3xYtWrgbDHUI/N1337kaQXnz5vXXOPJSBHgv3SQklzod1s2VUhmUKlXKZs2aZaNHj3ZpDHRzohsY3bTIV1995XKw6zvatGljQ4YMCepcOJA6R/aWMXDY7bff7r6zQIECdskll/hzqotuoLQcutH617/+FXa+yqeuGxstg9IgKHXD1KlT/Tdl6iD6gw8+sIceesjNR+kgPv/8c5dnHQAAAAAAAJFDWVd8lHUlLM6ncCgSpYNZJ5Pyv+ogBIDMSrm7VZOmfPnyruNfINI4JgHgf3huST22HQAAQOZEuQIyyjGZkmcWWvwBAAAAAAAAAAAAMYDAHwAAAAAAAAAAABADCPwBAAAAAAAAAAAAMSCqAn/ffPONtWrVykqWLGlxcXE2efLkJD8zf/58q1WrluXMmdMuuugie+edd+JN89prr1m5cuVcPtT69evbokWL0mkNAAAAAAAJ4ZkPAAAAADJR4O/w4cNWvXp199CWHOrgsGXLlta4cWNbsWKFPfzww3b33XfbrFmz/NN8/PHH1rt3b3vmmWds2bJlbv7NmjWz3bt3p+OaAAAAAABC8cwHAAAAAOkrzufz+SwKqfbnpEmTrG3btglO8/jjj9u0adNszZo1/mG33nqr/f333zZz5kz3XrU969atayNHjnTvT58+bWXKlLEHHnjA+vbtm6xlOXDggBUsWND2799vBQoUOON1A4CM6ujRo64Arnz58q5GPRBpHJMAkHGfW3jmAwAAQKRRroCMckym5Jklm2VgP/zwgzVp0iRomGp2qhaoHD9+3JYuXWr9+vXzj8+SJYv7jD6bkGPHjrlX4Ab1HiD1AoDMStdA1RfxXkCkecciv9EA8P+/07GGZz4AAACkJ8q6kFHKulLynJKhA3+7du2y8847L2iY3uuh7Z9//rG//vrLTp06FXaadevWJTjfQYMG2XPPPRdv+J49e1y0FQAyqxMnTrgfmZMnT7oXEGk6DnVM7tu3z7Jnzx7pxQGAiDp48KDFGp75AAAAkJ4o60JGKetKyfNehg78pRfVFlUfER49VCpVTNGiRUn7AiBTU0GYfmSyZcvmXvj/VgXqT6hGjRrJml4tFNQkf9y4cem+bJmBjkPtg8KFC5OSA0Cmx3Uw+XjmAwAAgFDWFYxyrugt60rJ816GPpKLFy9uf/zxR9AwvdeDWu7cuS1r1qzuFW4afTYhOXPmdK9Q2th6AUBmpWug+uPxXn5DA/5OL31Slm7h6quvdn0GeanA0lO87ZHEtIH/p5TWS6nLVOMnR44cVrVqVRs6dKjVqVPHzsT8+fOtcePGdvPNN9unn37qH67tp36U3nnnnWTNQ9tc0ydGfTBpfqtXr7bmzZvb5MmTg8b//PPPrl8m3Wjq97h169Y2YsQIy5MnT4Lbnt9oAPj/3+lYwzMfAAAA0lNGKeuinCtzlHMlVtaVkueUDP1Ec/nll9ucOXOChn355ZduuOhAqV27dtA0aiKp9940AABkNIMHD7ZDhw659Gf169e3m266KU3mq5uPWbNm2aJFiyw9lSxZ0vr372/du3cPO75jx45WsWJFV2irm6aVK1fagAED0nWZAADRiWc+AAAAILZRzpX2oirwp527YsUK95JNmza5v7du3epPx9K1a1f/9Pfdd5/99ttv9thjj7n+G0aNGmUTJkywXr16+adR+pa33nrL3n33XVu7dq316NHDDh8+bHfccUcE1hAAEGnDhg2zChUqWP78+e3CCy90tXI8mzdvdjVqxo4daxdccIHly5fP/cbs3LnTrrvuOte6oFGjRu5GJNDXX3/tfsALFSpkHTp0cCkOPN98842rraR56cYlNB93586d3Q2C5q2Cy3nz5iV7XVTY2a1bN9u2bZvrk0jU+e8rr7xilSpVcsujmlP6/Qtc//PPP9+tf7ly5WzMmDFB
KQP0G9q3b98Ev3P37t3WqVMnK1GihFtu1ZQ6duyYyzuuWk1ad62rXgsWLAg7D20H1ZgqUqRI2PH6bdd20fop5ZpqQunGCACQ8fHMBwAAAKQdyrko54r6wN+SJUusZs2a7uU9wOnvp59+2r3XAek9EEr58uVt2rRprsZn9erVXRNQ7dhmzZr5p9GB+fLLL7t5KC+tHipnzpwZr/N3AEDmULZsWZs7d67ry0e/GY8++qh99913QdPopkQ/wKoR9J///Mfat2/vmuDrpkM/0i+++GLQ9O+//777jG6o/vrrL3/aBf2tH/P777/fpQVQAeQHH3wQ9Nlrr73W3bDohuLWW2+1du3aJbuz3n/++cfefvttd2NxzjnnuGGvv/66G/bFF1/Y3r173c1Hq1at7Pjx4/bLL7+4GkizZ89237Fw4UKrV69e0DwfeeQRt+6qERVKN1taH6VO+/XXX/21lF544QWXd3zGjBlWsGBBV6irV4MGDSw1tAzvvfeeWz/dfE6aNMmtAwAg4+OZDwAAAEg7lHNRzhX1gT9Fa7WxQ19evlX9r7yqoZ9Zvny5i8Jq59x+++3x5qsDccuWLW4a7Xw1FwUAZE7K7V2mTBlX40m5vlVwGPrbopuGvHnzWuXKlV0h41VXXWVVqlRxKQJuvPFGl5M7kGpLqVaQah6pqf748eNdmrGpU6e64ffee6/rmFc/6tdcc03QZ3WTpJsI5TLXzZk+t2rVqkTXQa0h9F1aRn3XxIkT/R1Qv/baa/b888+72l4a9uCDD7obC/3+qQ8k/a7+9NNPbpgKRKtVqxY0b9XI0vrrOzRtaGHthg0bbMiQIS4PuW6CnnjiCbcMaUk1qr799ltXW0s1rrS/7rzzzjT9DgBAZPDMBwAAAKQdyrko54r6wB8AAOntv//9r9WqVcvOPfdcd1Mxffp0V2MoUGALAf3wh75XLZ/Q2lWBf6vWkWpN7dixI2hc6LS6+XnyySfdzYtuRLQ8SiHgLY9uwrx0Alpuz6BBg1zNKqU+KFWqVNANlGpjKX2A5uW9VCPr999/dykflAZNaR+0Tk2bNvWnWgukFGn6zEcffRQ0XPPW93rbTi/V3FKO8oQktA4J0fc2adLE5UU/cuSI/fnnn+7GT+sEAAAAAACA/6Gci3KucP4/bAoAQCag1GHKFa70X2o9oJpCysEdWuMnpdTCwGtZoO/wcnarFpTGhS5DsWLF3N+qQaSX0g3opki1s5TKwFse1VhKjG6G1KdRw4YNXQ0tfZ9qDSldw/XXXx/2M0rnoJdqQiklWpcuXeLlFdfyq0bXU089FZRKTfPWsisNWzhZssSvT5TUOoRSSw4tm2pwaXtoWVSTTLWjAAAAAAAA8P8o56KcKyG0+AMAxKyTJ0/a0aNH/S/VstHNhn7U9eOtWlDKA36mlBJAtZ5US0g3Gcphrvm3bNnStm/f7m5atCzqo0h51z3Kv64ffOUuV+0ppS5Ibt5zj2p16ebOy8fes2dPtwzr16/3f8eUKVPcfDVMfSTphkPfq9pJXuqEUB07dnQ1kD7++GP/sLp167qbIqVI0Py0LXXDp5znotpVGq6OkZOzX/S/aoPpb62/qLNmLdeoUaPceM1P28/rCwoAAAAAACAzopyLcq7kIvAHAIhZyiWeO3du/6tNmzYu5YDyjytvt37s1YnvmVLzfOVRV3oD5etWR8miVAG6GdF7pQtQJ8udOnXyf061spQiQJ+74IIL3DKWLl06xd+vddK8lRJBfRyp7yN1dqy0Cpdccok/N7luOlS7STcuWn/dnHl9KoXSDd1LL73kOmP2KHe68rnrJk/zVc523fRt3LjRja9YsaLdddddLme81lf5y8NRJ8la14EDB7rOmfW30jGIboY07MMPP3Q3iuXKlXM3mkrdAAAAAAAAkFlRzkU5V3LF+c603WcmoCiydrry0ergAoDMSjVWNm3aZOXLl7dcuXJFenEAjkkACMBzS+qx7QAAADInyhWQUY7JlDyz
0OIPAAAAAAAAAAAAiAEE/gAAAAAAAAAAAIAYQOAPAAAAAAAAAAAAiAEE/gAAAAAAAAAAAIAYQOAPAAAAAAAAAAAAiAEE/gAAAAAAAAAAAIAYQOAPAAAAAAAAAAAAiAEE/gAAAAAAAAAAAIAYQOAPAJDpLFiwwEqXLh3pxQAAAAAAAADOCOVcCEXgDwAQk66++mrLmTOn5cuXz/Lnz29VqlSxTz75xI1r0KCB/f777/5pb7/9dnv44YfDzmfnzp3WsWNHK168uJvPBRdcYL169XLjNE/NX6/s2bNbjhw5/O81TsqVK2dxcXG2YcOGoPn27NnTDR8xYkQ6bgUAqTV16lSrWLGiVahQwcaMGRNv/IcffmhVq1a1Sy+91G699VY7duyYG/7ll19ajRo13DXAu1bII4884uanz9x555128uTJs7o+AAAAAICMi3IupES2FE0NAEAYDab2TvfvWHDDsBR/ZvDgwe5Gx+fz2fTp0+3GG2+0evXqWdmyZZM9jy5dutj5559v69atswIFCtimTZvs22+/deN++umnoJuqQoUKhb3BUWH/O++8YwMHDnTvFSCYMGGCCygAiD4KyvXu3dvmzZtnBQsWtNq1a7vrR+HChd14XVP69Oljq1evdsMU+Js4caJ16NDB7r77bvv666/dw1D37t1t9uzZ1rRpU2vWrJm99NJLljVrVuvcubO99957LgAIAAAAAIg+0VjWRTkXkosWfwCAmKcaRy1btnQ3LOvXr7f58+e7v5Pjxx9/tDvuuMNNnyVLFrvwwgutW7duKfp+3SypkP/06dPu/eTJk61u3bpWsmTJVK0PgPS1aNEiV5uxVKlSrmZj8+bNXQAvkB60jhw5YqdOnbLDhw9biRIlbO/evW56Bf3kmmuucQFBue666yxbtmzuelSnTh3bvn17RNYNAAAAAJCxUc6FpBD4i0GpTU01Z84cq1mzplWvXt3VTP/zzz/dcFJTAdF/nj777LMul7fSy+ml3N74H92ITJkyxf755x+3fVLiyiuvdLWpdEPzyy+/pOr7K1WqZGXKlPEHDsaOHetusgBEpx07drign0d/Bwbq9JA1cuRId43Wg43SoyjtStGiRV0QUC0BFRD8/PPP4wX4dB81fvx4dw0HAAAAACClKOdCUgj8xWhqqrlz59ry5cttyJAhtm/fPv94LzWVagGsWbPGDfNqouuE/+ijj2zlypVWq1YtGz16tBuu1FRq5rtq1SoXfNBFAchMwTblyfYCairU9XJkb9682RX06jNqDbJ///6InafSt29fW7FihXtpmWHWr18/V4Mpb968dtNNN1n//v2tWLFiKZqH8qW3atXKpTZQCyClT1ChfUrpBmjcuHEu57r2e+vWrVM8DwDR4cSJE/bmm2+6AJ+ChLpuf/DBBy4gqP/vu+8+u+KKK1zAUKk9Az366KN22WWXWf369SO2/AAAAACAjIdyLiQXgb8Yk9rUVKLCqoMHD7q/Dxw44B9OairEgjMJtqn1nBdQU+Cwbdu2brim79Gjhyv4VX9NyrMdqfMU4Q0aNMj+/vtvVwNKqQ/efffdoGBpcijfuVpULlu2zP766y978MEHrWvXrrZ27doUzUd9f3355Zc2fPhw97c6ZAYQndSKL/B+R38HpizR74HujdQvggJ7euD6/vvv3birrrrKvvvuO1u4cKGrMBLYx8GoUaPctUPXAQAAAAAAUoJyLiQXgb8Yk9rUVPL666/b9ddf74YrkKGOPs80NVVat7CSl19+2c2zcuXKFJwh2c4k2ObRuaQObxs2bOje6wdR/TeF9uMUqfN02LBhVq1aNReMPHToUKq2Uyy76KKLrEWLFu66lFo6dhTwLViwoP38888pvrFS/nVdt0h/AEQ3dY6uSiC6Nut6OmPGDJcBIfC6rUwIekjy0jDr3kR2797t/tfnXn31Vbvrrrvc+2nTprl7IXV4rqAhAAAAAACpRTkXEkPgL5NJKDWV6CRVlF7DL7/8cleD
4ExSU6VHC6uvvvrK5s2b56bXxahTp04pWv+0DkRqfppWHaF664DodCbBtsCm8DfffLPb36Igm3fM6v+0ag2bmvNUwb4NGza4cy1Pnjz23HPPpcmyxBKlZp0+fbo7x8NRwPfo0aNBL+/ap2vR8ePH3UvXDgWGa9euneJlUKtQXROVphVA9FJgbujQoda4cWP326/7lcKFC7uHKl1/9Tuh9MpK56lrilI933vvve6zui5fcsklrmPz+++/3/V9IA899JC7D1LlEc1z4MCBEV5LAAAAAEBGRTkXEkPgL8akNjXVnj17XOulmjVruuluueUWf8qq1KamSo8WVmq6rFzG2bNnd+9TksM4PQKR+vvTTz/1Lx8yrsSCbR610lDTdY8KhfUDqx+3Xbt2ufzakTpPzzvvPDetXnfeeactXrz4jLdJLHj88cfd9Ucvpd9r0qSJPf3002GnVeA3d+7cQS9RBQBVBFChf/HixV3+cnWgXK5cuRQvj/ZzaEAZQHRS/wTq6Hzjxo12zz33uGG65nvX6549e7prsn43VHEoV65cbrjulTRcr8BW2ZrPli1b/PcTTz75ZITWDAAAAACQEVHOheQiz1AMp6ZSE12lpnrqqafCpqY655xzXGoq1UrX3woqKMhWvnz5oJRVXmoqBcRSkpoqJS2sVFh27bXXJtnCSi2a1OpPATx1ZKrPX3zxxSkORIoXiLztttviBSI17+QEIgP77UF0Cxds0/kSLtgmCrapdan67pOtW7e6zmrVusOjY0k/jKJxM2fOjNh5unPnTv/xqmXSsX42LbhhmEUbXbMSomuNcqJ73nnnHfcK55VXXknW9yX0edXASs0yAgAAAAAAIDKirayLci6kBC3+YkxqU1Ppc2rV16pVK6tevbp988039sQTT6RraqrUtLBSqz0F5BTE07qlJH9weqR6RObor8nb92php+PEs3fvXnfcis4Lr0VIJM7Txx57zE2r9KPqnHfAgAFptu0AAAAAAAAAABkDLf5iNDWVXoGUmsqj1FR6hWrXrp17hVJqqmhqYaXpREEbb9q0DkTqezRvBSIDv0OBSKUIRcYTGGw7ffq0C5R5wTa1aA0MtmlaBYC9/pq8fR9aI0bBQa+lnoJxd911V8TO0/fffz/Z3w0AAAAAAAAAiE0E/pBuUpvOMLEWVgqUqMnw5Zdf7lr9lS1bNqKBSGQsqQ22ycKFC+MNU2vUwBapAAAAAAAAAABEEvkKkW5Sm84wsIVV+/btg+Z59913uwCdWmM98MAD9tZbb0U01SMAAAAAAAAAAEC0oMUfMlQLq5w5c9rHH38cNaketS7q123Pnj3WpEkTa9CggQsQAgCAjKHZgGkWTWY91TLSiwAAAAAAADIwAn/IVNI6EKmgodJ/ApmNgudANPD5fJFeBAAAAAAAkMFRvoBYKncl8AcASLYcOXJYlixZXLreokWLuvekv0Ukb8rV4lrHYPbs2SO9OAAAAAAAIINReYLKFVS+oLIuyrkQyXKu48ePu2NR5a8qd00tAn8AkN6evdGixrOTzujj+tEpX7687dy50wX/0ss///xjf/75p/u7QIEClj9//qDxhw8fdv2CejdoRYoUCbox0w/kyZMnrUSJEu79qVOnbO/evW6YfjRDp0fGpf1YunRpy5o1a6QXBQAAAAAAZDAqT1C5grK6bd68OdKLA1iePHns/PPPd+WwqUXgLwOjT5qMb+rUqdanTx/XfPfxxx+3u+++O2j8hx9+aC+++KKL9qvPwXfffdf1c3jbbbfZzz//7IIZ6lfwtddecxeC5cuX23333eeCItWqVXPT0woGaU2BM/34KIimYzCtab433HCDO37z5ctn7dq1s/Hjx9s555zjxut8aNSokU2ZMsUN6927t11zzTXuM/Ldd9/Zp59+alu3brXPPvvMDRs8eLCVKlXKOnfuHPQ3Mj5d4wj6ITNI63uGDh062Pr16/2VJerWrWuTJ0+O0NoBAAAAQOSo/KlChQp2
4sSJSC8KMrmsWbNatmzZzrjBAoE/IEIU3FDAYt68eVawYEGrXbu23XjjjVa4cGE3XgV3KuBbvXq1G3brrbfaxIkTXQHe6NGjXSsoTdO+fXsXANFnVQg4atQoq1+/vg0cONDGjRtn99xzj2U2URcUt9jjpVZMj8Dy999/7wJ6ZcuWde+rV69u8+fPd8e+6Ljftm2bO4f0/fpbLfhy5crlbtBeeOEFe+WVV+yOO+5ww+S///2vLV682L1v06ZN2EJzAMhM9wwff/yxf/6qCNGkSZMIriEAAAAARD7gQsVixAoCf4jNdIZpkNIwvS1atMiqVKniWh5J8+bNbfbs2f7ghqiQ7siRI1aoUCHXis9LW6gCPFHt/WPHjvlrAKiFk4J+ohZQzz33XKYM/CFjUwpR77wQ/b19+3b/ex3vI0eOdC1aFMi79tpr7eqrr3bjhg0bZt26dYuXGlRpQVVYHm5+ABDt0uOewaNhs2bNctdVAAAAAACQ8RH4Q+waGkX9d/XxpWlwQ5T+UDX/mzVrZq1bt3bDLrzwQld4p2GTJk0iuIGYpFZ9b775pmvZ4qXs/OCDD6xx48auIPyrr76yLVu2RHoxASDNpMc9g2fGjBl2+eWXu4AhAAAAAADI+FLfOyCAsxbcUIGfavIruOFRH2Y7d+50w+fMmeOGjR071oYMGWJ16tRx/frQPB0ZUcmSJYMKtPW3hnlWrFjhcl2rn0Ed4zfddJNLD6rh6seqfPnydtVVV7lzp0WLFu4zau2nVn/h5gcAmfGewTNhwgTX3x8AAAAAAIgNBP6ADBbcCJQjRw7XT4/665HKlSu71k5LlixxtfrVKS2Q0dSrV8/WrFnjzolDhw651ig6ngNbuqxatcr++usv916F2BUrVrSWLVu6gu3Nmzfbt99+a1WrVrXp06e7aW644QZ7//333d8qDG/VqlWE1g4AouOeQf755x/78ssv47UCBAAAAAAAGReBPyCDBTdUq99LY6j+eqZOnWqVKlVy7/fs2eP+P3nypA0ePJj+/ZAhqfB66NChLnVnjRo1rE+fPla4cGHXek8tWVTY3bdvX7viiitccE8t+e69995E59mvXz/75JNP7KKLLrKNGzfa3XfffdbWB0hLuubrt0AVO8aMGRNv/IcffujOC6V8vPXWW13/bdKxY0f3OQ3X+eA5evSoCxJpfjrn9u7de1bXB5G7ZxBVjmjYsGG8flEBAAAAAEDGReAPyGDBDRXiqTBXw6pXr24FChSw++67z83zvffecwV9avnXoEEDu+666yK9mkCqqPXJL7/84oJ0XgBbBdReC5eePXva2rVrXVo7BTrUp1WgcuXKuZavnqJFi9rXX3/t5jd58mTLnTv3WV4j4MypUkfv3r1t7ty5tnz5cpfaed++ff7xSuOo35L58+e7IJFMnDjR/d+1a1dbt26d+5xagmkeouDhBRdcYBs2bLCbb77ZXnrppQitHc72PYOX5rN9+/YRXTcAAAAAAJC24nwqJUKiDhw44O8fSgUm0aLZgGkWTWadit/yIKLyT7ao0YfT7Gzi3EjEs5MivQQAUkkBOwX7Jk36//P44Ycftvr169ttt93m3uuWTgGgRYsWuf/btm3rAkRXX3110Hweeugh1xdsly5drGnTpm6eCgr9/fffbn7r16+3zCTqfjOeahnpRUAGFq3PLRkB2w4AAABArDyz0OIPAAAgA1DLLqV09OjvwH7f4uLibOTIkS6dpwJ/St8YGvQ7ePCgTZs2zT88cJ6FChVywT8AAAAAAABkXAT+AAAAYoDSOr755psuBa4CemoB+MEHH/jH6/3tt99uPXr0sDJlykR0WQEAAAAAAJA+CPwBAABkAGrFF9jCT397/V7KihUrXF9w559/vmXNmtVuuukmlx7U8/jjj9s555zj0n+Gm6da+6nVHwAAAAAAADIuAn8AAAAZQL169WzNmjUuUHfo0CGbMWOGNWvWzD9eKTtXrVplf/31l3s/Z84cq1ix
ovv7jTfesOXLl9vrr78eNM8bbrjB3n//ffe3WgfqPZDRTJ061R3rFSpUsDFjxsRLb1ujRg3/S/0hjBgxwo3TOaF+LZUet2PHjq7VrGzevNmlw61atao1b97c9Z8AAAAAAEBGQeAPAIBMLK0LzLdt22aNGjVy09euXdsWL14ckfWKRWrNN3ToUGvcuLHbvmq5V7hwYWvRooVL7anWe3379rUrrrjCBSwUrLj33nvdZ++//34XzKhbt6777Lhx49zw7t2728aNG+2iiy6yTz75xH0eyEhOnjxpvXv3trlz57rr0pAhQ2zfvn3+8errUq1h9dJ4tWpt06aNG3f33XfbK6+84gLqVapU8Z8XOreUEldpczt37myDBw+O2PoBAAAAAJBScT51+IJEHThwwBV2qgCtQIECFi2aDZhm0WTWqeAC44jLP9miRYOKvSyaLLhhWKQXIV1xbiTi2UkWTRpM7W3RJNbPjXAF5pUrV7Z58+a53zkF6pQaUsGkULpdKFeunM2fP9/Kly/vph01apQL/g0cONCKFi1q99xzjz3wwAMuGKiA08yZM2348OE2a9asiKwfkBz8ZmSs341QumYp2Ddp0v8v58MPP+yuS7fddlvYaXv16mULFy5073Xd2rNnj/v7hx9+sOeee85dt3Rd/Prrr934nTt3umD7unXrLCOI1ueWjIBtBwAAACBWnllo8QcAQCa1aNEi18pFKSLz5cvnUtrNnj077LQqFC9evLgL+snWrVtd4bpcc801NnHiRPd3XFycaykouhEpUaLEWVsfAJmPWrvqGubR34F9YQaaMGGCdejQwf/+wgsv9FdMUODQ+1y1atX81zT9n9D8AAAAAACIRgT+AADIpNKjwPyJJ56wd99910qXLu1a1qgFDQBEmlotf/bZZ9a+fXv/sLFjx7rWgnXq1LGcOXNa1qxZ3XCl1J0+fbrVqlXLdu3aZXnz5o3gkgMAAAAAkDIE/gAAqe7nbeXKla7Vl4ZfeeWV9ttvv/k/9/LLL7t5KmWa0j0icxSYjx8/3vWN9fvvv9tbb71ld911VwSXHECsU9+WgRUW9LeGhfr222+tbNmyrlKCR79PX331lS1ZssSaNWvmfgO9ShBTpkyxZcuWubTFZcqUOUtrAwAAAADAmSPwBwCZnPp56927t82dO9eWL1/ugjn79u3zj8+fP7+tWLHCvTS+UKFC1qZNGzeuf//+9vzzz7txXbp0scGDB7vhKkhVv3Fr1qyxn3/+2Tp16hSx9cPZLTB/++237ZZbbnF/t2zZ0h0bAGJPWlcYGTZsmEuxqeFNmza1P/74I1nLUa9ePfdbo+vXoUOHbMaMGe6alFSrZfH699PvoH6/1E+p7N2711V4EPVh6g0HAAAAACAjIPAHAJncmfTzllB/bqNHj7Z+/fpZ9uzZ3ftixYqdtfVB8qVHgblaxsyZM8f9/eOPP9JSBohB6VFh5O6777ZVq1a54a1atbIXX3wxWcuSLVs2l5qzcePGLmjYp08fK1y4sLVo0cKlM5bTp0+7lMTt2rUL+ux7773nb5neoEEDu+6669xwXcM0/OKLL7Y8efLQchkAAAAAkKFki/QCAAAybj9v//73v13LjIcfftgFDRVElA0bNrjWYCoYVoHvyJEjXQEqoktggbkKxh977DF/gbla8Kj1n1dgvnjx4ngF5m+++aZrFaMCe6/AXCleu3fvboMGDbIcOXK4aZByDab2tmiy4IZhkV4ERGmFEfEqjNx2222prjBSoEAB/2eOHDnipkuu1q1bu1cg9dHnyZIli0s/HEpBQr1C6XcutLIDAAAAAAAZBYE/AECK+nlTIa5n1KhR9sYbb7hA0WuvveYCfQoYqTXI4cOHXeHwrFmz7I477rDvvvsuosuPs1NgfumllwYdIwBiT3pUGJGXXnrJXn/9dTf866+/Tue1AAAAAAAgNkVdqk8VHJcrV85y5crl+v8ILAwIR/2FKBVP7ty5XTqxXr162dGjR89o
ngCQmZxJP28fffSRC/pJ+/bt7fvvv/cXAt90003ub6WO/OWXX87CmgAAorHCiH4fQiuMqELBAw884CqMePr27WtbtmxxqTVfffXVCC01zgae+QAAAAAgkwT+Pv74Y/fw/8wzz9iyZcusevXqrsB49+7dYacfP368KyDQ9GvXrrW3337bzeOJJ55I9TwBILM5k37ezj33XNePW2CfSKIWZPPnz3d/q+BNAUMAQGxIjwojgTp37uwChohNPPMBAAAAQCYK/A0bNsz1C6SUcJUrV3a1gfPkyWNjx44NO70KCq688krr2LGjq92ptEHqWySwdmdK5wkAmbmftxo1arj0jV4/b0rnJl4/b+3atQv67OjRo61Hjx6ugE2tOIYMGeKGq8+3FStWuLSPatHx1ltvRWTdAAAZo8KI+ob1TJkyxSpVqpTu64HI4JkPAAAAADJJH3/Hjx+3pUuXWr9+/YL6FWrSpEmCfQVdccUV9sEHH7iHPhVA/Pbbb65foi5duqR6nnLs2DH38hw4cMBf8K1XtIgzn0WT0xZn0SV64tpx0bWrouo4Tg+cG4lIYN/fcMMN7vW/yU7b1KlT/X/L1q1bg95LgwYN3HU2+CtOW/bs2e3DDz+MNzwU5wYQHufG2cNvRhLC7HvdT6uihyqM6Nh45JFH7JxzzrHmzZu7ih5q/edVGFm4cGHQ8aM+/FRhRMMKFizo+oXV3+rfT9NmzZrVtRDUdLF83KWXaN9mPPMBAAAAQOqk5DklagJ/e/futVOnTtl5550XNFzv161bF/YzqvWpz1111VWuD5GTJ0/afffd50/7kpp5yqBBg+y5556LN3zPnj3x+pKIpPPzR1dB1e7TpSyq5K5t0aK8FbZoEutpjzg3EhFl+55zAwiPc+Ps4TcjCQns+8suu8y++eabgMl227hx4/x/y5IlS4LeyyWXXOJaCAZ/xW4bOHBgpjru0svBgwctmvHMBwAAAADp/7wXNYG/1FD/US+++KJLL6cO3Ddu3GgPPfSQDRgwwJ566qlUz1e1RdVHRGDtT3UiX7RoUStQoIBFi60Ho6tGeLFT/+vrJSr4glshRdImu8qiSbFixSyWcW4kIsr2/SbbZ9Ek1s+NFgOnW7SY/uT/9/GF8Dg3zh5+M5IQZfv+6mmPWDSZ3/Jliya5cuWyWJPZn/kAAAAAIKXPe1ET+CtSpIhL7fPHH38EDdf74sWLh/2MHvSU4kV9SUnVqlXt8OHDds8999iTTz6ZqnlKzpw53SuUUsboFS18UZYKKkuUpcpSsqxo4YuuXRVVx3F64NxIRJTte86NzHtuxPq2PlOcG5nzvIi63wyJsn3PuZGxlicUz3wAAAAAkDopeU6JmieaHDlyWO3atW3OnDlBOUv1/vLLLw/7mSNHjsRbWT30idLApGaeAAAAAIC0xzMfAAAAAKS/qGnxJ0q10q1bN6tTp47ruH3EiBGuNucdd9zhxnft2tVKlSrl+mOQVq1a2bBhw6xmzZr+tC+qEarh3sNgUvMEAAAAAJwdPPMBAAAAQCYK/HXo0MF1pv7000/brl27rEaNGjZz5kx/R+1bt24Nqu3Zv39/i4uLc/9v377d9cegB8CBAwcme54AkKkMjbIcaRV7RXoJAADAWcQzHwAAAACkrzif8qMgUerovWDBgrZ///6o6ui92YBpFk1mnRpjUSX/ZIsWDaIsuLHghmEWyzg3MsZ5IZwbmffcmPVUy0gvQlRrMLW3RZNYPjei6byIut8M4XcjQ50b0frckhGw7QAAAADEyjNL1PTxBwAAAAAAAAAAACD1CPwBAAAAAAAAAAAAMYDAHwAAAAAAAAAAABADCPwBAAAAAAAAAAAAMYDAHwAAAAAAAAAAABADCPwBAAAkYOrUqVaxYkWrUKGCjRkzJmjcwYMHrUaNGv5XwYIFbcSIEW6cptVn4uLi7NChQ/7PbN682a6++mqrWrWqNW/e3Pbv33/W1wkAAADRex/5yCOPuM/pfvHOO++0
kydPuuGLFi2yOnXqWPbs2d28AQAAEkLgDwAAxGygbdu2bdaoUSM3fe3atW3x4sXJXhYVsvTu3dvmzp1ry5cvtyFDhti+ffv84/Pnz28rVqxwL40vVKiQtWnTxo2rX7++zZ4928qWLRs0zz59+liPHj1s9erV1rlzZxs8eHCqtxUAAACi05ncRzZr1sx++uknW7VqlR07dszee+89N7xkyZL29ttv22233Rax9QIAABkDgT8AABAV0iPQ9u9//9s6duzoPjNw4EDr379/spdHtaqrVKlipUqVsnz58rkWevqOcH744QcrXry4lS9f3r1XDW3v70Br1661a665xv2t/ydOnJjs5QEAAEDGcCb3kdddd51ly5bNVWhTC7/t27e74aVLl7bq1atbliwU5QEAgMRxtwAAAKJCegTaVGCiloKitJolSpRI9vLs2LHDLYtHf3sFL6EmTJhgHTp0SHKe1apV8wf79H9C8wMAAEDGlRb3kaoUN378eGvatGm6LisAAIg92SK9AAAAAOkVaHviiSdcrWmlBD19+rQLGKY1n89nn332WbLmPXToUPvXv/5lo0ePtpYtW1revHnTfHkAAACQMSR2H/noo4/aZZdd5jJbAAAApAQt/gAAQIYsIGnfvn2S06qWtPrU+/333+2tt96yu+66K9nfo35UAgOP+lvDQn377bcuxajSLyVFwcwpU6bYsmXL7N5777UyZcoke3kAAACQMZzpfeSoUaNcivjhw4efleUFAACxhcAfAACICukRaHv77bftlltucX+rhZ36+kuuevXq2Zo1a9xyHDp0yGbMmGHNmjVLdetD2bt3rwtcivocvOeee5K9PAAAAMgYzuQ+ctq0aTZmzBg3Tn39AQAApBSBPwAAEBXSI9CmFnVz5sxxf//4448pamGnghal5mzcuLHVqFHD+vTpY4ULF7YWLVq4tKSi9KGTJk2ydu3aBX1WqTwVmFRLw4oVK1rv3r3dcC2L3l988cWWJ0+eFLVABAAAQMKmTp3q7rMqVKjgAmeh9u3bZ23atLFKlSpZ5cqV7ddff3XDv/zyS3evp76me/Xq5Z9+5cqVLs2mxl155ZX222+/nZX7yIceesgta8OGDd1nVVlMVq1a5e4vP/nkE7v99tvt8ssvT/W2AgAAsY2qQwAAICoEFpCoIOSxxx7zF5Co8Eat/7wCksWLF8cLtA0YMMB27drlCnwUGBw2bJi9/PLL1r17dxs0aJDlyJHD3nzzzRQtU+vWrd0r0PTp0/1/Z8mSxQX3QimNp16htFzJDVoCAAAgeU6ePOkqWs2bN88KFixotWvXthtvvNHdSwYG1HQf1rFjRzty5IjLwqB7y7vvvtu+/vprK1eunLtvnD17tjVt2tT69+9vzz//vKuI9sYbb9jgwYPdPWd630du3Lgx7PyqVasWdnoAAIBQBP4AAEDUSOtA26WXXmo//PBDOi0tAAAAosGiRYtciz31pyzNmzd3AbzbbrvNvd+/f78tWbLEPvjgA/demRdk9+7dli9fPhf0k2uuucYmTpzoAn9xcXF28OBB/+dLlCgRobUDAABIGQJ/AAAAAAAAyLCUPtML+on+Duw7etOmTVakSBHr1KmT/fzzz3b11VfbkCFDrGjRonb48GFbvXq1S//5+eefu5Tz8u9//9sFAB9++GEXHFRwEQAAICOgjz8AAAAAAADEdCpQBe4effRRW7p0qe3Zs8fGjRvnWvWpFeB9991nV1xxhQsYZs2a1X1m1KhRLsWnsk088MAD/j6bAQAAoh2BPwAAAAAAAGRY6gs6sIWf/tYwjwJ65cuXtxo1arjU8W3atLEVK1a4cVdddZV99913tnDhQje+QoUKbvhHH33k+pqW9u3b2/fff3/W1wsAACA1CPwBAAAAAAAgw6pXr56tWbPGBfyUqnPGjBnWrFkz/3j1z1esWDGX8lPmz59vl1xyib+fP9HnXn31Vbvrrrvc+3PPPdd+/PFH9/ecOXOsYsWK
EVgzAACAlKOPPwAAAAAAAGRY2bJls6FDh1rjxo3t9OnT9thjj1nhwoVdi70xY8a41n/Dhw+3m2++2U6cOOFa9nXv3t19dtCgQTZz5kz39xNPPGGVKlVyf48ePdp69Ojh5lewYEEbO3Zs+C9/9kaLKs9OivQSAACACCPwBwAAzr5oKyDJP9miSsVekV4CAACSZerUqdanTx8XHHn88cft7rvvDhq/b98+u/POO239+vUuxeIXX3xhF154oTVo0MAOHjzoplErrU6dOtmIESPsnXfecUEbL01jv379rEOHDhFZN2QsrVu3dq9A06dP9/9dp04dW7ZsWbzPKSCoV6hGjRrZ8uXL02lpAQAA0g+pPgEAAAAAQIqdPHnSevfubXPnznUBkiFDhrhAX6CHHnrIBe7WrVtnS5YsseLFi7vhCxYscH2s6aUUim3btvV/pmvXrv5xKQ36KRCp+amfNrX0CqXlU/9uatVVuXJl+/XXX91wBSLVCkyvokWL2sMPP+yf36WXXuqClkolmVJpvTzDhg2zatWqueFNmza1P/74I8XLBEQa5wUAAOmLwB8AAAAAAEixRYsWWZUqVaxUqVKWL18+a968uc2ePds/fv/+/S7Y17FjR/c+T548ljdv3qB5qLWf+l1r2LBhVAYi9fenn36aquVLj+VRi8pVq1a54a1atbIXX3wx1dsLiATOCwAA0h+BPwAAAAAAkGI7duxwQT+P/lYgz6OAXpEiRVwaz5o1a1qvXr1coX+gTz75xPW7phZ1ng8//NC13lHAMCUtd9IjEKkWSV6fbymVHstToEAB/7gjR45YXFxcqpYNiBTOCwAA0h+BPwAAAAAAkOYU5FMh/6OPPmpLly61PXv22Lhx44KmmTBhQlA6T7XW+e2331zrnXr16tn9998f8UBkaqXX8rz00ktWtmxZe++996x///5nvJzA2cR5AQBA+iPwBwAAAAAAUqxkyZJBBfb6W8MCC/TLly/v+t1SAb367FIqPs/WrVvt999/tyuuuMI/rHDhwpYzZ073d/fu3W3x4sURDUSmp9QuT9++fW3Lli1211132auvvnpWlhU4WzgvAAA4c9nSYB4AAAAAACCTUYu8NWvWuIBfwYIFbcaMGfbUU0/5x5coUcKKFSvmWvAoADh//ny75JJLglrt3HLLLUFp+Xbt2uXvz2vy5MkuJeCZBCK1jOECkaJApJYpsUDkmUjv5encubNdc8019txzz1lm0mzANIsmsyK9ABkM5wUAAOmPFn8AAAAAACDFsmXLZkOHDrXGjRu7Qvo+ffq4FnstWrRw6fxk+PDhLiVf1apV7cCBA64VX2Crnfbt2wfNc8SIEXbppZda9erV7d1337WRI0emKhB56NAhF4hs1qxZ2ECkJCcQeSbSY3k2bNjg/3vKlCmp7n8QiBTOCwAA0h8t/gAAAAAAQKq0bt3avQJNnz7d/3edOnVs2bJlYT+7cOHCeMPUT5deZxqIPH36tD322GP+QOSYMWNcSyMvEHnixAkXrAwNRL7yyivx1uWee+5x6QabNGliDRo0cIGHSC3P4MGD7ccff7SsWbNamTJl7I033kjVtgIihfMCAID0R+APAAAAAADEhLQORCoYobSC0bI8CowAGR3nBQAA6YtUnwAAAAAAAAAAAEAMIPAHAAAAAAAAAAAAxAACfwAAAAAAAAAAAEAMIPAHAAAAAAAAAAAAxIBskV4AAAAAAAAQ5Z690aLKs5MivQQAAABAVCLwBwAAAAAAMpahcRZNGlTsZdFkwQ3DIr0IgNNgam+LFpwXAIDMglSfAAAAAAAAAAAAQAwg8AcAAAAAAAAAAADEAAJ/AAAAAAAAAAAAQAwg8AcAAAAAAAAAAADEAAJ/AAAAAAAAQAybOnWqVaxY0SpUqGBjxoyJN37fvn3Wpk0bq1SpklWuXNl+/fVXN/yFF16w888/34oUKRI0/bPPPmulS5e2GjVquNeCBQvO2roAAIDEEfgDAAAAAAAA
YtTJkyetd+/eNnfuXFu+fLkNGTLEBfoCPfTQQ9ahQwdbt26dLVmyxIoXL+6GN2vWzBYuXBh2vn379rUVK1a4V4MGDc7KugAAgKQR+AMAAAAAAABi1KJFi6xKlSpWqlQpy5cvnzVv3txmz57tH79//34X7OvYsaN7nydPHsubN6/7u27dulaiRImILTsAAEg5An8AAAAAAABAjNqxY4cL+nn09/bt2/3vN23a5FJ5durUyWrWrGm9evVyrQSTMmzYMKtWrZr16NHDDh06lG7LDwAAUobAHwAAAAAAAJBJKcinVoGPPvqoLV261Pbs2WPjxo1L9DMK9m3YsMGlDlULweeee+6sLS8AAEgcgT8AAAAAAAAgRpUsWTKohZ/+1rDAFoDly5e3GjVqWJYsWaxNmzau377EnHfeeZY1a1b3uvPOO23x4sXpug4AACD5CPwBAAAAAAAAMapevXq2Zs0aF/BTSs4ZM2ZYs2bN/OPVh1+xYsVcyk+ZP3++XXLJJYnOc+fOnf6/p0yZ4voQBAAAMRL4+/HHH23QoEEu/7ea+MuRI0ds2bJl5PcGAAAAgAyOZz4AyNiyZctmQ4cOtcaNG7tWfX369LHChQtbixYtXP9/Mnz4cLv55putatWqduDAAevevbsb/tRTT1np0qXtr7/+cv+rXz957LHH3LTq40+/BwMGDIjoOgIAgP/JZql0/Phxu/XWW12tHp/PZ3FxcdaqVSurUKGCSwvQtGlT92D45JNPpvYrAAAAAAARwjMfAMSO1q1bu1eg6dOn+/+uU6eOC+CFUkAvXFDv/fffT6clBQAAEWvxpxo/U6dOtddff93Wr1/vHgQ9uXLlsltuucU9IAIAAAAAMh6e+QAAAAAgEwX+PvzwQ+vRo4fdc889du6558Ybr1zgv/3225kuHwAAAAAgAnjmAwAAAIBMFPjbvXu3y+WdkKxZs7p+HwAAAAAAGQ/PfAAAAACQiQJ/ZcqUsXXr1iU4/rvvvrOLLrootbMHAAAAAEQQz3zRSelXK1as6PpaHDNmTLzxV199tVWqVMlq1KjhXv/8848b3qFDB/+wUqVKWdu2bd3wIUOG+IdrvoUKFTrr6wQAAAAgCgJ/HTt2tNGjR9sPP/zgH6bO3uWtt96yCRMmWNeuXdNmKQEAAAAAZxXPfNHn5MmT1rt3b5s7d64tX77cBe327dsXb7pPP/3UVqxY4V65c+d2wz7++GP/sMaNG/sDf48++qh/uP72hgMAAADImLKl9oNPPvmk/fjjj9awYUPXt4MeAHv16mV//vmn/f7779aiRQv3PqVee+019/Cya9cuq169ur366qtWr169BKf/+++/3bJMnDjRfXfZsmVtxIgR7vtTO08AAAAAyOx45os+ixYtsipVqrgWe9K8eXObPXu23Xbbbcmex7Fjx2zWrFk2cuTIeOMUzE3NPgUQRYb+fwWNqFGRawoAABmmxV+OHDls5syZNm7cOLvgggtcKhE9QFSrVs3eeecd++KLL1yfDymhGoiqvfjMM8/YsmXL3ANbs2bNXN8S4Rw/ftyuu+4627x5s6vRuH79elfz1HsISs08AQAAAAA880WjHTt2BK27/t6+fXvY1po1a9a0YcOGxRs3Y8YMu/zyy+Ol9Ny7d6+tXLnSmjRpkk5LDwAAACBqW/ypjwDVuFR6kM6dO7tXWtBDSffu3e2OO+5w79944w2bNm2ajR071vr27Rtveg1Xjc/vv//esmfP7oaVK1fujOYJAAAAAJkdz3wZ13//+18XENy/f7+1bt3a9dvXsmXLoFZ96u8vlFpUanpvOwMAAADIRC3+1EeA+nr4448/0mxBVJNz6dKlQbULs2TJ4t4H9ikR6PPPP3c1FXv27GnnnXeeXXrppfbiiy/aqVOnUj1PAAAAAMjseOaLTiVLlgxq4ae/NSyQ1yKwYMGC1r59e1u8eHFQQPfLL790Ab5QajkZLiAIAAAAIJP08Ve7dm1bs2ZNmi2I0oro4U0P
c4H0ft26dWE/89tvv7lOzTt16mTTp0+3jRs32r/+9S87ceKES/OSmnmK0tfo5Tlw4ID7//Tp0+4VLeLMZ9HktMXFSibbNBcXXbsqqo7j9MC5kTHOC+HcyLznRnSdF8K5kVnPjWg6L4RzI3GcG2d3eXjmiz516tRx+2Tbtm0usKe0nWqZ6S3zyZMnXZ+IRYoUcUFRbbOuXbv6x0+dOtUaNGhgefPmDVpPpUVdu3atNWrUKJH15/qUGK5PZw+/3Unh3MiM5wUAIPadTsHvWKoDf15n6qpxefvtt1u2bKme1RmtaLFixezNN990fUvowVQ1HtWpux4CU2vQoEH23HPPxRu+Z88eO3r0qEWL8/NH0d2THhZP/6+viaiQu7ZFi/JW2KJJLPZ3EohzI2OcF8K5kXnPjag6L4RzI9OeG9F0XgjnRuI4NxJ38ODBNJ0fz3zRqX///nb11Ve7baMgqAKf6gdx6NChVqBAAWvbtq0LAGp406ZNrWHDhv5j9YMPPnD7NPTYfffdd9089u3bl/AXF+D6lBiuT2cPv91J4NzIlOcFACD2HUzB816qn9z04KcUKvfee689+OCDLp2I0sEEiouLc52DJ4dqJOpBLjSVjN4XL1487GdKlCjh+h8I7FD+kksusV27drnajamZp/Tr1891Dh9Y+7NMmTJWtGhR9yAVLbYejK5aZcVOxe9UPqJ8Sy1abLKrLJqo8CSWcW5kjPNCODcy77kRVeeFcG5k2nMjms4L4dxIHOdG4nLlypWm8+OZLzp16dLFvQIpfadnxYoVCX520qRJYYc/+uijSX/xAa5PieH6dPbw250Ezo1MeV4AAGJfrhQ876U68Hfuueda4cKFXUfhaSFHjhyu9uacOXNcDUVRDUa9v//++8N+5sorr7Tx48e76fRAKr/88ot7ONT8JKXzlJw5c7pXKH2H9z3RwBdl6SSyRFm6DSXciBa+6NpVUXUcpwfOjYxxXgjnRuY9N6LrvBDOjcx6bkTTeSGcG4nj3Di7y8MzH4JxfUoM16ezh9/upHBuZMbzAgAQ+7Kk4Hcs1YG/+fPnW1pTjctu3bq5fgvq1avnUsscPnzY7rjjDjdefROolqnSskiPHj1s5MiR9tBDD9kDDzxgGzZscB29qzZqcucJAAAAAIiPZz4AAAAAyHjOficNiejQoYPrU+Hpp592qVtq1KhhM2fO9HfUvnXr1qCoplKxzJo1y3r16mXVqlVzD4h6IHz88ceTPU8AAAAAwNnBMx8AAAAARHHgT52Fq3PwadOm2ZYtW9ywsmXL2g033GCdOnUK6ochuZSOJaGULOFqnF5++eX2448/pnqeAAAAAIDweOYDAAAAgIwl1cmt9+/f7/pbuPPOO2327Nl24sQJ91Kn4kqpctVVV7kO0gEAAAAAGQ/PfAAAAACQiVr8Pfnkk7Z06VJ79dVXrXv37pY9e3Y3XA+CY8aMcX0uaBqNBwAAAABkLDzzRV6zAdMsWsyK9AIAAAAASN8Wf5MmTbJ//etf7uU9AIr+Vgfsen322WepnT0AAAAAIIJ45gMAAACATBT427dvn1WsWDHB8ZUqVbI///wztbMHAAAAAEQQz3wAAAAAkIkCfxdddJF9/vnnCY7XuAsvvDC1swcAAAAARBDPfAAAAACQiQJ/SveiDt5btGjh/t+8ebN7zZo1y1q2bOk6fL///vvTdmkBAAAAAGcFz3wAAAAAkPFkO5OHwN27d9tLL73kHvwCqc+Hp59+2vX5AAAAAADIeHjmAwAAAIBMFPiTZ5991tXw/Oqrr2zLli1uWNmyZa1JkyZWpEiRtFpGAAAAAEAE8MwHAAAAAJko8Cd62Lv11lvTZmkAAAAAAFGFZz4AAAAAyAR9/KnG5xNPPJHg+CeffNLmzp2b2tkDAAAAACKIZz4AAAAAyESBvwEDBti2bdsSHL99+3Z74YUXUjt7AAAAAEAE8cwHAAAAAJko8Ld69WqrX79+guPr1q1rq1atSu3sAQAA
AAARxDMfAAAAAGSiwN+xY8fs+PHjiY4/cuRIamcPAAAAAIggnvkAAAAAIBMF/i699FKbNGlS2HE+n88mTpxolStXPpNlAwAAAABECM98AAAAAJCJAn8PPPCAfffdd3bLLbe4FDAnT550L6V60bAffvjBTQMAAAAAyHh45gMAAACAjCdbaj/YuXNn+/XXX12H76rpmSXL/8cQT58+bXFxcda/f3/r1q1bWi4rAAAAAOAs4ZkPAAAAADJR4E+eeeYZ9zCo9C+//fabG3bhhRda27Zt3f8AAAAAgIyLZz4AAAAAyCSpPj162HvkkUfswQcftBIlSrgaodOmTbMDBw6kzRICAAAAACKGZz4AAAAAiNEWfyNHjrRXXnnFvv/+eytSpIh/+NSpU61du3Z24sQJ18m7aLoff/wxaDoAAAAAQPTimQ8AAAAAMlGLv88//9zV9gx8sFPn7nfddZdlzZrVxo4d6zp9f+mll2zLli02cODA9FhmAAAAAEA64JkPAAAAADJR4O/nn3+2yy67LGjYvHnzbM+ePdarVy/XsXuVKlXsscces/bt29v06dPTenkBAAAAAOmEZz4AAAAAyESBv3379lmZMmWChs2ZM8fi4uLsxhtvDBp+5ZVX2tatW9NmKQEAAAAA6Y5nPgAAAADIRIG/8847z3bt2hU0bMGCBZYnTx6rXr160PAcOXK4FwAAAAAgY+CZDwAAAAAyUeCvTp069u6779rBgwfd+59++skWLVpkzZo1s2zZsgVNu27dOitdunTaLi0AAAAAIN3wzAcAAAAAGVvwk1sSnnnmGatbt65VqFDB9euwdOlSl/KlX79+8aadNGmSXXPNNWm5rAAAAACAdMQzHwAAAABkohZ/VatWtblz51rt2rVtx44drtN3deau94Hmz5/vUsHccsstab28AAAAAIB0wjMfAAAAAGSiFn9yxRVX2LRp0xKd5uqrr7bVq1efyXIBAAAAACKAZz4AAAAAyCQt/gAAAAAAAAAAAABEJwJ/AAAAAAAAAAAAQAwg8AcAAAAAAAAAAADEAAJ/AAAAAAAAAAAAQAwg8AcAAAAAAAAAAADEAAJ/AAAAAAAAAAAAQAwg8AcAAAAAAAAAAADEAAJ/AAAAAAAAAAAAQAwg8AcAAAAAAAAAAADEAAJ/AAAAAAAAAAAAQAwg8AcAAAAAAAAAAADEAAJ/AAAAAAAAAAAAQAwg8AcAAAAAAAAAAADEAAJ/AAAAAAAAAAAAQAwg8AcAAAAAAAAAAADEAAJ/AAAAAAAAAAAAQAwg8AcAAAAAAAAAAADEAAJ/AAAAAAAAAAAAQAwg8AcAAAAAAAAAAADEAAJ/AAAAAAAAAAAAQAwg8AcAAAAAAAAAAADEAAJ/AAAAAAAAAAAAQAwg8AcAAAAAAAAAAADEAAJ/AAAAAAAAAAAg05g6dapVrFjRKlSoYGPGjIk3vmHDhla9enWrXLmyPf/88/7hc+bMsZo1a7pxTZs2tT///DPoc4888ogVKVLkrKwDkBACfwAAAAAAAAAAIFM4efKk9e7d2+bOnWvLly+3IUOG2L59++IFBleuXGmrVq2y6dOnu+nk4Ycfto8++siNq1Wrlo0ePdr/mZ9//tl27dp11tcHCEXgDwAAAAAAAAAAZAqLFi2yKlWqWKlSpSxfvnzWvHlzmz17dtA0BQoUcP+fOHHCveLi4tx7/X/w4EH394EDB6xEiRL+zzz22GP24osvntV1AcIh8AcAAAAAAAAAADKFHTt2uKCfR39v37493nRXXHGFFStWzJo0aWI1atRww15//XW7/vrrrWTJkrZ69Wrr0qWLG/7xxx9bnTp17Pzzzz+LawJkoMDfa6+9ZuXKlbNcuXJZ/fr1XQQ+OdTEVhH3tm3bBg33+Xz29NNPu+h77ty53Ym6YcOGdFp6AAAAAEBCeN4DAABARvD999+7IOGKFStszZo1btjw4cPtyy+/dMMvv/xyGzRokB0+fNheeeUVe/zxxyO9yEB0
Bv4UGVd+3WeeecaWLVvmOsls1qyZ7d69O9HPbd682XWc2aBBg3jj/v3vf7sT74033rCFCxda3rx53TyPHj2ajmsCAAAAAAjE8x4AAAAiTa31Alv46W8NCyd//vx27bXX2syZM23Pnj22du1aq1mzpht3yy23uODgb7/9Zhs3brRLLrnEVXD766+/rFq1amdtfYCoD/wNGzbMunfvbnfccYdVrlzZPbzlyZPHxo4dm+BnTp06ZZ06dbLnnnvOLrjggni1P0eMGGH9+/e3Nm3auBPuvffecxH5yZMnn4U1AgAAAAAIz3sAAACItHr16rkWfAr4HTp0yGbMmOEqjnn279/vgnxy7NgxmzVrllWqVMnOOeccN3zTpk1u3Jw5c6xixYpWtWpV++OPP1xlNb003apVqyK2fkBUBf6OHz9uS5cudalZPFmyZHHvf/jhhwQ/9/zzz7tcu3fddVe8cToJd+3aFTTPggULupQyic0TAAAAAJB2eN4DAABANMiWLZsNHTrUGjdu7Pru69OnjxUuXNhatGjhKpD9/fff1rx5c1eprHbt2taoUSO74YYb3OdGjRplrVq1cpkrvvnmG3viiScivTpAPNksiuzdu9fV5jzvvPOChuv9unXrwn7m22+/tbffftvl2Q1HD4HePELn6Y0LpSi+Xp4DBw64/0+fPu1e0SLOfBZNTlucRZfoiWvHRdeuiqrjOD1wbmSM80I4NzLvuRFd54VwbmTWcyOazgvh3Egc50bGWp5ofd4TnvlSjutT4rg+Zc7zQjg3Ms65EcvnBYCUUyBPr8BrxNSpU/3vQ/uh9q4hN910k3uFG+dRGnuuOUhrKTmmoirwl1IHDx60Ll262FtvvWVFihRJs/mqQ06lkQmlZrzR1E/E+fmj6O5JF7TTpSyq5K5t0aK8FbZoklQfKhkd50bGOC+EcyPznhtRdV4I50amPTei6bwQzo3EcW4k/XwUS9LreU945ks5rk+J4/qUOc8L4dzIOOdGLJ8XAIDYdzAFz3tRFfjTw1zWrFldPtxAel+8ePF40//6668uZ66a1oZGPdXsdv369f7PaR4lSpQImqea8YbTr18/1+F8YO3PMmXKWNGiRa1AgQIWLbYejK5aZcVO/a9D1KjgW2rRYpNdZdFEqZJiGedGxjgvhHMj854bUXVeCOdGpj03oum8EM6NxHFuJC5XrlwWzaLleU945ks5rk+J4/qUOc8L4dzIOOdGLJ8XAIDYlysFz3tRFfjLkSOHy5mrTjHbtm3rf7DT+/vvvz/e9OpQc/Xq1UHD1Km7Ip//+c9/3INb9uzZ3cOg5uE9+OmhbuHChdajR4+wy5EzZ073CqX+J/SKFr4oSyeRJcrSbSjhRrTwRdeuiqrjOD1wbmSM80I4NzLvuRFd54VwbmTWcyOazgvh3Egc50bGWp5ofd4TnvlSjutT4rg+Zc7zQjg3Ms65EcvnBQAg9mVJwe9YVAX+RLUuu3XrZnXq1LF69erZiBEj7PDhw3bHHXe48V27drVSpUq51CyKcF566aVBny9UqJD7P3D4ww8/bC+88IJVqFDBypcvb0899ZSVLFnS/7AJAAAAAEh/PO8BAAAAQPqKusBfhw4dXL8KTz/9tOuMXbU2Z86c6e+sfevWrSmuofPYY4+5h8l77rnH/v77b7vqqqvcPKM9FQ4AAAAAxBKe9wAAAAAgkwX+RGlewqV6kfnz5yf62XfeeSfesLi4OHv++efdCwAAAAAQOTzvAQAA4GxpNmCaRZNZT7WM9CIgEyC5NQAAAAAAAAAAABADCPwBAAAAAAAAAAAAMYDAHwAAAAAAAAAAABADCPwBAAAAAAAAAAAAMYDAHwAAAAAAAAAAABADCPwBAAAAAAAAAAAAMYDAHwAAAAAAAAAAABADCPwBAAAAAAAAAAAAMYDAHwAAAAAAAAAAABADCPwBAAAAAAAAAAAAMYDAHwAAAAAAAAAAABADCPwBAAAAAAAAAAAAMYDAHwAAAAAA
AAAAABADCPwBAAAAAAAAAAAAMYDAHwAAAAAAAAAAABADCPwBAAAAAAAAAAAAMYDAHwAAAAAAAAAAABADCPwBAAAAAAAAAAAAMYDAHwAAAAAAAAAAABADCPwBAAAAAAAAAAAAMYDAHwAAAAAAAAAAQARMnTrVKlasaBUqVLAxY8YEjTty5Ig1b97cKlWqZFWqVLFXX33VP65Dhw5Wo0YN9ypVqpS1bdvWDZ8/f74VKlTIP2748OFnfZ0QWdki/P0AAAAAAAAAAACZzsmTJ6137942b948K1iwoNWuXdtuvPFGK1y4sH+avn37WqNGjezQoUNWp04dFwi86KKL7OOPP/ZP07lzZ2vSpIn/vf7+9NNPz/r6IDrQ4g8AAAAAAAAAAOAsW7RokWvJpxZ7+fLlc0G92bNn+8fnyZPHBf1E49UycOfOnUHzOHbsmM2aNcvf4g8g8AcAAAAAAAAAAHCW7dixwwX9PPp7+/btYafdtm2brVq1ymrVqhU0fMaMGXb55Ze79J4epfusXr26tW7d2jZu3JiOa4BoROAPAAAAAAAAAAAgSqlVn/r0GzJkiOXNmzdo3IQJE9w4jwKDmzdvtpUrV9odd9xht912WwSWGJFE4A8AAAAAAAAAAOAsK1myZFALP/2tYYF8Pp917drVWrRoYe3atQsa988//9iXX37pWvZ5ChQo4NKCivoL3Lp1q506dSrd1wXRg8AfAAAAAAAAAADAWVavXj1bs2aNC/gdOnTIpe1s1qxZ0DT9+vVzff31798/3uenT59uDRs2tPz58/uH/fHHH/6/v/32WytatKhlzZo1ndcE0SRbpBcAAAAAAAAAAAAgs8mWLZsNHTrUGjdubKdPn7bHHnvMChcu7Fr3jRkzxg0bPHiwVa5c2WrUqOE+o/decFBpPtu3bx80Tw174403LHv27K7l3/vvvx+RdUPkEPgDAAAAAAAAAACIAKXpDEzV6bXkC0z1mZCPP/443rAHHnjAvZB5keoTAAAAAAAAAAAAiAEE/gAAAAAAAAAAAIAYQOAPAAAAAAAAAAAAiAEE/gAAAAAAAAAAAIAYQOAPAAAAAAAAAAAAiAHZIr0AAAAAAAAAAAAAMe/ZGy1qPDsp0kuAdEKLPwAAAAAAAAAAACAGEPgDAAAAAAAAACCCpk6dahUrVrQKFSrYmDFjgsYdOXLEmjdvbpUqVbIqVarYq6++6h/3yCOPuM9VrVrV7rzzTjt58qQbPmTIEKtRo4Z7aXyhQoXO+joBiAwCfwAAAAAAAAAARIiCdb1797a5c+fa8uXLXdBu3759QdP07dvX1q1bZwsXLrTXXnvNNm7c6IY3a9bMfvrpJ1u1apUdO3bM3nvvPTf80UcftRUrVriX/m7btm1E1g3A2UfgDwAAAAAAAACACFm0aJFryVeqVCnLly+fa903e/Zs//g8efJYo0aN3N8arxZ8O3fudO+vu+46y5Ytm8XFxVmdOnVs+/bt8eY/YcIE69Chw1lcIwCRROAPAAAAAAAAAIAI2bFjhwv6efR3uACebNu2zbXuq1WrVrxWg+PHj7emTZsGDd+7d6+tXLnSmjRpkk5LDyDaEPgDAAAAAAAAACDKKZWnWu4pFWjevHmDximd52WXXWb169cPGj5x4kRr3bq1Zc+e/SwvLYBIIfAHAAAAAAAAAECElCxZMqiFn/7WsEA+n8+6du1qLVq0sHbt2gWNGzVqlK1du9aGDx8eb94ff/wxaT6BTIbAHwAAAAAAAAAAEVKvXj1bs2aNC/gdOnTIZsyYYc2aNQuapl+/fq6vv/79+wcNnzZtmo0ZM8b146e+/gLt3r3bBQQbN258VtYDQHQg8AcAAAAAAAAAQIQoYDd06FAXoKtRo4b16dPHChcu7Fr3qf+/33//3QYPHmyLFi1y4/WaNWuW++xDDz1k+/bts4YNG7rhAwcO9M/3s88+szZt2ljWrFkjuHYAzrbgKgAAAAAAAAAAAOCsUj98egWaPn16UKrP
cDZu3JjgPHv06JGGSwggo6DFHwAAAAAAAAAAAGzq1KlWsWJFq1ChgksjG+jIkSPWvHlzq1SpklWpUsVeffVV/7i9e/e6Vqv63E033WRHjx51w/W/3mu4xms6pC8CfwAAAAAAAAAAAJncyZMnrXfv3jZ37lxbvny5DRkyxKWSDdS3b19bt26dLVy40F577TV/q9OXXnrJbr75ZtuwYYNdcMEF/qCh/td7Ddd4TZdWgUjp2bOnnXfeeVanTp2g4V9++aVLf6sAZa9evfzDn332WStdurQ/be6CBQss1hD4AwAAAAAAAAAAyOTUj6QCZaVKlbJ8+fK51n2zZ8/2j8+TJ481atTI/a3xCsjt3LnTvf/888+tS5cu7u/OnTvbF198kejwtApEduzYMSgtrpw+fdruvvtumzx5sv3000926NChoPVQ8HLFihXu1aBBA4s1BP4AAAAAAAAAAAAyuR07drign0d/b9++Pey027Zts1WrVlmtWrXc+/3791vBggXjfS5wnoUKFbK///47zQKRcuWVV1rhwoWDhu3du9dNX65cOff+mmuusYkTJ1pmQeAPAAAAAAAAAAAAyXLs2DHr0KGDa4GXN2/eqAhEBipatKgdPnzYVq9ebadOnXKtDgM/N2zYMKtWrZr16NHDtQaMNdkivQAAAAAAAAAAAGQGDab2tmiy4IZhkV4ERJGSJUsGBcj0d7169YKm8fl81rVrV2vRooW1a9fOP1yt/bxWf/qc5hU4zyJFirjWfmr1l97i4uLsgw8+sPvuu8+lC1U6T68vQgX7nnrqqf9r7z6go6j2B47/Qu+9hSKERwRBivT2BBTpzcJD0EcRxIL8QUAEpAuiFEGKICAgT0Tg+USlBGliQ5AmICAoTelSFKMCkvmf331n9u1uNiGBJDs7+/2cs4fs7Ozkbpjf3Pvbe+de8/PAgQNl1KhRpgPTTbjjDwAAAAAAAAAAIMxpJ9/evXtNR53eCbd69Wpp2rSpzz6DBw82a/0NHTrUZ3urVq3kX//6l/lZO91at24dcLs+v5WOSLtD8Ubq168vX3zxhWzZskWqVKki0dHRZnvhwoUlffr05vHYY4/J119/LW5Dxx8AAAAAAAAAAECYy5Ahg0yaNEkaNWpkOsv69+9v1s/Tu/t02s2ffvpJXnnlFbP2nr6ujzVr1ng6BJctWyZlypQxd9f16NHDbH/88cfNc92urw8aNChFOyITcvbsWfOvvm/atGnSvXt38/zUqVOefT744AOzhqDbMNUnAAAAAAAAALjQihUrzBf3cXFx8vzzz3u+iLf16tVL/v3vf0uJEiVk27Ztnu06Ld7ly5fNz/qF+yOPPCJTpkwx62ItWLBA0qVLJ4UKFTJ38ejdMwDco02bNubhbdWqVT5TfSa0rt6mTZvibc+aNassX778ljsi9TqmU3PaHZFz5841d/917drVdD6eP39eihcvLpMnT5b27dvLuHHjJCYmxhxnyJAhUq5cOfOzHmPXrl1mOtDbb79dZs+eLW7jyDv+ZsyYIaVKlZIsWbJIrVq1TO9xQubMmWMqorx585pH48aN4+2vJ+Lw4cMlMjLSnGS6z6FDh9LgkwAAAAAAvJHvAQCQNnRdq379+smGDRtk586dZg0r/WLcW6dOnXy+0Ld99tln5otxfZQtW1batWtntmvH4e7du812ncbvpZdeSrPPAyA8aSfkwYMHzV2DPXv2NNv0umVP+amDEfQuvqtXr5o7Etu3b2+2awfg/v37zeOf//yn53g6YGHPnj3mWqYDH/Llyydu47iOvyVLlpgKacSIEbJjxw6pXLmyuXXTvi3T3yeffCIdO3aUjRs3yubNm83olCZNmvjM+zp+/HiZOnWqzJo1y8znmj17dnPMP//8Mw0/GQAAAACEN/I9AADSjg6W0SnsihUrJjly5JDmzZvLxx9/7LNPvXr1zN0zCdE698iRI3L33Xeb57ly5fK89vvvv5s7ZgAAzuK4jj+9XVznfO3W
rZuUL1/eJG+6UOS8efMC7r9o0SJ5+umnzVyyequm3t6pt3yuX7/eM/pTb0PXhSbbtm0rlSpVkoULF5r5aG/29lIAAAAAQPKR7wEAkHa0PtROP5v+7D14Jil0Pa4HH3zQTO1pe/nll6VkyZKmztU6GADgLI7q+NNbMbdv326mZrFppaLPdXRnUuhIk2vXrnluz9QRKadPn/Y5Zu7cuc2UMkk9JgAAAADg1pDvAQAQepYuXSodOnTw2TZo0CA5duyYdO/eXaZNmxa0sgEAAssgDvLzzz/L9evX4y0Iq88PHDiQpGPoIrU6t6ud+GkSaB/D/5j2a/6uXLliHrZff/3V/KsjS/XhFBESeBHNYIkTp93a75x+7Qhn/Vc56jxODcRGaMSFIjbCNzacFReK2AjX2HBSXChiI3HERmiVx6n5niLnSz6uT4nj+hSecaGIjdCJDTfHRUKKFCli1ruyP7v+XKNGjXh/C/u5//bjx4+b99SuXTvg30/XB9Q6WafwRmjGRTjEBvVGIlz+f+82yYlVR3X83Sq9zfzdd98160DoQvE3a9y4cTJq1Kh428+dO+eodSJuy+msi9bZuP9NHeAIWauJU0RJwnOlB0NCa6i4BbERGnGhiI3wjQ1HxYUiNsI2NpwUF4rYSByxkbjLly+Lm6VUvqfI+ZKP61PiuD6FZ1woYiN0YsPNcZGQUqVKye7du+Wbb74xa/OtXLlSnnjiiXh/i/Pnz5s76v23L1iwQFq0aGHqR9vhw4eldOnS5ue3337b/I5w/Nu6JS6U2///qDcS8UZtcZT7Pwx2CRwtOfmeozr+ChQoIOnTp5czZ874bNfnOkIlMRMnTjSJ4Lp168y6Djb7fXqMyMhIn2PqOhGBDB482Cw47z36UxeRL1iwoM8CtsF2/LKDRgeISKHryZsjPNVZ28Upjkh9cZJChQqJmxEboREXitgI39hwVFwoYiNsY8NJcaGIjcQRG4m71c6wcMn3FDlf8nF9ShzXp/CMC0VshE5suDkubrS+rk7VqXeKDBgwwKyZ27JlS5kzZ465i17X3f34449N51/16tVl0qRJ0r59e/Pe1atXm7V0vf92L7zwgmzZssXU6cWLF5eZM2eG7d/2Zh2R8+Ikbv//o94InTqj4dfjxUk+aTlRQjXfc1THX6ZMmaRatWpmofZ27dqZbfbC7c8880yC7xs/fryMHTtW1qxZYyoob1FRUSYZ1GPYiZ8mdVpBPfXUUwGPlzlzZvPwp+tPeC9kG2yWk24LNpNJOGv0hN447RSWs/6rHHUepwZiIzTiQhEb4RsbzooLRWyEa2w4KS4UsZE4YiO0yuPUfE+R8yUf16fEcX0Kz7hQxEboxIab4yIxWufa9a5NO/Rsb731VoLv1frU35tvvpnCJQw/ToqLcIgN6o3EUGeEUmwkpzyO6vhTOuqyS5cuJqGrWbOmGVUSGxtrRp+ozp07S7FixczULOqVV16R4cOHyzvvvGNuLbfXcciRI4d5RERESN++fWXMmDESHR1tEsNhw4aZES3+lR4AAAAAIPWQ7wEAAABA6nJcx5/eeq7zRmtyp0mdjtqMiYnxLNaui8p692zq7eRXr16Vhx56yOc4uqjsyJEjzc8DBw40yWTPnj3l0qVLUr9+fXNMp0+FAwAAAABuQr4HAAAAAGHW8ad0mpeEpnrRhdy9HT169IbH01Ggo0ePNg8AAAAAQPCQ7wEAAABA6nHWJKUAAAAAAAAAAAAAbgodfwAAAAAAAAAAAIAL0PEHAAAAAAAAAAAAuIAj1/gDAAAAAAAAACRN0xdXipOsGdYy2EUAgLDFHX8AAAAAAAAAgFS3YsUKKVu2rERHR8vcuXPjvd6rVy8pXLiwVK9e3Wd7p06dzPvuvPNOGTx4sM9rEydONK+VL19eJk+eHJJlAYCURMcfAAAAAAAAACBV/fXXX9KvXz/ZsGGD7Ny5UyZMmCDnz5+P16m2atWqeO/t3LmzHDhwwLzvyy+/NMdQ69at
k40bN8revXtl37598sgjj4RcWQAgpdHxBwAAAAAAAABIVVu3bpUKFSpIsWLFJEeOHNK8eXP5+OOPffapV6+e5M+fP957mzVrJhEREZIxY0apUqWKnDhxwmx/4403zF13ul0VKlQo5MoCACmNjj8AAAAAAAAAQKo6efKk6Wiz6c92p1lSXb58WVauXCkNGzY0zw8dOmTutKtZs6Y0adJEDh48GHJlAYCURscfAAAAAAAAAMDRLMuSrl27ylNPPSUlSpTwTNkZGxtr7uDr37+/dOvWLezKAgD+6PgDAAAAAAAAAKSqokWL+txVpz/rtqR6/vnnJW/evKZTzftOvQceeMD83LRp0yTfZeeksgBASqPjDwAAAAAAAACQqnQKzL1795pOtt9++01Wr15tOsiSYtasWbJz506ZOXOmz/Y2bdrIJ598Yn7WO+1KliwZcmUBgJRGxx8AAAAAAAAAIFVlyJBBJk2aJI0aNZIqVaqYu+Xy588vLVq0MGvuKZ0+s06dOrJ7924pXry4LFu2zGx/5pln5OjRo1KjRg3z3vnz55vtPXr0kF27dsmdd94pvXv3ljlz5oRcWQAgpWVI8SMCAAAAAAAAAOBH74rTh7dVq1Z5fl6wYEHA9+n6eYFkzpxZlixZEvJlAYCUxB1/AAAAAAAAAAAAgAvQ8QcAAAAAAAAAAAC4AB1/AAAAAAAAAAAAgAvQ8QcAAAAAAAAAAAC4AB1/AAAAAAAAAAAAgAtkCHYBAAAAAAAAAAAuMvJ+cZScy8Uxyj4b7BIAcDnu+AMAAAAAAAAAAABcgI4/AAAAAAAAAAAAwAXo+AMAAAAAAAAAAABcgI4/AAAAAAAAAAAAwAXo+AMAAAAAAAAAAABcgI4/AAAAAAAAAAAAwAXo+AMAAAAAAAAAAABcgI4/AAAAAAAAAAAAwAXo+AMAAAAAAAAAAABcgI4/AAAAAAAAAAAAwAXo+AMAAAAAAAAAAABcgI4/AAAAAAAAAAAAwAXo+AMAAAAAAAAAAABcgI4/AAAAAAAAAAAAwAXo+AMAAAAAAAAAAABcgI4/AAAAAAAAAAAAwAXo+AMAAAAAAAAAAABcgI4/AAAAAAAAAAAAwAXo+AMAAAAAAAAAAABcgI4/AAAAAAAAAAAAwAXo+AMAAAAAAAAAAABcgI4/AAAAAAAAAAAAwAXo+AMAAAAAAAAAAABcgI4/AAAAAAAAAAAAwAXo+AMAAAAAAAAAAABcgI4/AAAAAAAAAAAAwAXo+AMAAAAAAAAAAABcgI4/AAAAAAAAAAAAwAXo+AMAAAAAAAAAAABcgI4/AAAAAAAAAAAAwAXo+AMAAAAAAAAAAABcgI4/AAAAAAAAAAAAwAXo+AMAAAAAAAAAAABcgI4/AAAAAAAAAAAAwAXo+AMAAAAAAAAAAABcgI4/AAAAAAAAAAAAwAUc2fE3Y8YMKVWqlGTJkkVq1aolW7duTXT/ZcuWSbly5cz+FStWlFWrVvm8blmWDB8+XCIjIyVr1qzSuHFjOXToUCp/CgAAAACAP/I9AAAAAAijjr8lS5ZIv379ZMSIEbJjxw6pXLmyNG3aVM6ePRtw/y+//FI6duwo3bt3l507d0q7du3MY+/evZ59xo8fL1OnTpVZs2bJli1bJHv27OaYf/75Zxp+MgAAAAAIb+R7AAAAABBmHX+vvvqqPP7449KtWzcpX768Sd6yZcsm8+bNC7j/a6+9Js2aNZPnnntO7rjjDnnxxRelatWqMn36dM/ozylTpsjQoUOlbdu2UqlSJVm4cKGcPHlSli9fnsafDgAAAADCF/keAAAAAKSuDOIgV69ele3bt8vgwYM929KlS2ematm8eXPA9+h2HTHqTUd32knekSNH5PTp0+YYtty5c5spZfS9Dz/8cLxjXrlyxTxsv/zyi/n30qVLEhcXJ05x/c9YcZJLcX+Jo2SMEKe4Hvu/88kJ9Fx2M2IjNOJCERvhGxuOigtFbIRtbDgpLhSxkThiI3G//vqr
pzPMiZyS7ylyvuTj+pQ4rk/hGReK2Aid2HBzXChiI3Riw0lxoYiNMI4NB8WFIjZSLt9zVMffzz//LNevX5fChQv7bNfnBw4cCPgeTfIC7a/b7dftbQnt42/cuHEyatSoeNtLliyZzE8UXvIGuwCO9ro4SV6HlcftiI3EOOtcJDbSDnFxI846F4mNtENs3IizzkWnxsbly5dN55fTOCXfU+R8ycf16UacdT1w6vXJjYiNG3HOuUhcpC1iIzHOOheJjbRFbCTGWediXoeVJzn5nqM6/pxCR6B6jyrVEZ8XLlyQ/PnzS0SEs3rBkXDvd4kSJeTHH3+UXLlyBbs4gCMQF0BgxAYQGLERenTkpyaBRYsWDXZRHI+cL7RxfQICIzaAwIgNID7iwt35nqM6/goUKCDp06eXM2fO+GzX50WKFAn4Ht2e2P72v7otMjLSZ58qVaoEPGbmzJnNw1uePHlu8lMhmPSixYUL8EVcAIERG0BgxEZoceKdfk7L9xQ5nztwfQICIzaAwIgNID7iwp35XjpxkEyZMkm1atVk/fr1PiMv9XmdOnUCvke3e++v1q5d69k/KirKJIPe+2hv9pYtWxI8JgAAAAAgZZHvAQAAAEDqc9Qdf0qnW+nSpYtUr15datasKVOmTJHY2Fjp1q2beb1z585SrFgxsyaD6tOnjzRo0EAmTZokLVu2lHfffVe2bdsms2fPNq/rNC19+/aVMWPGSHR0tEkMhw0bZm6HbNeuXVA/KwAAAACEE/I9AAAAAAizjr8OHTrIuXPnZPjw4WYxdp2eJSYmxrNY+/HjxyVduv/dqFi3bl155513ZOjQoTJkyBCT7C1fvlzuvPNOzz4DBw40yWTPnj3l0qVLUr9+fXPMLFmyBOUzIvXptD0jRoyIN30PEM6ICyAwYgMIjNhAaiDfQ0rg+gQERmwAgREbQHzEhbtFWLoiIAAAAAAAAAAAAICQ5qg1/gAAAAAAAAAAAADcHDr+AAAAAAAAAAAAABeg4w8AAAAAAAAAAABwATr+AAAAAAAAAAAAABeg4w+us3nzZkmfPr20bNky2EUBHKFr164SERHheeTPn1+aNWsmu3fvDnbRgKA7ffq09O7dW0qXLi2ZM2eWEiVKSOvWrWX9+vXBLhoQ9DojY8aMUrhwYbnvvvtk3rx5EhcXF+ziAQD5HuCHfA9IHDkf8D/ke+GDjj+4zptvvmkq9E8//VROnjwZ7OIAjqCJ36lTp8xDG7cZMmSQVq1aBbtYQFAdPXpUqlWrJhs2bJAJEybInj17JCYmRho1aiS9evUKdvGAoNcZGiOrV682MdGnTx9Tb/z111/BLh6AMEe+B8RHvgcERs4HxEe+Fx4yBLsAQEr67bffZMmSJbJt2zYzomfBggUyZMiQYBcLCDod1VakSBHzs/47aNAg+fvf/y7nzp2TggULBrt4QFA8/fTTZpTb1q1bJXv27J7tFSpUkMceeyyoZQOcUmcUK1ZMqlatKrVr15Z7773XtK169OgR7CICCFPke0Bg5HtAYOR8QHzke+GBO/7gKkuXLpVy5cpJ2bJl5dFHHzW3KVuWFexiAY77wuTtt9+WMmXKmGlggHB04cIFM9JTR3l6J4C2PHnyBKVcgFPdc889UrlyZfnPf/4T7KIACGPke8CNke8B/0XOByQd+Z770PEH1037ogmgfdvyL7/8Ips2bQp2sYCgW7FiheTIkcM8cubMKR9++KEZLZ0uHdUAwtP3339vvijULw8BJI3Gi04HAwDBQr4HBEa+B8RHzgckD/meu9ACgGt899135tb9jh07muc6p32HDh1McgiEO52ve9euXeahcdK0aVNp3ry5HDt2LNhFA4KCuwOAm4sbnSoJAIKBfA9IGPkeEB85H5A85Hvuwhp/cA1N+HQB0qJFi/pcsHTe4unTp0vu3LmDWj4gmHRaC53qxTZ37lwTE3PmzJExY8YEtWxAMERHR5sG7YEDB4JdFCBk7N+/X6KiooJdDABh
inwPSBj5HhAfOR+QPOR77sIdf3AFTQAXLlwokyZN8oxy08c333xjEsPFixcHu4iAo2jjV6d9+eOPP4JdFCAo8uXLZ0ZCz5gxQ2JjY+O9funSpaCUC3CqDRs2yJ49e+TBBx8MdlEAhCHyPSB5yPcAcj4gOcj33Ic7/uCa+ewvXrwo3bt3jzfSUy9YOjr0ySefDFr5gGC7cuWKnD592vyssaKjonXR99atWwe7aEDQaAJYr149qVmzpowePVoqVapkvlhcu3atzJw504x2A8K5zrh+/bqcOXNGYmJiZNy4cdKqVSvp3LlzsIsHIAyR7wGJI98DAiPnA+Ij3wsPdPzBFTTRa9y4ccDpXTQRHD9+vOzevdtU8EA40ko8MjLS/KyLveuCvcuWLZOGDRsGu2hA0JQuXVp27NghY8eOlf79+8upU6ekYMGCUq1aNZMEAuFeZ+j6WXnz5pXKlSvL1KlTpUuXLubuAQBIa+R7QOLI94DAyPmA+Mj3wkOExUqnAAAAAAAAAAAAQMijCxcAAAAAAAAAAABwATr+AAAAAAAAAAAAABeg4w8AAAAAAAAAAABwATr+AAAAAAAAAAAAABeg4w+JOn/+vBQqVEiOHj0a7KK43qxZs6R169bBLgaSgLhIO4MGDZLevXsHuxhIpVj55JNPJCIiQi5dumSeL1iwQPLkyZPGpXS+ffv2SfHixSU2NjbYRUEKITZSRkxMjFSpUkXi4uKCXRQgZNGuTTvke6GF2Eg75HyhjXZtyiDncx9iI2WQ8908Ov6QqLFjx0rbtm2lVKlSEgqOHTsmWbNmld9++02+/fZbefDBB03Z9UI6ZcqUgO+ZMWOG2SdLlixSq1Yt2bp1q8/rf/75p/Tq1Uvy588vOXLkMMc8c+aM53W9gOvxd+3aFe/YDRs2lL59+yap7I899pjs2LFDPvvss2R/bqStUI4LbUjo+er90HPfm2VZMnz4cImMjDTva9y4sRw6dMhnnwsXLsgjjzwiuXLlMg2T7t27m+Mn1IDxpn+3hOLR34ABA+Stt96Sw4cP3/TnR+jESocOHeTgwYOpVp6oqChZt26dua537dpVKlasKBkyZJB27doF3F/P46pVq0rmzJmlTJkyJn6SW4ckdL6PHDnSNF6Tonz58lK7dm159dVXk/xZ4WxOjQ27TeP/+Oqrr3z2X7ZsmZQrV86c9xpHq1atSnY9osddvnx5vLJobCYUk/6aNWsmGTNmlEWLFt3U5wYQ2u1a8j2kplCODXI+pCWntmvJ+RBsTo0Ncr7wQccfEvT777/Lm2++aRp3Tnbt2jXPzx988IE0atTIJGxa/tKlS8vLL78sRYoUCfjeJUuWSL9+/WTEiBEmCatcubI0bdpUzp4969nn2WeflY8++shc8DZt2iQnT56UBx54IMU/R6ZMmaRTp04yderUFD82Uk6ox4XSxO3UqVOehyaJ3saPH2/OQx2VvGXLFsmePbuJC2042zQB1C9b1q5dKytWrJBPP/1UevbsmeKfo0CBAuZ3z5w5M8WPDefFijYWdURcati9e7dcvHhRGjRoINevXze/6//+7/9M4zSQI0eOSMuWLU3s6Bd9+qVejx49ZM2aNcmqQ1JKt27dTBz89ddfKX5spC0nx4ZNE0LveqJatWqe17788kvp2LGjKf/OnTtNwqaPvXv3JqseSSmaNNJ2AsKzXUu+h9QS6rGhyPkQ7u1acj4Ek5Njw0bOFwYsIAHLli2zChYs6Hl+4cIFq1OnTlaBAgWsLFmyWGXKlLHmzZtnXtu4caOlp9PFixc9++/cudNsO3LkiHk+f/58K3fu3Nb7779v3ps5c2arSZMm1vHjx31+7/Lly6277rrLvB4VFWWNHDnSunbtmud1Pebrr79utW7d2sqWLZs1YsQIz2v33HOPNXPmzHifpWTJktbkyZPjba9Zs6bVq1cvz/Pr169bRYsWtcaNG2eeX7p0ycqYMaP5W9j2799vyrB582bzXD+fPtfP669B
gwZWnz59fP5G/o8uXbp49t+0aZOVKVMm6/fff0/kfwbBFOpxYf++hMTFxVlFihSxJkyY4NmmcaC/d/Hixeb5vn37zO/7+uuvPfusXr3aioiIsE6cOJHgZw8Uj1qeQHHhXf633nrLKl68+A3+Z+D0WFErV660oqOjTaw0bNjQ8/9vnyf+5+f3339vtWnTxipUqJCVPXt2q3r16tbatWt9jnny5EmrRYsW5pilSpWyFi1aFPCaP3r0aKtDhw7xyqnX4LZt28bbPnDgQKtChQo+2/T9TZs2TXIdklj9o+d45cqVPc8DxYG+13blyhUTh+vWrYt3LIQWJ8dGYm0a2z/+8Q+rZcuWPttq1aplPfHEE0muR5T+Hq37EotJuzz+D21f2Y4dO2a26d8EQHi1a72R7yElhXpskPMhrTi5XeuNnA9pzcmxQc4XPrjjDwnSKUi8e/uHDRtm5pxevXq17N+/34xC0ZFZyR3xoLc6L1y4UL744gszJcTDDz/s8zs7d+4sffr0Mb/rjTfeMLfZ63v8b5e///77Zc+ePWbKFKXH+vzzz6VNmzZJKsvVq1dl+/btPiN/0qVLZ55v3rzZPNfXdRSd9z56m/Ntt93m2Sep6tat6zOSYsOGDeZ26bvvvtuzT/Xq1c3IHh0pAWdyQ1zo9CwlS5aUEiVKmGkHdBSn94i306dP+5zzuXPnNtNZ2Oe8/qtTvej5atP9NX6Se+7qVAbecbF48WIzDUe9evU8+9SsWVN++ukn1tcI8Vj58ccfzeh5XdtGR1PqSEpdzyMxeq62aNFC1q9fb0aZ6RQP+v7jx4979tHY0JH5OkXLe++9J7Nnzw44+vLDDz8053tS6XnuPzJUR6/ZcZCUOiQ5vOPg+++/N9PMeNcPepeAThPD9GChLxRiQ+sMHW1av35983pyYiMp9UhSaT3lHRv6WXUqPu/Y0DZZ4cKFiQ0gTNu1iSHfQzjHBjkf0kIotGsTQ86HcI4Ncj73yxDsAsC5dCqIokWLep7rheWuu+7yNPxuZq57TaqmT59uLgRK53G/4447zPzY2tAbNWqUufB16dLFvK5Tt7z44osycOBAc1u9TadI0Vvgvelcw5UqVfIpc2J+/vlnc+u/Xji86fMDBw6Yn/UiphWv/+Kquo++5p/oaQPA2x9//OGZz1uPY09Bowu86kVeG+p2Y11ly5bNXCj9p+GAc4R6XJQtW1bmzZtntv3yyy8yceJEc+5qIqgLSdvndaC4sF/Tf/2nH9DELV++fPHiQo8ZKOn1nspAH+qHH34w66u89NJLct9993n2scuuf/tQWWMD8WNFvyD529/+JpMmTfKci/qFxSuvvJLgMXQaFX3Y9Lx///33TaP0mWeeMddqnZ7i66+/9sTg3LlzJTo62uc4J06cMFNbNG/ePMnl13M5UBz8+uuv5tqu02TcqA6xPf/88zJ06FCfbZpE6joONrt+0EFxuraQ1gX6hY83/XtSP4Q+J8eGTg+m5dAv4rRNo8mjTumi6zLYXyYmFBvedYS9LaF9bDp9TPr06X22XblyxUy5pPQ1OzZ0yhgtS506dcyXnt6IDSA827U3Qr6HcI0Ncj6kFSe3a5OCnA/hGBvkfOGDjj8kSCs57wWgn3rqKVMx6ZzWTZo0MYGojcfk0IZijRo1fEZTapKlo+a0sfvNN9+Y0W/eo9q0ktXA14ajJkrKe9SZ95z2SR39mRp03m9tuHvTOfEDNfj176ij71577bV4r2uD2LuRDGcJ9bjQylMfNi2rnrfa2NRGRErT0Tg5c+b02dawYcN4+2lC2qpVK1PxP/fccz6v2UkicRHasaLns/1Fh837XExohJs29lauXGlGfukIeT2uPcLtu+++M/Gji7HbdNRk3rx5fY6jDWMdxeb/pV5a0XNa56T3pvPT6zop/oYMGWJGyG3bts1z7tuoH9zBybGhdy/oGiY2rZt0BOmECRNSpY01efLkeCNJ9UsTreP86Rfnly9fNusM+X/xTmwA
4dmuTWvke+Ej1GODnA9pxcnt2rRGzodQiQ1yvvBBxx8SpBcCHd1i05EB2rOuo8k0AO+9914zUktHj9nB+N/pe+MvNJ1UelHTkW6BFlP3vmDqgqH+o2hiYmJM5Zmcz6ejCs6cOeOzXZ/bIw30Xz22Tp3h3Xjw3sf71mS9wHrzr8DtpEFv8daRfXqB9nfhwgUpWLBgkj8H0pbb4iJjxoxm9KpOM6Hs81rP8cjISM9++twezaz7+E8doA0QPXf94yIqKipew9v/vNfKXqd/0QXodVoCf3pcRVyEdqzcjAEDBpi40njS66teUx966CFzbieHNnST24DVczlQ/aDnqZZD648b1SHefwv/+kFHS/t7++23TaNYp+koVqxYwFjQUYIIbaEWG5qg6u+6UWx4t51uVI94H8s/NvSLQ213eRszZoysWbPGtJ38v1hUtJ2Am+O2dm2gz0e+h5vhttgg50NqCbV2rT9yPqSWUIsNcj53Yo0/JEgbhjq3vDcNMJ16QiuqKVOmeBpsduDpCASbzlnsTxuKOqLFpqMTNNDtkZM6SkG36QXB/+Hf0+9NK0wd0eB9C/SN6FQsOt+yzpVsi4uLM8/tURf6ujaSvffR8unoihuNzAjk1VdflaVLl5oReTpfsT+d9kJH9OnfHs7ktrjQBEynF7Arak3atFL2Pud1mgtdx8E+5/VfLZ/OdW/TNUw0fvxHMCXFs88+a8qg0wp4J7W2vXv3mjisUKFCso8N58SKPZWRt6+++irRY+ioZx01qeuYVKxY0Zyb3ut+6PQYGj86B7xNv9DwbmDrlygbN25M1loP9nnuHQdKG8J2HCSlDkkOHfGpU4LpSOzatWsH3Edjgfoh9IVabGi95Z3M3Sg2klKPJIdOPTN69GjTfgr0JYi2m7T9RGwAyee2dq0/8j3cLLfFBjkfUkuotWv9kfMhtYRabJDzuZQFJGD37t1WhgwZrAsXLpjnw4YNs5YvX24dOnTI2rt3r9WqVSurZs2a5rWrV69aJUqUsNq3b28dPHjQWrFihVW2bFkd8mYdOXLE7DN//nwrY8aM5j1fffWVtW3bNqt27drmYYuJiTG/c+TIkeZ37Nu3z1q8eLH1wgsvePbRY77//vs+Ze3Vq5fVu3dvn21Xrlyxdu7caR6RkZHWgAEDzM9aftu7775rZc6c2VqwYIH5XT179rTy5MljnT592rPPk08+ad12223Whg0bTJnr1KljHjb9fFomPba/Bg0aWH369DE/r1271kqfPr01a9Ys69SpU57HpUuXPPvr36h06dI39f+FtBHqcTFq1ChrzZo11g8//GBt377devjhh60sWbJY3377rWefl19+2cTBBx98YD5v27ZtraioKOuPP/7w7NOsWTPrrrvusrZs2WJ9/vnnVnR0tNWxY0fP6xs3bjRlunjxYry/YcmSJa3Jkyebn+fNm2fi4sMPP/SJi8uXL3v2HzFihHXPPffc1P8XnBMrx44dszJlymSuxQcOHLAWLVpkFSlSxOc80XjInTu35xj333+/VaVKFXN93bVrl9W6dWsrZ86cnuuqaty4sVW1alVzLu7YscNq1KiRlTVrVmvKlCnm9WXLllkVK1aMVz495/W4esyGDRt66gvb4cOHrWzZslnPPfectX//fmvGjBnmXNV4TE4d4n2+e9PzunLlyuZnPecLFy5sdenSxScOzp4969lfrxkRERHW0aNHb+F/BU7g5NjQc/mdd94x57w+xo4da6VLl85cq21ffPGFKf/EiRPNPnouaz22Z8+eZNUjgeotpXGg+ys9psbh0KFDfWLj/PnzPvVNjhw5rNjY2Fv8nwHCT6i3a8n3kFpCPTbI+ZBWnNyuVeR8CBYnxwY5X/ig4w+J0oapJi7qxRdftO644w5zAcmXL58JUK0kbdoQ1IuJNij//ve/m4uLf2NXL2DvvfeeSXa04tQLlF78vGkFW7duXfN7cuXKZcowe/bsRC8a2tDWRMubnaD5PzQ58zZt
2jST6OkF2G6Ie9ML1tNPP23lzZvXXIj0wqsXoOQmgnqRDFQevdjZmjRpYo0bNy5J/zcInlCOi759+3rOd210tmjRwjQOvMXFxZnkVl/X8tx7773Wd99957OPVsCa9GnFq+Xp1q2bT+KW1CRQz/9AcaHxYtPEWZNehHasqI8++sgqU6aMOa80HrRhmVhDV+PEbrjq+Tx9+nSf66o6efKk1bx5c3NMPbe0AVuoUCHP73300Ud9vjCx6b6Bzj1veh5rQ1vjReNTy+fvRnVIUpJAO178H/pe20svvWQ1bdo0iX95OJ1TY0OTQK3TtL1j1zVab/lbunSpdfvtt5vzvkKFCtbKlSuTXY8kJQnUz32jtpx++fLEE08k468PwC3tWvI9pKZQjg1yPqQlp7ZrFTkfgsmpsUHOFz7o+EOidLSaXgyuX79+y8fyv4ClFB3BpsfVkXahTEf16cXZe0QonIm4SDurVq0yf+tr164FuygIcqwk1Y8//mgaievWrTPnjX45o6PfQpne0aCJpn6pBHcgNlLGuXPnzOfw/vIVQPLQrk075HuhhdhIO+R8oY12bcog53MfYiNlkPPdvPgrTQNeWrZsKYcOHZITJ06YxcydSOcznjZtmpkPPpTpmgALFy6U3LlzB7souAHiIu3ExsbK/Pnz4y0Oj9CQFrGia43ovPU6571eRwcOHCilSpWSu+++2yz+rOuJ1KhRQ0KZrjM0ZMgQqVevXrCLghRCbKQMXePi9ddfN2tMALg5tGvTDvleaCE20g45X2ijXZsyyPnch9hIGeR8Ny9Ce/9u4f1Aki1YsED69u1rFogG8F/EBXBr1qxZI/3795fDhw9Lzpw5pW7dujJlyhQpWbJksIsGBBWxASCt0a4FAiM2gFtDuxYIjNhAYuj4AwAAAAAAAAAAAFwgXbALAAAAAAAAAAAAAODW0fEHAAAAAAAAAAAAuAAdfwAAAAAAAAAAAIAL0PEHAAAAAAAAAAAAuAAdfwAAAAAAAAAAAIAL0PEHAAAAAAAAAAAAuAAdfwAAAAAAAAAAAIAL0PEHAAAAAAAAAAAAuAAdfwAAAAAAAAAAAICEvv8HX4JyRB+Kj5MAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Figure saved to ptbxl_model_comparison_ablation.png\n" + ] + } + ], + "source": [ + "_label_map = {\n", + " 'A -- superdiagnostic / 100 Hz (baseline)': 'A\\n(super/100Hz)',\n", + " 'B -- superdiagnostic / 500 Hz': 'B\\n(super/500Hz)',\n", + " 'C -- diagnostic (27-class) / 100 Hz': 'C\\n(diag/100Hz)',\n", + " 'D -- diagnostic (27-class) / 500 Hz': 'D\\n(diag/500Hz)',\n", + "}\n", + "configs = [c['name'] for c in ABLATION_CONFIGS]\n", + "short_labels = [_label_map[c] for c in configs]\n", + "model_names = [m['name'] for m in MODELS]\n", + "colors = ['steelblue', 'coral', 'darkorange', 'mediumseagreen']\n", + "\n", + "x = np.arange(len(short_labels))\n", + "n_models = len(model_names)\n", + "width = 0.18\n", + "offsets = np.linspace(-(n_models - 1) * width / 2,\n", + " (n_models - 1) * width / 2,\n", + " n_models)\n", + "\n", + "fig, axes = plt.subplots(1, 2, figsize=(18, 5))\n", + "\n", + "for ax_idx, (metric, metric_label) in enumerate([\n", + " ('roc_auc_macro', 'ROC-AUC (macro)'),\n", + " ('f1_macro', 'F1 (macro)'),\n", + "]):\n", + " ax = axes[ax_idx]\n", + " for m_idx, mname in enumerate(model_names):\n", + " vals = [\n", + " results_df[\n", + " (results_df['model'] == mname) &\n", + " (results_df['config'] == cfg)\n", + " ][metric].values[0]\n", + " for cfg in configs\n", + " ]\n", + " bars = ax.bar(x + offsets[m_idx], vals, width,\n", + " label=mname, color=colors[m_idx])\n", + " ax.bar_label(bars, fmt='%.3f', padding=3, fontsize=7)\n", + "\n", + " ax.set_xticks(x)\n", + " ax.set_xticklabels(short_labels, fontsize=10)\n", + " ax.set_ylim(0, 1.15)\n", + " ax.yaxis.set_major_formatter(mticker.FormatStrFormatter('%.2f'))\n", + " ax.set_ylabel('Score', fontsize=12)\n", + " ax.set_title(f'PTB-XL Ablation — {metric_label}', fontsize=12)\n", + " ax.legend(fontsize=9)\n", + " ax.grid(axis='y', alpha=0.3)\n", + "\n", + 
"plt.suptitle('ResNet-18 vs SE-ResNet-50 vs Lambda-ResNet-18 vs BiLSTM: Model × Task Ablation on PTB-XL',\n", + " fontsize=12, y=1.02)\n", + "plt.tight_layout()\n", + "plt.savefig('ptbxl_model_comparison_ablation.png', dpi=150, bbox_inches='tight')\n", + "plt.show()\n", + "print('Figure saved to ptbxl_model_comparison_ablation.png')\n" + ] + }, + { + "cell_type": "markdown", + "id": "fea96a63", + "metadata": {}, + "source": [ + "## 9. Analysis & Findings\n", + "\n", + "### Effect of Label Granularity\n", + "\n", + "Comparing configs **A vs C** (both at 100 Hz): moving from the 5-class\n", + "**superdiagnostic** vocabulary to the 27-class **diagnostic** vocabulary\n", + "increases classification difficulty because:\n", + "\n", + "* Rare classes have far fewer positive examples, making gradient updates noisy.\n", + "* The larger output head must learn $K = 27$ independent sigmoid thresholds.\n", + "* Macro averaging penalises poor performance on rare labels equally.\n", + "\n", + "Formally, the expected macro-AUC satisfies\n", + "$$\\overline{\\text{AUC}}_{27} \\leq \\overline{\\text{AUC}}_{5}$$\n", + "when the 27-class problem is strictly harder per class.\n", + "\n", + "### Effect of Sampling Rate\n", + "\n", + "Comparing configs **A vs B** (both superdiagnostic): at 500 Hz ($T = 5000$)\n", + "the model receives 5× more temporal resolution per lead. This allows the\n", + "model to detect high-frequency features (notches, fragmented QRS) that are\n", + "aliased away at 100 Hz. 
However:\n", + "\n", + "* Input size grows by 5×, substantially increasing memory and training time.\n", + "* SE-ResNet-50's strided convolutions progressively downsample, scaling the\n", + " effective receptive field with $T$; BiLSTM processes all time-steps sequentially\n", + " so benefits more directly from longer inputs.\n", + "\n", + "### Effect of Model Architecture\n", + "\n", + "Comparing all four architectures across the same configs:\n", + "\n", + "* **ResNet-18** (control) — a plain 1-D residual network without attention.\n", + " Provides the baseline CNN performance against which attention-augmented\n", + " variants are measured.\n", + "\n", + "* **SE-ResNet-50** — augments bottleneck blocks with Squeeze-Excitation\n", + " channel attention:\n", + " $$\\tilde{x}_c = \\sigma\\!\\left(W_2\\,\\delta\\!\\left(W_1\\,z_c\\right)\\right) \\cdot x_c$$\n", + " where $z_c$ is the global average-pooled channel descriptor and $\\delta$ is ReLU.\n", + " Expected to outperform ResNet-18 by recalibrating channel responses to\n", + " emphasise diagnostically relevant waveform features.\n", + "\n", + "* **Lambda-ResNet-18** — replaces SE modules with Lambda layers that compute\n", + " both *content-based* and *position-based* linear attention without explicit\n", + " softmax:\n", + " $$\\lambda_n = \\sum_m \\text{softmax}(k_m) \\cdot (v_m \\odot e_{n-m})$$\n", + " This captures long-range context more efficiently than convolution while\n", + " remaining computationally lighter than full self-attention.\n", + "\n", + "* **BiLSTM** — a single bidirectional LSTM layer with $H = 64$ hidden units\n", + " ($\\text{lstm\\_d1\\_h64}$ from Nonaka & Seita 2021), processing the full sequence\n", + " left-to-right and right-to-left and taking the last hidden state:\n", + " $$h_T = [\\overrightarrow{h}_T ; \\overleftarrow{h}_1] \\in \\mathbb{R}^{2H}$$\n", + " Captures long-range temporal dependencies but may under-perform CNN variants\n", + " on localised morphological patterns.\n", 
+ "\n", + "### Trade-off Summary\n", + "\n", + "| Factor | Expected ranking |\n", + "|--------|----------------|\n", + "| Fewer classes (5 vs 27) | All models easier; relative ranking preserved |\n", + "| Higher sampling rate (500 Hz) | All models improve; BiLSTM benefits proportionally more |\n", + "| Architecture (no attention) | ResNet-18 < SE-ResNet-50 ≈ Lambda-ResNet-18 |\n", + "| Architecture (CNN vs RNN) | CNN variants expected to outperform BiLSTM on morphological tasks |\n", + "\n", + "These findings closely mirror the comprehensive benchmarks in Strodthoff *et al.*\n", + "(2021) and Nonaka & Seita (2021), where CNN-based models (and attention-augmented\n", + "variants) generally outperform RNN baselines on PTB-XL when trained sufficiently long." + ] + }, + { + "cell_type": "markdown", + "id": "464a7b21", + "metadata": {}, + "source": [ + "## 10. References\n", + "\n", + "1. Wagner, P. *et al.* (2020). PTB-XL, a large publicly available electrocardiography dataset.\n", + " *Scientific Data* 7, 154. https://doi.org/10.1038/s41597-020-0495-6\n", + "\n", + "2. Reyna, M.A. *et al.* (2020). Will Two Do? Varying Dimensions in Electrocardiography:\n", + " The PhysioNet/Computing in Cardiology Challenge 2020. *CinC 2020*.\n", + "\n", + "3. Strodthoff, N. *et al.* (2021). Deep Learning for ECG Analysis: Benchmarks and Insights\n", + " from PTB-XL. *IEEE JBHI* 25, 1519–1528.\n", + "\n", + "4. Nonaka, N. & Seita, J. (2021). In-depth Benchmarking of Deep Neural Network Architectures\n", + " for ECG Diagnosis. *Machine Learning for Healthcare (MLHC) 2021*.\n", + "\n", + "5. Hu, J. *et al.* (2018). Squeeze-and-Excitation Networks. *CVPR 2018*.\n", + "\n", + "6. Zhao, M. *et al.* (2024). PyHealth: A Deep Learning Toolkit for Healthcare Predictive\n", + " Modeling. *arXiv:2401.06284*." 
"""
PyHealth dataset for the PTB-XL dataset.

Data links:
    .hea / .mat files: https://www.kaggle.com/datasets/physionet/ptbxl-electrocardiography-database
    .csv: https://physionet.org/content/ptb-xl/1.0.1/ptbxl_database.csv
    Note that to run this properly the .csv needs to be in the same folder as the dataset.

Dataset paper:
    Wagner, P., Strodthoff, N., Bousseljot, R., Samek, W., and Schaeffter, T. (2020)
    'PTB-XL, a large publicly available electrocardiography dataset' (version 1.0.1), PhysioNet.
    RRID:SCR_007345. Available at: https://doi.org/10.13026/x4td-x982

Dataset paper link:
    https://physionet.org/content/ptb-xl/1.0.1/

Authors:
    Anurag Dixit - anuragd2@illinois.edu
    Kent Spillner - kspillne@illinois.edu
    John Wells - jtwells2@illinois.edu
"""
import logging
from pathlib import Path
from typing import List, Optional, Tuple

import dask.dataframe as dd
import pandas as pd

from .base_dataset import BaseDataset
from pyhealth.tasks import PTBXLMultilabelClassification

logger = logging.getLogger(__name__)

# Full list of possible diagnoses for the PTB-XL dataset provided here:
# https://github.com/physionetchallenges/physionetchallenges.github.io/blob/master/2020/Dx_map.csv
# Not all codes are present in the data but they are included for completeness,
# as referenced in the Data Description section here:
# https://physionet.org/content/challenge-2020/1.0.2/
SNOMED_CT_ABBREVIATION = {
    "270492004": "IAVB",
    "195042002": "IIAVB",
    "164951009": "abQRS",
    "426664006": "AJR",
    "57054005": "AMI",
    "413444003": "AMIs",
    "426434006": "AnMIs",
    "54329005": "AnMI",
    "251173003": "AB",
    "164889003": "AF",
    "195080001": "AFAFL",
    "164890007": "AFL",
    "195126007": "AH",
    "251268003": "AP",
    "713422000": "ATach",
    "29320008": "AVJR",
    "233917008": "AVB",
    "251170000": "BPAC",
    "74615001": "BTS",
    "426627000": "Brady",
    "6374002": "BBB",
    "698247007": "CD",
    "426749004": "CAF",
    "413844008": "CMI",
    "27885002": "CHB",
    "713427006": "CRBBB",
    "204384007": "CIAHB",
    "53741008": "CHD",
    "77867006": "SQT",
    "82226007": "DIB",
    "428417006": "ERe",
    "13640000": "FB",
    "84114007": "HF",
    "368009": "HVD",
    "251259000": "HTV",
    "49260003": "IR",
    "251120003": "ILBBB",
    "713426002": "IRBBB",
    "251200008": "ICA",
    "425419005": "IIs",
    "704997005": "ISTD",
    "426995002": "JE",
    "251164006": "JPC",
    "426648003": "JTach",
    "425623009": "LIs",
    "445118002": "LAnFB",
    "253352002": "LAA",
    "67741000119109": "LAE",
    "446813000": "LAH",
    "39732003": "LAD",
    "164909002": "LBBB",
    "445211001": "LPFB",
    "164873001": "LVH",
    "370365005": "LVS",
    "251146004": "LQRSV",
    "54016002": "MoI",
    "164865005": "MI",
    "164861001": "MIs",
    "698252002": "NSIVCB",
    "428750005": "NSSTTA",
    "164867002": "OldMI",
    "10370003": "PR",
    "251182009": "VPVC",
    "282825002": "PAF",
    "67198005": "PSVT",
    "425856008": "PVT",
    "284470004": "PAC",
    "427172004": "PVC",
    "17338001": "VPB",
    "164947007": "LPR",
    "111975006": "LQT",
    "164917005": "QAb",
    "164921003": "RAb",
    "314208002": "RAF",
    "253339007": "RAAb",
    "446358003": "RAH",
    "47665007": "RAD",
    "59118001": "RBBB",
    "89792004": "RVH",
    "55930002": "STC",
    "49578007": "SPRI",
    "65778007": "SAB",
    "427393009": "SA",
    "426177001": "SB",
    "60423000": "SND",
    "426783006": "NSR",
    "427084000": "STach",
    "429622005": "STD",
    "164931005": "STE",
    "164930006": "STIAb",
    "251168009": "SVB",
    "63593006": "SVPB",
    "426761007": "SVT",
    "251139008": "ALR",
    "164934002": "TAb",
    "59931005": "TInv",
    "266257000": "TIA",
    "164937009": "UAb",
    "11157007": "VBig",
    "164884008": "VEB",
    "75532003": "VEsB",
    "81898007": "VEsR",
    "164896001": "VF",
    "111288001": "VFL",
    "266249003": "VH",
    "251266004": "VPP",
    "195060002": "VPEx",
    "164895002": "VTach",
    "251180001": "VTrig",
    "195101003": "WAP",
    "74390002": "WPW",
}


class PTBXLDataset(BaseDataset):
    """Base dataset for the PTB-XL ECG dataset.

    PTB-XL is a publicly available electrocardiography dataset. Contains 21837
    samples from 18885 patients, all approximately 10 seconds in duration.

    Dataset is available here: https://www.kaggle.com/datasets/physionet/ptbxl-electrocardiography-database
    File with train / test splits available here: https://physionet.org/content/ptb-xl/1.0.1/ptbxl_database.csv

    Files in the dataset are in the format HR00001.mat / HR00001.hea. The .hea
    files contain patient data including age, sex, and diagnosis codes. The
    .mat files contain the ECG signal data of shape (12, 5000), mapping to 10
    seconds of data sampled at 500Hz for the 12 ECG leads. The associated .csv
    file must be in the same directory as the .hea / .mat files.

    Args:
        root (str): Root directory of the raw data (.mat files, .hea files, .csv file).
        dataset_name (str): Name of the dataset, "ptbxl" by default.

    Attributes:
        root (str): Root directory of the raw data (.mat files, .hea files, .csv file).
        dataset_name (str): Name of the dataset, "ptbxl" by default.
        tables (List[str]): Name of the data table(s), ["ptbxl"] by default.
        CLASSES (List[str]): Constant list of available diagnoses in the dataset
            as SNOMED CT abbreviations.
        default_task (PTBXLMultilabelClassification): Default task for this dataset.

    Examples:
        >>> dataset = PTBXLDataset(root="./data")
    """

    CLASSES = list(SNOMED_CT_ABBREVIATION.values())

    def __init__(
        self,
        root: str,
        dataset_name: Optional[str] = None,
        **kwargs,
    ) -> None:
        super().__init__(
            root=root,
            tables=["ptbxl"],
            dataset_name=dataset_name or "ptbxl",
            config_path=None,
            **kwargs,
        )

    @staticmethod
    def _parse_hea_header(
        hea_file: Path,
    ) -> Tuple[Optional[int], Optional[str], List[str]]:
        """Parse age, sex, and SNOMED CT diagnosis codes from a .hea header file.

        Args:
            hea_file (Path): Path to the .hea file to parse.

        Returns:
            Tuple[Optional[int], Optional[str], List[str]]: (age, sex, dx codes).
                age is None when absent or non-numeric; sex is None when absent;
                dx is an empty list when no #Dx line is present.
        """
        age: Optional[int] = None
        sex: Optional[str] = None
        dx: List[str] = []
        with open(hea_file, "r") as f:
            for line in f:
                line = line.strip()
                # Header comment lines may be written "#Age:" or "# Age:".
                if line.startswith("#Age:") or line.startswith("# Age:"):
                    try:
                        age = int(line.split(":")[1].strip())
                    except ValueError:
                        # e.g. "NaN" / missing value in the header.
                        age = None
                elif line.startswith("#Sex:") or line.startswith("# Sex:"):
                    sex = line.split(":")[1].strip()
                elif line.startswith("#Dx:") or line.startswith("# Dx:"):
                    dx = [x.strip() for x in line.split(":")[1].split(",")]
        return age, sex, dx

    def load_data(self) -> dd.DataFrame:
        """Returns a dataframe with each individual row corresponding to each .hea/.mat file combination in the PTB-XL dataset.

        Uses the stratified fold assignments from ptbxl_database.csv - 1 through 8
        for train, 9 for validation, 10 for test.

        Returns:
            dd.DataFrame: Dataframe with one row per record with the following columns:
                patient_id (str): .hea file identifier starting with HR
                event_type (str): "ptbxl" (only one event type in the dataset)
                timestamp (NaT): pd.NaT (no timestamps available in the dataset)
                ptbxl/mat (str): Path to the associated .mat file
                ptbxl/age (str): Patient age
                ptbxl/sex (str): Patient sex
                ptbxl/dx_codes (str): Patient SNOMED CT diagnosis codes
                ptbxl/dx_abbreviations (str): Patient SNOMED CT diagnosis abbreviations
                ptbxl/split (str): Stratified fold assignment "train" / "test" / "val"

        Raises:
            FileNotFoundError: If no .hea files are found in root.
            FileNotFoundError: If ptbxl_database.csv is not found in root.
        """
        root_path = Path(self.root)
        # Collect .hea files: first try flat layout (files directly in root),
        # then fall back to g1/…g22/ sub-directory layout.
        files = sorted(root_path.glob("*.hea"))
        if not files:
            for subdir in sorted(root_path.iterdir()):
                if subdir.is_dir() and subdir.name.startswith("g"):
                    files.extend(sorted(subdir.glob("*.hea")))

        # Check existence of required .hea files
        if not files:
            raise FileNotFoundError(
                f"No .hea files found in {self.root}. Are you sure you have the right directory?"
            )
        logger.info(f"Found {len(files)} .hea files")

        # Check existence of required .csv file (for the train/test/val splits)
        csv_path = root_path / "ptbxl_database.csv"
        if not csv_path.exists():
            raise FileNotFoundError(
                f"No ptbxl_database.csv file found in {self.root}. Does it exist in this directory?"
            )
        db = pd.read_csv(csv_path, index_col="ecg_id")

        rows = []
        for hea_file in files:
            # Records missing from ptbxl_database.csv have no fold assignment;
            # skip them before doing any file I/O.
            ecg_id = int(hea_file.stem.replace("HR", ""))
            if ecg_id not in db.index:
                logger.debug(
                    f"Skipping {hea_file.name}: ecg_id {ecg_id} not in ptbxl_database.csv"
                )
                continue

            age, sex, dx = self._parse_hea_header(hea_file)

            # Map diagnosis codes to the abbreviations (may need them for tasks)
            dx_abbreviations = [
                SNOMED_CT_ABBREVIATION[x] for x in dx if x in SNOMED_CT_ABBREVIATION
            ]

            # Train / test / validation splits using the strat_fold column in
            # ptbxl_database.csv: folds 1-8 train, 9 val, 10 test.
            strat_fold = db.loc[ecg_id, "strat_fold"]
            if strat_fold <= 8:
                split = "train"
            elif strat_fold == 9:
                split = "val"
            else:
                split = "test"

            # Append required data to the list
            rows.append({
                "patient_id": hea_file.stem,
                "event_type": "ptbxl",
                "timestamp": pd.NaT,
                "ptbxl/mat": str(hea_file.with_suffix(".mat")),
                "ptbxl/age": age,
                "ptbxl/sex": sex,
                "ptbxl/dx_codes": ",".join(dx),
                "ptbxl/dx_abbreviations": ",".join(dx_abbreviations),
                "ptbxl/split": split,
            })

        df = pd.DataFrame(rows)

        logger.info(f"Parsed {len(df)} records.")
        return dd.from_pandas(df, npartitions=1)

    @property
    def default_task(self) -> PTBXLMultilabelClassification:
        """Returns the default task for the PTBXL dataset: PTBXLMultilabelClassification.

        Returns:
            PTBXLMultilabelClassification: The default task instance created with
                the default label type and sampling rate.
        """
        return PTBXLMultilabelClassification()
+
+This module provides :class:`BiLSTMECG`, a :class:`~pyhealth.models.BaseModel`
+subclass implementing the Bidirectional LSTM architecture benchmarked in:
+
+    Nonaka, N. & Seita, J. (2021). *In-depth Benchmarking of Deep Neural
+    Network Architectures for ECG Diagnosis.* Proceedings of Machine Learning
+    Research 149:1–19, MLHC 2021.
+
+The paper's best-performing LSTM variant (``lstm_d1_h64``) uses a **single
+bidirectional LSTM layer** with ``hidden_size=64``, producing 128-dimensional
+hidden states that are projected to the output head. This implementation
+follows the same design but exposes ``hidden_size`` and ``n_layers`` as
+constructor arguments so it can be used as a drop-in replacement across the
+full ablation grid.
+
+Mathematical framing
+--------------------
+Given an ECG tensor :math:`X \\in \\mathbb{R}^{B \\times C \\times T}` (batch
+size :math:`B`, :math:`C=12` leads, :math:`T` time-steps), the model:
+
+1. Permutes to :math:`(B, T, C)` for sequence-first processing.
+2. Passes through a bidirectional LSTM:
+
+   .. math::
+
+      h_t = \\text{BiLSTM}(x_t, h_{t-1})\\quad h_t \\in \\mathbb{R}^{B \\times 2H}
+
+3. Mean-pools the hidden states over **all** :math:`T` time-steps (via
+   ``AdaptiveAvgPool1d``) to obtain :math:`\\bar{h} \\in \\mathbb{R}^{B \\times 2H}`.
+4. Projects with a linear head :math:`W \\in \\mathbb{R}^{2H \\times K}` to produce
+   logits for :math:`K` classes.
+5. Optimises with **binary cross-entropy with logits** (multi-label):
+
+   .. 
math::
+
+      \\mathcal{L} = -\\frac{1}{K}\\sum_{k=1}^{K}
+      \\bigl[y_k \\log \\sigma(\\hat{y}_k) +
+      (1-y_k)\\log(1-\\sigma(\\hat{y}_k))\\bigr]
+
+Paper alignment
+---------------
++--------------+----------------------+-----------------------------+
+| Paper name   | Paper setting        | Default in this class       |
++==============+======================+=============================+
+| lstm_d1_h64  | 1 layer, hidden=64   | n_layers=1, hidden_size=64  |
++--------------+----------------------+-----------------------------+
+| lstm_d3_h128 | 3 layers, hidden=128 | n_layers=3, hidden_size=128 |
++--------------+----------------------+-----------------------------+
+
+Signal format expected
+----------------------
+``feature_keys=["signal"]`` → each batch element is a ``np.ndarray`` of shape
+``(12, T)`` loaded by ``SampleSignalDataset`` from a ``.pkl`` file. :math:`T`
+is typically 1000 at 100 Hz or 5000 at 500 Hz.
+
+Authors:
+    Anurag Dixit - anuragd2@illinois.edu
+    Kent Spillner - kspillne@illinois.edu
+    John Wells - jtwells2@illinois.edu
+"""
+
+from typing import List, Optional
+
+import numpy as np
+import torch
+import torch.nn as nn
+
+from pyhealth.models import BaseModel
+
+
+class BiLSTMECG(BaseModel):
+    """Bidirectional LSTM ECG classifier.
+
+    Extends :class:`~pyhealth.models.BaseModel` so it integrates seamlessly
+    with :class:`~pyhealth.trainer.Trainer`, :class:`~pyhealth.datasets.PTBXLDataset`,
+    and :func:`~pyhealth.metrics.multilabel_metrics_fn`.
+
+    Args:
+        dataset: A PyHealth ``SampleSignalDataset`` (or ``Subset``) that
+            exposes ``input_info`` with ``"signal"`` → ``{"n_channels": 12}``.
+        feature_keys (List[str]): Must be ``["signal"]``.
+        label_key (str): Key in the sample dict that holds the label list.
+            Use ``"labels"`` to match ``PTBXLMultilabelClassification`` output.
+        mode (str): ``"multilabel"`` applies ``BCEWithLogitsLoss``; other modes
+            are passed through to :class:`~pyhealth.models.BaseModel`.
+ hidden_size (int): LSTM hidden dimension *per direction*. The + bidirectional output is ``2 × hidden_size``. Paper best variant + uses ``hidden_size=64`` (1 layer). Defaults to ``64``. + n_layers (int): Number of stacked LSTM layers. Paper uses ``1``. + Defaults to ``1``. + dropout (float): Dropout probability applied between LSTM layers + when ``n_layers > 1``. Defaults to ``0.2``. + + Examples: + Paper-aligned variant (lstm_d1_h64):: + + >>> from pyhealth.models import BiLSTMECG + >>> model = BiLSTMECG( + ... dataset=sample_dataset, + ... feature_keys=["signal"], + ... label_key="labels", + ... mode="multilabel", + ... hidden_size=64, + ... n_layers=1, + ... ) + + Deeper variant used in ablation grid:: + + >>> model_deep = BiLSTMECG( + ... dataset=sample_dataset, + ... feature_keys=["signal"], + ... label_key="labels", + ... mode="multilabel", + ... hidden_size=128, + ... n_layers=3, + ... ) + """ + + def __init__( + self, + dataset, + feature_keys: List[str], + label_key: str, + mode: str, + hidden_size: int = 64, + n_layers: int = 1, + dropout: float = 0.2, + **kwargs, + ): + super().__init__(dataset=dataset) + self.feature_key = feature_keys[0] + self.label_key = label_key + self.mode = mode + + # PTB-XL always has 12 leads; match the hard-coded default in ResNet18ECG + in_channels: int = 12 + + output_size: int = self.get_output_size() + + # ── Bidirectional LSTM ──────────────────────────────────────────────── + # Input: (B, T, C) after permute + # Output: (B, T, hidden_size * 2) — bidirectional concatenation + self.lstm = nn.LSTM( + input_size=in_channels, + hidden_size=hidden_size, + num_layers=n_layers, + bidirectional=True, + batch_first=True, + dropout=dropout if n_layers > 1 else 0.0, + ) + + # ── Projection head ─────────────────────────────────────────────────── + # Pool over all time-steps: AdaptiveAvgPool1d(1) → (B, hidden_size*2) + self.pool = nn.AdaptiveAvgPool1d(1) + self.fc = nn.Linear(hidden_size * 2, output_size) + + # 
------------------------------------------------------------------ + def forward(self, **kwargs) -> dict: # type: ignore[override] + """Forward pass. + + Keyword args are the collated batch dict from the DataLoader. + The key ``self.feature_keys[0]`` (``"signal"``) holds a list of + ``np.ndarray`` of shape ``(12, T)``. + + Returns: + dict with keys ``"loss"``, ``"y_prob"``, ``"y_true"``, + ``"logit"`` — the standard PyHealth model output contract. + """ + # Input tensor already collated by the dataloader: (B, 12, T) + x: torch.Tensor = kwargs[self.feature_key].to(self.device) + + # (B, 12, T) → (B, T, 12) for sequence-first LSTM with batch_first=True + out, _ = self.lstm(x.permute(0, 2, 1)) # (B, T, hidden*2) + # (B, T, hidden*2) → (B, hidden*2, T) → pool → (B, hidden*2) + # Matches paper bi_lstm.py: AdaptiveAvgPool1d over ALL timesteps + pooled = self.pool(out.permute(0, 2, 1)).squeeze(-1) # (B, hidden*2) + logits = self.fc(pooled) # (B, K) + + y_true = kwargs[self.label_key].to(self.device) + loss = self.get_loss_function()(logits, y_true) + y_prob = self.prepare_y_prob(logits) + + return { + "loss": loss, + "y_prob": y_prob, + "y_true": y_true, + "logit": logits, + } diff --git a/pyhealth/models/lambda_resnet.py b/pyhealth/models/lambda_resnet.py new file mode 100644 index 000000000..fe6813ae0 --- /dev/null +++ b/pyhealth/models/lambda_resnet.py @@ -0,0 +1,394 @@ +""" +1-D Lambda-ResNet-18 ECG model. + +Implements the ``lambda_resnet1d18`` backbone used in: + + Nonaka N. & Seita J. (2021). In-depth Benchmarking of Deep Neural Network + Architectures for ECG Diagnosis. *PMLR* 149:1–19. + https://proceedings.mlr.press/v149/nonaka21a.html + +The lambda layer is described in: + + Bello I. (2021). LambdaNetworks: Modeling Long-Range Interactions Without + Attention. *ICLR 2021* (Spotlight). 
+ https://openreview.net/forum?id=xTJEN-ggl1b + +**Architecture notes** (from the reference ``lambdanet1d.py``): + +* The model uses **bottleneck blocks** (expansion = 4) for all four stages, + not basic blocks — giving effective channel widths of 256 / 512 / 1024 / 2048. +* Every bottleneck replaces its middle 3×1 convolution with a + :class:`LambdaConv1d` layer followed by optional average-pooling (for + downsampling stages) and BN + ReLU. +* ``dim_u = 4`` (intra-depth dimension) and ``nhead = 4``. +* The local positional context uses a learnable ``nn.Parameter`` shaped as a + Conv2d weight ``(dim_k, dim_u, 1, dim_m)`` applied via ``F.conv2d``, + exactly as in the reference. +* A ``Dropout(0.3)`` is placed *inside* the backbone's final FC (before the + linear projection to ``backbone_out_dim``). +* Input is clamped to ``[-20, 20]`` at the start of each forward pass and + after each of the first three stages — matching the reference's numerical + stability guards. + +See :mod:`pyhealth.models.resnet_ecg_base` for shared building blocks. + +Authors: + Anurag Dixit - anuragd2@illinois.edu + Kent Spillner - kspillne@illinois.edu + John Wells - jtwells2@illinois.edu +""" + +from typing import List, Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from pyhealth.datasets import SampleDataset +from pyhealth.models.resnet_ecg_base import ECGBackboneModel + +_CLAMP = 20.0 + + +# --------------------------------------------------------------------------- +# Lambda convolution layer +# --------------------------------------------------------------------------- + +class LambdaConv1d(nn.Module): + """1-D lambda layer (Bello, 2021). + + Captures content-based and position-based long-range interactions across + the full temporal sequence without materialising an attention map. + + Matches ``LambdaConv1d`` in the reference ``lambdanet1d.py``: + + * Queries projected and BN-normalised. 
+ * Keys projected (no BN), softmax-normalised over the time dimension. + * Values projected and BN-normalised. + * **Content lambda**: ``λ_c = softmax(K)ᵀ V`` → ``(B, dim_k, dim_v)``. + * **Position lambda**: learnable ``nn.Parameter`` shaped as a Conv2d + weight ``(dim_k, dim_u, 1, dim_m)`` applied to the reshaped values via + ``F.conv2d``, producing per-position context ``(B, dim_k, dim_v, N)``. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels (must be divisible by + ``nhead``). + nhead (int): Number of query heads. + dim_k (int): Key/query depth. Default ``16``. + dim_u (int): Intra-depth dimension. Default ``4``. + dim_m (int): Receptive field for the local positional embedding + (must be odd; ``0`` disables the positional term). Default ``7``. + + Examples: + >>> layer = LambdaConv1d(256, 256, nhead=4) + >>> layer(torch.randn(2, 256, 312)).shape + torch.Size([2, 256, 312]) + """ + + def __init__( + self, + in_channels: int, + out_channels: int, + nhead: int, + dim_k: int = 16, + dim_u: int = 4, + dim_m: int = 7, + ) -> None: + super().__init__() + + assert out_channels % nhead == 0, ( + f"out_channels ({out_channels}) must be divisible by nhead ({nhead})" + ) + + self.nhead = nhead + self.dim_k = dim_k + self.dim_u = dim_u + self.dim_m = dim_m + self.dim_v = out_channels // nhead + + self.local_context = dim_m > 0 + self.padding = (dim_m - 1) // 2 + + # Projections + self.to_queries = nn.Sequential( + nn.Conv1d(in_channels, dim_k * nhead, kernel_size=1, bias=False), + nn.BatchNorm1d(dim_k * nhead), + ) + self.to_keys = nn.Sequential( + nn.Conv1d(in_channels, dim_k * dim_u, kernel_size=1, bias=False), + ) + self.to_values = nn.Sequential( + nn.Conv1d(in_channels, self.dim_v * dim_u, kernel_size=1, bias=False), + nn.BatchNorm1d(self.dim_v * dim_u), + ) + + # Positional embedding: stored as a Conv2d-shaped parameter so that + # F.conv2d can apply it efficiently, matching the reference exactly. 
+ if self.local_context: + self.embedding = nn.Parameter( + torch.randn(dim_k, dim_u, 1, dim_m), requires_grad=True + ) + else: + self.embedding = nn.Parameter( + torch.randn(dim_k, dim_u), requires_grad=True + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + B, _, N = x.shape + k, u, v = self.dim_k, self.dim_u, self.dim_v + h = self.nhead + + queries = self.to_queries(x).view(B, h, k, N) # (B, h, k, N) + + keys = self.to_keys(x).view(B, k, u, N) # (B, k, u, N) + keys = F.softmax(keys, dim=-1) # softmax over N + + values = self.to_values(x).view(B, v, u, N) # (B, v, u, N) + + # Content lambda: λ_c = keys^T values → (B, k, v) + lambda_c = torch.einsum("bkum,bvum->bkv", keys, values) + y_c = torch.einsum("bhkn,bkv->bhvn", queries, lambda_c) # (B, h, v, N) + + # Position lambda + if self.local_context: + # values reshaped to (B, u, v, N) then treated as a 2D feature + # map of size (v, N) with u channels for the Conv2d application. + values_2d = values.view(B, u, v, N) # (B, u, v, N) + # F.conv2d: weight (k, u, 1, dim_m) applied to (B, u, v, N) + # → (B, k, v, N) + lambda_p = F.conv2d(values_2d, self.embedding, + padding=(0, self.padding)) # (B, k, v, N) + y_p = torch.einsum("bhkn,bkvn->bhvn", queries, lambda_p) + else: + lambda_p = torch.einsum("ku,bvun->bkvn", self.embedding, values) + y_p = torch.einsum("bhkn,bkvn->bhvn", queries, lambda_p) + + return (y_c + y_p).contiguous().view(B, h * v, N) # (B, out_ch, N) + + +# --------------------------------------------------------------------------- +# Lambda bottleneck block +# --------------------------------------------------------------------------- + +class LambdaBottleneck1d(nn.Module): + """1-D Lambda bottleneck block. + + Three-conv bottleneck where the middle 3×1 convolution is replaced by a + :class:`LambdaConv1d` layer, following ``LambdaBottleneck1d`` in the + reference ``lambdanet1d.py``. 
+ + For downsampling stages (``stride > 1``) an ``AvgPool1d`` is appended + after the lambda layer (before BN + ReLU), matching the reference. + + Args: + in_planes (int): Number of input channels. + planes (int): Base channel width; output is ``planes * 4``. + stride (int): Downsampling stride. Default ``1``. + + Examples: + >>> block = LambdaBottleneck1d(64, 64) # 64 → 256 channels + >>> block(torch.randn(4, 64, 312)).shape + torch.Size([4, 256, 312]) + """ + + expansion: int = 4 + + def __init__( + self, + in_planes: int, + planes: int, + stride: int = 1, + ) -> None: + super().__init__() + + # 1×1 bottleneck-down + self.conv1 = nn.Conv1d(in_planes, planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm1d(planes) + + # Lambda layer (replaces 3×1 conv) + optional avg-pool + BN + ReLU + lambda_layers: List[nn.Module] = [ + LambdaConv1d(planes, planes, nhead=4) + ] + if stride != 1 or in_planes != self.expansion * planes: + lambda_layers.append( + nn.AvgPool1d(kernel_size=3, stride=stride, padding=1) + ) + lambda_layers.append(nn.BatchNorm1d(planes)) + lambda_layers.append(nn.ReLU()) + self.conv2 = nn.Sequential(*lambda_layers) + + # 1×1 bottleneck-up + self.conv3 = nn.Conv1d(planes, self.expansion * planes, + kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm1d(self.expansion * planes) + + # Shortcut + self.shortcut: nn.Module + if stride != 1 or in_planes != self.expansion * planes: + self.shortcut = nn.Sequential( + nn.Conv1d(in_planes, self.expansion * planes, + kernel_size=1, stride=stride), + nn.BatchNorm1d(self.expansion * planes), + ) + else: + self.shortcut = nn.Identity() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = F.relu(self.bn1(self.conv1(x))) + out = self.conv2(out) + out = self.bn3(self.conv3(out)) + out = out + self.shortcut(x) + return F.relu(out) + + +# --------------------------------------------------------------------------- +# LambdaResNet1d backbone +# 
--------------------------------------------------------------------------- + +class LambdaResNet1d(nn.Module): + """1-D Lambda-ResNet backbone. + + All four stages use :class:`LambdaBottleneck1d` blocks, matching + ``LambdaResNet1d`` in the reference ``lambdanet1d.py``. + + The backbone's final FC wraps the linear projection with + ``Dropout(0.3)``, and ``torch.clamp([-20, 20])`` is applied after the + stem and after each of the first three stages for numerical stability + (both matching the reference). + + Args: + num_blocks (List[int]): Blocks per stage. + num_lead (int): Number of input channels (ECG leads). Default ``12``. + backbone_out_dim (int): Projection output dimension. Default ``256``. + + Examples: + >>> bb = LambdaResNet1d([2, 2, 2, 2]) + >>> bb(torch.randn(2, 12, 1250)).shape + torch.Size([2, 256]) + """ + + def __init__( + self, + num_blocks: List[int], + num_lead: int = 12, + backbone_out_dim: int = 256, + ) -> None: + super().__init__() + self.in_planes = 64 + + self.conv1 = nn.Conv1d(num_lead, 64, + kernel_size=7, stride=2, padding=3, bias=False) + self.bn1 = nn.BatchNorm1d(64) + self.maxpool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1) + + self.layer1 = self._make_layer(LambdaBottleneck1d, 64, num_blocks[0]) + self.layer2 = self._make_layer(LambdaBottleneck1d, 128, num_blocks[1], stride=2) + self.layer3 = self._make_layer(LambdaBottleneck1d, 256, num_blocks[2], stride=2) + self.layer4 = self._make_layer(LambdaBottleneck1d, 512, num_blocks[3], stride=2) + + self.avgpool = nn.AdaptiveAvgPool1d(1) + # Dropout inside the backbone FC, matching the reference. 
+ self.fc = nn.Sequential( + nn.Dropout(0.3), + nn.Linear(512 * LambdaBottleneck1d.expansion, backbone_out_dim), + ) + + def _make_layer( + self, + block: type, + planes: int, + num_blocks: int, + stride: int = 1, + ) -> nn.Sequential: + strides = [stride] + [1] * (num_blocks - 1) + layers: List[nn.Module] = [] + for s in strides: + layers.append(block(self.in_planes, planes, s)) + self.in_planes = planes * block.expansion + return nn.Sequential(*layers) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = torch.clamp(x, min=-_CLAMP, max=_CLAMP) + x = F.relu(self.bn1(self.conv1(x))) + x = self.maxpool(x) + + x = self.layer1(x) + x = torch.clamp(x, min=-_CLAMP, max=_CLAMP) + x = self.layer2(x) + x = torch.clamp(x, min=-_CLAMP, max=_CLAMP) + x = self.layer3(x) + x = torch.clamp(x, min=-_CLAMP, max=_CLAMP) + x = self.layer4(x) + + x = self.avgpool(x) + x = torch.flatten(x, 1) + return self.fc(x) + + +# --------------------------------------------------------------------------- +# LambdaResNet18ECG (PyHealth BaseModel) +# --------------------------------------------------------------------------- + +class LambdaResNet18ECG(ECGBackboneModel): + """Lambda-ResNet-18 backbone for ECG classification (Nonaka & Seita, 2021). + + Replaces the 3×1 convolution in every bottleneck block with a + :class:`LambdaConv1d` layer, enabling global temporal context modelling + without explicit attention maps (Bello, 2021). + + This is the ``lambda_resnet1d18`` variant from the reference code: four + stages of :class:`LambdaBottleneck1d` with layer counts ``[2, 2, 2, 2]`` + and effective channel widths of 256 / 512 / 1024 / 2048 (bottleneck + expansion = 4). + + All training/evaluation conventions (prediction head, sliding-window + protocol) are identical to :class:`~pyhealth.models.ResNet18ECG`. + + Args: + dataset (SampleDataset): Dataset used to infer feature/label keys, + output size, and loss function. + in_channels (int): Number of ECG leads. Default ``12``. 
+ backbone_output_dim (int): Backbone projection output dimension. + Default ``256``. + dropout (float): Dropout probability in the prediction head. + Default ``0.25``. + + Examples: + >>> import numpy as np + >>> from pyhealth.datasets import create_sample_dataset, get_dataloader + >>> samples = [ + ... {"patient_id": "p0", "visit_id": "v0", + ... "signal": np.random.randn(12, 1250).astype(np.float32), + ... "label": [1, 0, 1, 0, 0]}, + ... {"patient_id": "p1", "visit_id": "v1", + ... "signal": np.random.randn(12, 1250).astype(np.float32), + ... "label": [0, 1, 0, 1, 0]}, + ... ] + >>> dataset = create_sample_dataset( + ... samples=samples, + ... input_schema={"signal": "tensor"}, + ... output_schema={"label": "multilabel"}, + ... dataset_name="test", + ... ) + >>> model = LambdaResNet18ECG(dataset=dataset) + >>> out = model(**next(iter(get_dataloader(dataset, batch_size=2)))) + >>> sorted(out.keys()) + ['logit', 'loss', 'y_prob', 'y_true'] + """ + + def __init__( + self, + dataset: SampleDataset, + in_channels: int = 12, + backbone_output_dim: int = 256, + dropout: float = 0.25, + ) -> None: + super().__init__(dataset=dataset) + + self.backbone = LambdaResNet1d( + num_blocks=[2, 2, 2, 2], + num_lead=in_channels, + backbone_out_dim=backbone_output_dim, + ) + self._build_head(backbone_output_dim, dropout) diff --git a/pyhealth/models/resnet.py b/pyhealth/models/resnet.py new file mode 100644 index 000000000..ef539098a --- /dev/null +++ b/pyhealth/models/resnet.py @@ -0,0 +1,95 @@ +""" +Plain 1-D ResNet-18 ECG model. + +Implements the ``resnet1d18`` backbone used in: + + Nonaka N. & Seita J. (2021). In-depth Benchmarking of Deep Neural Network + Architectures for ECG Diagnosis. *PMLR* 149:1–19. + https://proceedings.mlr.press/v149/nonaka21a.html + +See :mod:`pyhealth.models.resnet_ecg_base` for the shared building blocks. 
+ +Authors: + Anurag Dixit - anuragd2@illinois.edu + Kent Spillner - kspillne@illinois.edu + John Wells - jtwells2@illinois.edu +""" + +from pyhealth.datasets import SampleDataset +from pyhealth.models.resnet_ecg_base import BasicBlock1d, ECGBackboneModel, ResNet1d + + +class ResNet18ECG(ECGBackboneModel): + """ResNet-18 backbone for ECG classification (Nonaka & Seita, 2021). + + Standard 1-D ResNet-18 with a two-layer prediction head. + + **Backbone** (``resnet1d18`` in the reference code): + + * Stem: ``Conv1d(12, 64, 7, stride=2) → BN → ReLU → MaxPool1d(3, stride=2)`` + * Four stages of :class:`~pyhealth.models.BasicBlock1d` with layer counts + ``[2, 2, 2, 2]`` and channel widths ``[64, 128, 256, 512]``. + * ``AdaptiveAvgPool1d(1)`` → ``Linear(512, backbone_output_dim)``. + + **Head** (``HeadModule`` in the reference code): + + ``Linear(256, 128) → ReLU → BN(128) → Dropout(0.25) → Linear(128, n_classes)`` + + **Windowing** (Section 4.2): + + During *training* a random 2.5-second window is cropped from each + recording (handle in the task preprocessing / collate function). + During *evaluation* use :meth:`forward_sliding_window` for the 50 %-overlap + sliding-window protocol from the paper. + + Args: + dataset (SampleDataset): Dataset used to infer feature/label keys, + output size, and loss function. + in_channels (int): Number of ECG leads. Default ``12``. + base_channels (int): Width of the first residual stage. Default ``64``. + backbone_output_dim (int): Backbone projection output dimension. + Default ``256``. + dropout (float): Dropout probability in the prediction head. + Default ``0.25``. + + Examples: + >>> import numpy as np + >>> from pyhealth.datasets import create_sample_dataset, get_dataloader + >>> samples = [ + ... {"patient_id": "p0", "visit_id": "v0", + ... "signal": np.random.randn(12, 1250).astype(np.float32), + ... "label": [1, 0, 1, 0, 0]}, + ... {"patient_id": "p1", "visit_id": "v1", + ... 
"signal": np.random.randn(12, 1250).astype(np.float32), + ... "label": [0, 1, 0, 1, 0]}, + ... ] + >>> dataset = create_sample_dataset( + ... samples=samples, + ... input_schema={"signal": "tensor"}, + ... output_schema={"label": "multilabel"}, + ... dataset_name="test", + ... ) + >>> model = ResNet18ECG(dataset=dataset) + >>> out = model(**next(iter(get_dataloader(dataset, batch_size=2)))) + >>> sorted(out.keys()) + ['logit', 'loss', 'y_prob', 'y_true'] + """ + + def __init__( + self, + dataset: SampleDataset, + in_channels: int = 12, + base_channels: int = 64, + backbone_output_dim: int = 256, + dropout: float = 0.25, + ) -> None: + super().__init__(dataset=dataset) + + self.backbone = ResNet1d( + in_channels=in_channels, + layers=[2, 2, 2, 2], + block=BasicBlock1d, + base_channels=base_channels, + output_dim=backbone_output_dim, + ) + self._build_head(backbone_output_dim, dropout) diff --git a/pyhealth/models/resnet_ecg_base.py b/pyhealth/models/resnet_ecg_base.py new file mode 100644 index 000000000..702bccc47 --- /dev/null +++ b/pyhealth/models/resnet_ecg_base.py @@ -0,0 +1,375 @@ +""" +Shared building blocks for 1-D ResNet-based ECG models. + +This module provides: + +* :class:`BasicBlock1d` – the two-conv residual block for ResNet-18/34. +* :class:`Bottleneck1d` – the three-conv bottleneck block for ResNet-50+. +* :class:`ResNet1d` – a generic 1-D ResNet backbone whose block type and + layer counts are fully configurable via constructor arguments. +* :class:`ECGBackboneModel` – an abstract :class:`~pyhealth.models.BaseModel` + that owns the shared prediction head, :meth:`forward`, and + :meth:`forward_sliding_window` inherited by every ECG ResNet variant. + +References: + He K. et al. (2016). Deep Residual Learning for Image Recognition. *CVPR*. + + Nonaka N. & Seita J. (2021). In-depth Benchmarking of Deep Neural Network + Architectures for ECG Diagnosis. *PMLR* 149:1–19. 
+ https://proceedings.mlr.press/v149/nonaka21a.html + +Authors: + Anurag Dixit - anuragd2@illinois.edu + Kent Spillner - kspillne@illinois.edu + John Wells - jtwells2@illinois.edu +""" + +from typing import Callable, Dict, List, Optional, Type, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from pyhealth.datasets import SampleDataset +from pyhealth.models import BaseModel + + +# --------------------------------------------------------------------------- +# BasicBlock1d +# --------------------------------------------------------------------------- + +class BasicBlock1d(nn.Module): + """1-D two-conv residual basic block (ResNet-18/34). + + Directly mirrors ``torchvision.models.resnet.BasicBlock`` with all 2-D + operations replaced by 1-D equivalents. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + stride (int): Stride of the first convolution. Default ``1``. + + Examples: + >>> block = BasicBlock1d(64, 128, stride=2) + >>> block(torch.randn(4, 64, 500)).shape + torch.Size([4, 128, 250]) + """ + + expansion: int = 1 + + def __init__( + self, + in_channels: int, + out_channels: int, + stride: int = 1, + ) -> None: + super().__init__() + + self.conv1 = nn.Conv1d( + in_channels, out_channels, + kernel_size=3, stride=stride, padding=1, bias=False, + ) + self.bn1 = nn.BatchNorm1d(out_channels) + self.relu = nn.ReLU(inplace=True) + self.conv2 = nn.Conv1d( + out_channels, out_channels, + kernel_size=3, stride=1, padding=1, bias=False, + ) + self.bn2 = nn.BatchNorm1d(out_channels) + + self.downsample: Optional[nn.Sequential] = None + if stride != 1 or in_channels != out_channels: + self.downsample = nn.Sequential( + nn.Conv1d(in_channels, out_channels, + kernel_size=1, stride=stride, bias=False), + nn.BatchNorm1d(out_channels), + ) + + def _conv_branch(self, x: torch.Tensor) -> torch.Tensor: + """Conv1 → BN1 → ReLU → Conv2 → BN2. 
+ + Extracted so SE/Lambda subclasses can insert attention after BN2 + and before the residual addition. + """ + out = self.relu(self.bn1(self.conv1(x))) + return self.bn2(self.conv2(out)) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + identity = x if self.downsample is None else self.downsample(x) + return self.relu(self._conv_branch(x) + identity) + + +# --------------------------------------------------------------------------- +# Bottleneck1d +# --------------------------------------------------------------------------- + +class Bottleneck1d(nn.Module): + """1-D three-conv bottleneck residual block (ResNet-50+). + + Mirrors ``torchvision.models.resnet.Bottleneck`` with 1-D operations. + The channel expansion factor is 4 (``planes * 4`` output channels). + + Args: + in_channels (int): Number of input channels. + planes (int): Base channel width; output is ``planes * 4``. + stride (int): Stride of the 3×1 convolution. Default ``1``. + + Examples: + >>> block = Bottleneck1d(64, 64) # 64 → 256 channels + >>> block(torch.randn(4, 64, 500)).shape + torch.Size([4, 256, 500]) + """ + + expansion: int = 4 + + def __init__( + self, + in_channels: int, + planes: int, + stride: int = 1, + ) -> None: + super().__init__() + out_channels = planes * self.expansion + + self.conv1 = nn.Conv1d(in_channels, planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm1d(planes) + self.conv2 = nn.Conv1d(planes, planes, + kernel_size=3, stride=stride, padding=1, bias=False) + self.bn2 = nn.BatchNorm1d(planes) + self.conv3 = nn.Conv1d(planes, out_channels, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm1d(out_channels) + self.relu = nn.ReLU(inplace=True) + + self.downsample: Optional[nn.Sequential] = None + if stride != 1 or in_channels != out_channels: + self.downsample = nn.Sequential( + nn.Conv1d(in_channels, out_channels, + kernel_size=1, stride=stride, bias=False), + nn.BatchNorm1d(out_channels), + ) + + def _conv_branch(self, x: torch.Tensor) -> torch.Tensor: + 
out = self.relu(self.bn1(self.conv1(x))) + out = self.relu(self.bn2(self.conv2(out))) + return self.bn3(self.conv3(out)) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + identity = x if self.downsample is None else self.downsample(x) + return self.relu(self._conv_branch(x) + identity) + + +# --------------------------------------------------------------------------- +# ResNet1d +# --------------------------------------------------------------------------- + +BlockType = Union[Type[BasicBlock1d], Type[Bottleneck1d]] + + +class ResNet1d(nn.Module): + """Generic configurable 1-D ResNet backbone. + + Args: + in_channels (int): Input channels (ECG leads). + layers (List[int]): Blocks per stage, e.g. ``[2, 2, 2, 2]``. + block (BlockType): Block constructor (:class:`BasicBlock1d` or + :class:`Bottleneck1d`, or an augmented subclass). + base_channels (int): Width of the first residual stage. Default ``64``. + output_dim (int, optional): Projection output dimension. ``None`` + returns the raw GAP output. Default ``256``. + block_kwargs (dict, optional): Extra keyword arguments forwarded to + every block constructor call. 
+ + Examples: + >>> bb = ResNet1d(12, [2, 2, 2, 2], BasicBlock1d, output_dim=256) + >>> bb(torch.randn(4, 12, 1250)).shape + torch.Size([4, 256]) + """ + + def __init__( + self, + in_channels: int, + layers: List[int], + block: BlockType, + base_channels: int = 64, + output_dim: Optional[int] = 256, + block_kwargs: Optional[dict] = None, + ) -> None: + super().__init__() + block_kwargs = block_kwargs or {} + + self.stem = nn.Sequential( + nn.Conv1d(in_channels, base_channels, + kernel_size=7, stride=2, padding=3, bias=False), + nn.BatchNorm1d(base_channels), + nn.ReLU(inplace=True), + nn.MaxPool1d(kernel_size=3, stride=2, padding=1), + ) + + channel_widths = [base_channels * (2 ** i) for i in range(4)] + strides = [1, 2, 2, 2] + + self.stages = nn.ModuleList() + in_ch = base_channels + for n_blocks, stride, out_ch in zip(layers, strides, channel_widths): + # First block may change spatial resolution and/or channel width; + # remaining blocks keep stride=1. + stage: List[nn.Module] = [ + block(in_ch, out_ch, stride=stride, **block_kwargs) + ] + in_ch = out_ch * block.expansion # type: ignore[attr-defined] + for _ in range(1, n_blocks): + stage.append(block(in_ch, out_ch, stride=1, **block_kwargs)) + self.stages.append(nn.Sequential(*stage)) + + self.gap = nn.AdaptiveAvgPool1d(1) + + self.proj: Optional[nn.Linear] = None + final_ch = channel_widths[-1] * block.expansion # type: ignore[attr-defined] + if output_dim is not None: + self.proj = nn.Linear(final_ch, output_dim) + self.out_channels = output_dim + else: + self.out_channels = final_ch + + self._init_weights() + + def _init_weights(self) -> None: + for m in self.modules(): + if isinstance(m, nn.Conv1d): + nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu") + elif isinstance(m, nn.BatchNorm1d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.constant_(m.bias, 0) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = 
self.stem(x) + for stage in self.stages: + x = stage(x) + x = self.gap(x).squeeze(-1) + if self.proj is not None: + x = self.proj(x) + return x + + +# --------------------------------------------------------------------------- +# ECGBackboneModel – shared PyHealth BaseModel +# --------------------------------------------------------------------------- + +class ECGBackboneModel(BaseModel): + """Abstract base class for ECG ResNet-variant PyHealth models. + + Subclass template:: + + class MyECGModel(ECGBackboneModel): + def __init__(self, dataset, ...): + super().__init__(dataset) + self.backbone = MyBackbone(...) + self._build_head(backbone_output_dim, dropout) + + Provides :meth:`_build_head`, :meth:`forward`, and + :meth:`forward_sliding_window`. Subclasses only need to set + ``self.backbone`` and call ``self._build_head()``. + """ + + def __init__(self, dataset: SampleDataset) -> None: + super().__init__(dataset=dataset) + assert len(self.feature_keys) == 1, ( + f"{type(self).__name__} expects exactly one feature key." + ) + assert len(self.label_keys) == 1, ( + f"{type(self).__name__} expects exactly one label key." + ) + self.feature_key = self.feature_keys[0] + self.label_key = self.label_keys[0] + self.backbone: nn.Module # assigned by subclass before _build_head() + + def _build_head(self, backbone_output_dim: int, dropout: float = 0.25) -> None: + """Build the prediction head from Nonaka & Seita (2021). + + ``Linear(d, 128) → ReLU → BN(128) → Dropout(0.25) → Linear(128, n_classes)`` + + Must be called after ``self.backbone`` has been assigned. + + Args: + backbone_output_dim (int): Output dimension of the backbone. + dropout (float): Dropout probability. Default ``0.25``. 
+ """ + output_size = self.get_output_size() + self.head = nn.Sequential( + nn.Linear(backbone_output_dim, 128), + nn.ReLU(inplace=True), + nn.BatchNorm1d(128), + nn.Dropout(p=dropout), + nn.Linear(128, output_size), + ) + + def forward(self, **kwargs) -> Dict[str, torch.Tensor]: + """Forward pass for a single fixed-length window. + + Args: + **kwargs: Must contain the feature key (tensor ``(batch, n_leads, + window_length)``) and the label key. + + Returns: + Dict with keys ``loss``, ``y_prob``, ``y_true``, ``logit``, and + optionally ``embed`` when ``kwargs["embed"]`` is ``True``. + """ + x: torch.Tensor = kwargs[self.feature_key].to(self.device) + emb = self.backbone(x) + logits = self.head(emb) + + y_true = kwargs[self.label_key].to(self.device) + loss = self.get_loss_function()(logits, y_true) + y_prob = self.prepare_y_prob(logits) + + results: Dict[str, torch.Tensor] = { + "loss": loss, + "y_prob": y_prob, + "y_true": y_true, + "logit": logits, + } + if kwargs.get("embed", False): + results["embed"] = emb + return results + + def forward_sliding_window( + self, + signal: torch.Tensor, + window_size: int, + step_size: Optional[int] = None, + ) -> torch.Tensor: + """Sliding-window evaluation (Nonaka & Seita, 2021, Section 4.2). + + Splits *signal* into overlapping windows, runs the model on each, and + returns the per-class **maximum** probability across windows. + + Args: + signal (torch.Tensor): ``(batch, n_leads, total_length)``. + window_size (int): Samples per window (e.g. ``1250`` for 2.5 s at + 500 Hz). + step_size (int, optional): Stride between windows. Defaults to + ``window_size // 2`` (50 % overlap). + + Returns: + torch.Tensor: ``(batch, n_classes)``. 
+ """ + if step_size is None: + step_size = window_size // 2 + + total_length = signal.shape[-1] + starts = list(range(0, total_length - window_size + 1, step_size)) + if not starts: + signal = F.pad(signal, (0, window_size - total_length)) + starts = [0] + + all_probs: List[torch.Tensor] = [] + self.eval() + with torch.no_grad(): + for start in starts: + window = signal[..., start: start + window_size].to(self.device) + logits = self.head(self.backbone(window)) + all_probs.append(self.prepare_y_prob(logits)) + + return torch.stack(all_probs, dim=1).max(dim=1).values diff --git a/pyhealth/models/se_resnet.py b/pyhealth/models/se_resnet.py new file mode 100644 index 000000000..dc0d0b37a --- /dev/null +++ b/pyhealth/models/se_resnet.py @@ -0,0 +1,219 @@ +""" +1-D SE-ResNet-50 ECG model. + +Implements the ``se_resnet1d50`` backbone used in: + + Nonaka N. & Seita J. (2021). In-depth Benchmarking of Deep Neural Network + Architectures for ECG Diagnosis. *PMLR* 149:1–19. + https://proceedings.mlr.press/v149/nonaka21a.html + +The SE block is described in: + + Hu J., Shen L. & Sun G. (2018). Squeeze-and-Excitation Networks. *CVPR*. + +The paper benchmarks SE-ResNet-**50** (not SE-ResNet-18). The backbone uses +three-conv bottleneck blocks (expansion = 4) with an SE module applied after +the third convolution and before the residual addition (Figure 3 of Hu et al.). + +The reference implementation (``senet1d.py``) uses ``Conv1d(1×1)`` rather +than ``nn.Linear`` for the SE excitation bottleneck, and the downsample +convolution in each stage uses ``kernel_size=1, padding=0`` (not 3/1 as in +the SENet-154 variant). + +See :mod:`pyhealth.models.resnet_ecg_base` for shared building blocks. 
+ +Authors: + Anurag Dixit - anuragd2@illinois.edu + Kent Spillner - kspillne@illinois.edu + John Wells - jtwells2@illinois.edu +""" + +from typing import Optional + +import torch +import torch.nn as nn + +from pyhealth.datasets import SampleDataset +from pyhealth.models.resnet_ecg_base import ECGBackboneModel, ResNet1d + + +# --------------------------------------------------------------------------- +# SE module +# --------------------------------------------------------------------------- + +class SEModule1d(nn.Module): + """1-D Squeeze-and-Excitation module (Hu et al., 2018). + + Uses ``Conv1d(1×1)`` projections (matching the reference ``senet1d.py``). + + Squeeze: ``AdaptiveAvgPool1d(1)`` → ``(batch, C, 1)`` + Excitation: ``Conv1d(C, C//r, 1) → ReLU → Conv1d(C//r, C, 1) → Sigmoid`` + Scale: element-wise multiply input by the channel weights. + + Args: + channels (int): Number of input channels. + reduction (int): Bottleneck reduction ratio. Default ``16``. + + Examples: + >>> se = SEModule1d(256) + >>> se(torch.randn(4, 256, 312)).shape + torch.Size([4, 256, 312]) + """ + + def __init__(self, channels: int, reduction: int = 16) -> None: + super().__init__() + self.avg_pool = nn.AdaptiveAvgPool1d(1) + self.fc1 = nn.Conv1d(channels, channels // reduction, + kernel_size=1, padding=0) + self.relu = nn.ReLU(inplace=True) + self.fc2 = nn.Conv1d(channels // reduction, channels, + kernel_size=1, padding=0) + self.sigmoid = nn.Sigmoid() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + s = self.avg_pool(x) + s = self.relu(self.fc1(s)) + s = self.sigmoid(self.fc2(s)) + return x * s + + +# --------------------------------------------------------------------------- +# SE bottleneck block +# --------------------------------------------------------------------------- + +class SEResNetBottleneck1d(nn.Module): + """1-D SE-ResNet bottleneck block. 
+ + Three-conv bottleneck (1×1 → 3×1 → 1×1) with an SE module applied + after the third convolution and before the residual addition. + + Matches ``SEResNetBottleneck1d`` in the reference ``senet1d.py``. + + Args: + in_channels (int): Number of input channels. + planes (int): Base channel width; output is ``planes * 4``. + stride (int): Stride of the 3×1 convolution. Default ``1``. + reduction (int): SE reduction ratio. Default ``16``. + + Examples: + >>> block = SEResNetBottleneck1d(64, 64) # 64 → 256 channels + >>> block(torch.randn(4, 64, 500)).shape + torch.Size([4, 256, 500]) + """ + + expansion: int = 4 + + def __init__( + self, + in_channels: int, + planes: int, + stride: int = 1, + reduction: int = 16, + ) -> None: + super().__init__() + out_channels = planes * self.expansion + + # Reference uses stride in conv1 (Caffe convention), not in conv2. + self.conv1 = nn.Conv1d(in_channels, planes, + kernel_size=1, stride=stride, bias=False) + self.bn1 = nn.BatchNorm1d(planes) + self.conv2 = nn.Conv1d(planes, planes, + kernel_size=3, padding=1, bias=False) + self.bn2 = nn.BatchNorm1d(planes) + self.conv3 = nn.Conv1d(planes, out_channels, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm1d(out_channels) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule1d(out_channels, reduction=reduction) + + self.downsample: Optional[nn.Sequential] = None + if stride != 1 or in_channels != out_channels: + # Reference uses kernel_size=1, padding=0 for SE-ResNet variants. 
+ self.downsample = nn.Sequential( + nn.Conv1d(in_channels, out_channels, + kernel_size=1, stride=stride, padding=0, bias=False), + nn.BatchNorm1d(out_channels), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + identity = x if self.downsample is None else self.downsample(x) + + out = self.relu(self.bn1(self.conv1(x))) + out = self.relu(self.bn2(self.conv2(out))) + out = self.bn3(self.conv3(out)) + out = self.se_module(out) + identity + return self.relu(out) + + +# --------------------------------------------------------------------------- +# SEResNet50ECG (PyHealth BaseModel) +# --------------------------------------------------------------------------- + +class SEResNet50ECG(ECGBackboneModel): + """SE-ResNet-50 backbone for ECG classification (Nonaka & Seita, 2021). + + Augments a ResNet-50 backbone by inserting a + :class:`~pyhealth.models.SEModule1d` channel-attention gate into every + bottleneck block, following Hu et al. (2018). + + This is the variant the paper benchmarks as "SE-ResNet" (``se_resnet1d50`` + in the reference code). It uses three-conv bottleneck blocks + (``expansion = 4``) with layer counts ``[3, 4, 6, 3]``. + + All training/evaluation conventions (backbone output dimension, prediction + head, sliding-window protocol) are identical to + :class:`~pyhealth.models.ResNet18ECG`. + + Args: + dataset (SampleDataset): Dataset used to infer feature/label keys, + output size, and loss function. + in_channels (int): Number of ECG leads. Default ``12``. + base_channels (int): Width of the first residual stage. Default ``64``. + backbone_output_dim (int): Backbone projection output dimension. + Default ``256``. + dropout (float): Dropout probability in the prediction head. + Default ``0.25``. + reduction (int): SE bottleneck reduction ratio. Default ``16``. + + Examples: + >>> import numpy as np + >>> from pyhealth.datasets import create_sample_dataset, get_dataloader + >>> samples = [ + ... {"patient_id": "p0", "visit_id": "v0", + ... 
"signal": np.random.randn(12, 1250).astype(np.float32), + ... "label": [1, 0, 1, 0, 0]}, + ... {"patient_id": "p1", "visit_id": "v1", + ... "signal": np.random.randn(12, 1250).astype(np.float32), + ... "label": [0, 1, 0, 1, 0]}, + ... ] + >>> dataset = create_sample_dataset( + ... samples=samples, + ... input_schema={"signal": "tensor"}, + ... output_schema={"label": "multilabel"}, + ... dataset_name="test", + ... ) + >>> model = SEResNet50ECG(dataset=dataset) + >>> out = model(**next(iter(get_dataloader(dataset, batch_size=2)))) + >>> sorted(out.keys()) + ['logit', 'loss', 'y_prob', 'y_true'] + """ + + def __init__( + self, + dataset: SampleDataset, + in_channels: int = 12, + base_channels: int = 64, + backbone_output_dim: int = 256, + dropout: float = 0.25, + reduction: int = 16, + ) -> None: + super().__init__(dataset=dataset) + + self.backbone = ResNet1d( + in_channels=in_channels, + layers=[3, 4, 6, 3], # ResNet-50 layer counts + block=SEResNetBottleneck1d, + base_channels=base_channels, + output_dim=backbone_output_dim, + block_kwargs={"reduction": reduction}, + ) + self._build_head(backbone_output_dim, dropout) diff --git a/pyhealth/tasks/__init__.py b/pyhealth/tasks/__init__.py index 797988377..52ce0bc06 100644 --- a/pyhealth/tasks/__init__.py +++ b/pyhealth/tasks/__init__.py @@ -45,6 +45,7 @@ MortalityPredictionStageNetMIMIC4, ) from .patient_linkage import patient_linkage_mimic3_fn +from .ptbxl_multilabel_classification import PTBXLMultilabelClassification from .readmission_prediction import ( ReadmissionPredictionEICU, ReadmissionPredictionMIMIC3, diff --git a/pyhealth/tasks/ptbxl_multilabel_classification.py b/pyhealth/tasks/ptbxl_multilabel_classification.py new file mode 100644 index 000000000..cab8a86b9 --- /dev/null +++ b/pyhealth/tasks/ptbxl_multilabel_classification.py @@ -0,0 +1,371 @@ +""" +PTB-XL multi-label ECG classification task. 
+ +This module provides :class:`PTBXLMultilabelClassification`, a +:class:`~pyhealth.tasks.BaseTask` subclass that turns a +:class:`~pyhealth.datasets.PTBXLDataset` into a multi-label classification +problem. + +Two label spaces are supported, selected via the ``label_type`` constructor +argument. This design enables the **ablation study** described in the project +paper: hold the model and training hyper-parameters constant and vary only the +label granularity (and optionally the signal sampling rate) to observe how +label coarseness affects downstream ROC-AUC and F1 performance. + +Mathematical framing +-------------------- +Let :math:`X \\in \\mathbb{R}^{C \\times T}` be a single ECG recording with +:math:`C = 12` leads and :math:`T` time-steps (1,000 at 100 Hz or 5,000 at +500 Hz). Given a label universe of :math:`K` classes, the ground-truth +annotation is a binary vector :math:`y \\in \\{0, 1\\}^K` (multi-hot). + +A model :math:`f_\\theta` maps the ECG to per-class logit scores: + +.. math:: + + \\hat{y} = \\sigma\\!\\left(f_\\theta(X) W^\\top + b\\right) \\in [0,1]^K + +Training minimises the element-wise **binary cross-entropy**: + +.. math:: + + \\mathcal{L} = -\\frac{1}{K} \\sum_{k=1}^{K} + \\Bigl[ y_k \\log \\hat{y}_k + (1 - y_k) \\log (1 - \\hat{y}_k) \\Bigr] + +Evaluation uses **macro-averaged ROC-AUC**: + +.. math:: + + \\overline{\\text{AUC}} = \\frac{1}{K} \\sum_{k=1}^{K} + \\int_0^1 \\text{TPR}_k(t)\\, d\\text{FPR}_k(t) + +and **macro-averaged F1** (at threshold 0.5): + +.. math:: + + \\overline{F_1} = \\frac{1}{K} \\sum_{k=1}^{K} + \\frac{2 \\cdot \\text{TP}_k}{2 \\cdot \\text{TP}_k + \\text{FP}_k + \\text{FN}_k} + +Label spaces +------------ +``"superdiagnostic"`` (:data:`SUPERDIAG_CLASSES` — 5 classes) + Directly mirrors the five PTB-XL superdiagnostic categories from + Strodthoff et al. (2020). SNOMED-CT codes from every recording's + ``# Dx:`` list are mapped to one or more of NORM / MI / STTC / CD / HYP + using :data:`SNOMED_TO_SUPERDIAG`. 
Records with no mappable code are + skipped. + +``"diagnostic"`` (:data:`CHALLENGE_SNOMED_CLASSES` — 27 classes) + Uses the 27 SNOMED-CT codes that were officially scored in the + PhysioNet/CinC Challenge 2020. Each code present in a recording's + ``# Dx:`` list that falls within this vocabulary becomes a positive label. + Records with no scored codes are skipped. + +Ablation axes +------------- +The two constructor arguments create the natural ablation grid: + ++-----------------------+-------------------+-------------------+ +| ``label_type``        | ``sampling_rate`` | Description       | ++=======================+===================+===================+ +| ``"superdiagnostic"`` | 100               | 5-class / 100 Hz  | ++-----------------------+-------------------+-------------------+ +| ``"superdiagnostic"`` | 500               | 5-class / 500 Hz  | ++-----------------------+-------------------+-------------------+ +| ``"diagnostic"``      | 100               | 27-class / 100 Hz | ++-----------------------+-------------------+-------------------+ +| ``"diagnostic"``      | 500               | 27-class / 500 Hz | ++-----------------------+-------------------+-------------------+ + +Authors: + Anurag Dixit - anuragd2@illinois.edu + Kent Spillner - kspillne@illinois.edu + John Wells - jtwells2@illinois.edu +""" + +import logging +from typing import Dict, List, Optional + +import numpy as np + +from pyhealth.data import Patient +from pyhealth.tasks import BaseTask + +logger = logging.getLogger(__name__) + +# --------------------------------------------------------------------------- +# Label-space definitions +# --------------------------------------------------------------------------- + +#: Mapping from SNOMED-CT code (string) to one of the 5 PTB-XL superdiagnostic +#: classes. Codes absent from this dict are silently ignored during label +#: construction. The mapping follows Table 1 of Strodthoff et al. (2020) and +#: the PhysioNet Challenge 2020 label alignment documented in the challenge +#: description paper. 
+SNOMED_TO_SUPERDIAG: Dict[str, str] = { + # ------ NORM — Normal sinus rhythm ----------------------------------- # + "426783006": "NORM", + # ------ MI — Myocardial Infarction ----------------------------------- # + "57054005": "MI", # Acute myocardial infarction + "164865005": "MI", # Myocardial infarction + "413444003": "MI", # Acute MI of anterolateral wall + "413867000": "MI", # Acute MI of inferior wall + "164861001": "MI", # Anterior MI + "164857002": "MI", # Inferior MI + "164860000": "MI", # Anteroseptal MI + "164864009": "MI", # Posterior MI + "164867002": "MI", # Lateral MI + # ------ STTC — ST/T-wave Change -------------------------------------- # + "164931005": "STTC", # ST elevation + "164934002": "STTC", # ST depression + "59931005": "STTC", # Inverted T-wave / T-wave abnormality + "164947007": "STTC", # Prolonged PR interval + "164917005": "STTC", # Prolonged QT interval + "251268003": "STTC", # Early repolarisation pattern + "428750005": "STTC", # Non-specific ST-T change + # ------ CD — Conduction Disturbance / Rhythm Disorder ---------------- # + "270492004": "CD", # First-degree AV block + "195042002": "CD", # Second-degree AV block + "27885002": "CD", # Third-degree AV block + "6374002": "CD", # Bundle branch block (unspecified) + "713427006": "CD", # Complete right bundle branch block (CRBBB) + "713426002": "CD", # Complete left bundle branch block (CLBBB) + "164909002": "CD", # Left bundle branch block + "59118001": "CD", # Right bundle branch block + "698252002": "CD", # Non-specific intraventricular conduction disturbance + "445118002": "CD", # Left anterior fascicular block (LAFB) + "10370003": "CD", # Pacing rhythm + "164889003": "CD", # Atrial fibrillation + "164890007": "CD", # Atrial flutter + "426627000": "CD", # Bradycardia + "427393009": "CD", # Sinus arrhythmia + "426177001": "CD", # Sinus bradycardia + "427084000": "CD", # Sinus tachycardia + "63593006": "CD", # Supraventricular premature beats + "17338001": "CD", # Ventricular 
premature beats + "284470004": "CD", # Premature atrial contraction + "427172004": "CD", # Premature ventricular contraction + # ------ HYP — Hypertrophy / Axis Deviation --------------------------- # + "55827005": "HYP", # Left ventricular hypertrophy + "446358003": "HYP", # Right ventricular hypertrophy + "73282002": "HYP", # Biventricular hypertrophy + "67751000119106": "HYP", # Left atrial enlargement + "446813000": "HYP", # Right atrial enlargement + "39732003": "HYP", # Left axis deviation + "47665007": "HYP", # Right axis deviation + "251146004": "HYP", # Low QRS voltage +} + +#: Ordered list of the 5 superdiagnostic class names. The ordering is +#: deterministic so that model outputs are consistently interpretable. +SUPERDIAG_CLASSES: List[str] = ["NORM", "MI", "STTC", "CD", "HYP"] + +#: The 27 SNOMED-CT codes officially scored in the PhysioNet/CinC Challenge +#: 2020 (alphabetically sorted by their clinical abbreviation for readability). +#: These form the label universe for ``label_type="diagnostic"``. +CHALLENGE_SNOMED_CLASSES: List[str] = sorted( + [ + "270492004", # IAVB — First-degree atrioventricular block + "164889003", # AF — Atrial fibrillation + "164890007", # AFL — Atrial flutter + "6374002", # BBB — Bundle branch block (unspecified) + "426627000", # Brady — Bradycardia + "713427006", # CRBBB — Complete right bundle branch block + "713426002", # CLBBB — Complete left bundle branch block + "445118002", # LAnFB — Left anterior fascicular block + "39732003", # LAD — Left axis deviation + "164909002", # LBBB — Left bundle branch block + "251146004", # LQRSV — Low QRS voltage + "698252002", # NSIVCB — Non-specific intraventricular conduction dist. 
+ "10370003", # PR — Pacing rhythm + "164947007", # LPR — Prolonged PR interval + "164917005", # LQT — Prolonged QT interval + "47665007", # RAD — Right axis deviation + "427393009", # SA — Sinus arrhythmia + "426177001", # SB — Sinus bradycardia + "426783006", # NSR — Normal sinus rhythm + "427084000", # ST — Sinus tachycardia + "63593006", # SVPB — Supraventricular premature beats + "164934002", # STD — ST depression + "59931005", # TWA — T-wave abnormality + "164931005", # STE — ST elevation + "17338001", # VPB — Ventricular premature beats + "284470004", # PAC — Premature atrial contraction + "427172004", # PVC — Premature ventricular contraction + ] +) + +_CHALLENGE_SET: frozenset = frozenset(CHALLENGE_SNOMED_CLASSES) + + +# --------------------------------------------------------------------------- +# Task class +# --------------------------------------------------------------------------- + + +class PTBXLMultilabelClassification(BaseTask): + """Multi-label 12-lead ECG classification on PTB-XL. + + For each ECG recording this task: + + 1. Loads the ``.mat`` signal matrix via :func:`scipy.io.loadmat` + (shape ``(12, 5000)`` at 500 Hz). + 2. Optionally decimates the signal to 100 Hz (shape ``(12, 1000)``). + 3. Parses SNOMED-CT codes from the ``scp_codes`` event attribute. + 4. Maps those codes to the chosen label space (superdiagnostic or + full Challenge 27-class). + 5. Returns one sample dict per valid recording:: + + { + "signal": np.ndarray, # shape (12, T), float32 + "labels": List[str], # positive class names / SNOMED strings + } + + Args: + sampling_rate (int): Target sampling rate in Hz. Accepted values are + ``100`` (decimation ×5 from the native 500 Hz; yields ``T = 1000``) + and ``500`` (no resampling; yields ``T = 5000``). + Defaults to ``100``. + label_type (str): Label vocabulary to use. 
``"superdiagnostic"`` + yields 5 classes (NORM, MI, STTC, CD, HYP); + ``"diagnostic"`` yields 27 SNOMED-CT classes from the PhysioNet + Challenge 2020 scoring list. Defaults to ``"superdiagnostic"``. + + Raises: + ValueError: If ``sampling_rate`` is not 100 or 500. + ValueError: If ``label_type`` is not ``"superdiagnostic"`` or + ``"diagnostic"``. + + Examples: + Superdiagnostic task at 100 Hz (default):: + + >>> from pyhealth.datasets import PTBXLDataset + >>> from pyhealth.tasks import PTBXLMultilabelClassification + >>> dataset = PTBXLDataset(root="/data/.../training/ptb-xl/") + >>> task = PTBXLMultilabelClassification() + >>> sample_ds = dataset.set_task(task) + >>> sample_ds[0]["labels"] # e.g. ["NORM"] or ["CD", "STTC"] + + 27-class diagnostic task at 500 Hz (ablation variant):: + + >>> task_27 = PTBXLMultilabelClassification( + ... sampling_rate=500, label_type="diagnostic" + ... ) + >>> sample_ds_27 = dataset.set_task(task_27) + + See Also: + :data:`SNOMED_TO_SUPERDIAG`, :data:`SUPERDIAG_CLASSES`, + :data:`CHALLENGE_SNOMED_CLASSES` + """ + + task_name: str = "PTBXLMultilabelClassification" + input_schema: Dict[str, str] = {"signal": "tensor"} + output_schema: Dict[str, str] = {"labels": "multilabel"} + + def __init__( + self, + sampling_rate: int = 100, + label_type: str = "superdiagnostic", + ) -> None: + super().__init__() + + if sampling_rate not in (100, 500): + raise ValueError( + f"sampling_rate must be 100 or 500, got {sampling_rate}." + ) + if label_type not in ("superdiagnostic", "diagnostic"): + raise ValueError( + "label_type must be 'superdiagnostic' or 'diagnostic', " + f"got '{label_type}'." + ) + + self.sampling_rate = sampling_rate + self.label_type = label_type + + # Disambiguate the task_name so that cached SampleDatasets from + # different configurations do not collide on disk. 
+ self.task_name = ( + f"PTBXLSuperDiagnostic_{sampling_rate}Hz" + if label_type == "superdiagnostic" + else f"PTBXLDiagnostic27_{sampling_rate}Hz" + ) + + # ------------------------------------------------------------------ + # Core logic + # ------------------------------------------------------------------ + + def __call__(self, patient: Patient) -> List[Dict]: + """Extract samples from one patient (= one ECG recording in PTB-XL). + + Args: + patient: A :class:`~pyhealth.data.Patient` object whose events + have ``event_type="ptbxl"`` and carry attributes + ``mat``, ``dx_codes``, ``age``, and ``sex``. + + Returns: + A list with at most one sample dict + ``{"signal": np.ndarray, "labels": List[str]}``, or an empty list + if the recording should be skipped (missing file, unrecognised + codes, etc.). + """ + # In PTBXLDataset each patient has exactly one event in the "ptbxl" + # table (record == patient). + events = patient.get_events(event_type="ptbxl") + samples = [] + + for event in events: + # ---- 1. Load the .mat signal -------------------------------- + mat_file = getattr(event, "mat", None) + if not mat_file: + logger.debug("Skip %s: no *.mat file", event) + continue + + try: + from scipy.io import loadmat as _loadmat + mat = _loadmat(mat_file) + signal = mat["val"].astype(np.float32) # (12, 5000) @ 500 Hz + except Exception as exc: + logger.warning("Cannot load signal from %s: %s", mat_file, exc) + continue + + if signal.ndim != 2 or signal.shape[0] != 12: + logger.warning( + "Unexpected signal shape %s in %s; skipping.", + signal.shape, + mat_file, + ) + continue + + # ---- 2. Resample if needed (decimation only) ---------------- + # Native rate is 500 Hz (5000 samples / 10 s). + # Decimation by 5 gives 100 Hz (1000 samples / 10 s). + if self.sampling_rate == 100: + signal = signal[:, ::5] # shape (12, 1000) + + # ---- 3. 
Parse SNOMED-CT codes -------------------------------- + dx_codes: str = str(getattr(event, "dx_codes", "") or "") + codes = [c.strip() for c in dx_codes.split(",") if c.strip()] + + # ---- 4. Map to chosen label space --------------------------- + if self.label_type == "superdiagnostic": + labels = list( + { + SNOMED_TO_SUPERDIAG[c] + for c in codes + if c in SNOMED_TO_SUPERDIAG + } + ) + else: # "diagnostic" — 27-class Challenge vocabulary + labels = [c for c in codes if c in _CHALLENGE_SET] + + if not labels: + # No recognised labels → skip (consistent with other tasks). + continue + + samples.append({ + "patient_id": patient.patient_id, + "signal": signal, + "labels": labels, + }) + + return samples \ No newline at end of file diff --git a/tests/core/test_bilstm_ecg.py b/tests/core/test_bilstm_ecg.py new file mode 100644 index 000000000..1f4957b1a --- /dev/null +++ b/tests/core/test_bilstm_ecg.py @@ -0,0 +1,288 @@ +""" +Unit tests for the BiLSTMECG model. + +Covers: + - model initialisation and attribute checks + - forward pass output keys and shapes + - backward pass (gradient flow) + - embed flag (not applicable — model returns the standard four-key dict) + - paper-aligned variants (lstm_d1_h64 and lstm_d3_h128) + - custom hyperparameters + - all three output modes (multilabel, multiclass, binary) + - variable-length input signals + +Authors: + Anurag Dixit - anuragd2@illinois.edu + Kent Spillner - kspillne@illinois.edu + John Wells - jtwells2@illinois.edu +""" + +import unittest + +import numpy as np +import torch + +from pyhealth.datasets import create_sample_dataset, get_dataloader +from pyhealth.models.bilstm_ecg import BiLSTMECG + +# --------------------------------------------------------------------------- +# Shared fixture helpers (mirrors test_resnet_ecg.py conventions) +# --------------------------------------------------------------------------- + +_N_LEADS = 12 +_LENGTH = 1000 # 10 s @ 100 Hz — typical PTB-XL low-rate recording +_N_LABELS = 5 # 
number of multilabel classes + + +def _make_samples(n: int, rng: np.random.RandomState, + label_mode: str = "multilabel") -> list: + """Return ``n`` synthetic ECG samples for the given label mode. + + For multilabel, PyHealth's MultiLabelProcessor expects a list of active + class indices, not a fixed-length binary vector. Every class in + ``range(_N_LABELS)`` is forced to appear at least once across the dataset + so the full vocabulary is established. + """ + samples = [] + for i in range(n): + if label_mode == "multilabel": + active = [j for j in range(_N_LABELS) if rng.randint(0, 2)] + forced = i % _N_LABELS + if forced not in active: + active.append(forced) + label = sorted(active) + elif label_mode == "multiclass": + label = int(rng.randint(0, 3)) + else: # binary + label = int(rng.randint(0, 2)) + samples.append({ + "patient_id": f"p{i}", + "visit_id": "v0", + "signal": rng.randn(_N_LEADS, _LENGTH).astype(np.float32), + "label": label, + }) + return samples + + +def _make_dataset(samples: list, label_mode: str): + return create_sample_dataset( + samples=samples, + input_schema={"signal": "tensor"}, + output_schema={"label": label_mode}, + dataset_name=f"test_bilstm_{label_mode}", + ) + + +def _make_model(dataset, **kwargs) -> BiLSTMECG: + """Construct a BiLSTMECG with the mandatory constructor arguments.""" + return BiLSTMECG( + dataset=dataset, + feature_keys=["signal"], + label_key="label", + mode=dataset.output_schema["label"], + **kwargs, + ) + + +def _assert_forward_output(tc: unittest.TestCase, ret: dict, + batch_size: int, n_classes: int) -> None: + """Assert the standard PyHealth forward-output contract.""" + tc.assertIn("loss", ret) + tc.assertIn("y_prob", ret) + tc.assertIn("y_true", ret) + tc.assertIn("logit", ret) + tc.assertEqual(ret["loss"].dim(), 0) + tc.assertEqual(ret["y_prob"].shape[0], batch_size) + tc.assertEqual(ret["y_prob"].shape[1], n_classes) + tc.assertEqual(ret["y_true"].shape[0], batch_size) + tc.assertEqual(ret["logit"].shape[0], 
batch_size) + tc.assertEqual(ret["logit"].shape[1], n_classes) + tc.assertTrue(torch.isfinite(ret["loss"])) + + +# --------------------------------------------------------------------------- +# Main test class +# --------------------------------------------------------------------------- + +class TestBiLSTMECG(unittest.TestCase): + """Tests for BiLSTMECG.""" + + def setUp(self): + rng = np.random.RandomState(0) + samples = _make_samples(5, rng, "multilabel") + self.dataset = _make_dataset(samples, "multilabel") + self.model = _make_model(self.dataset) + self.batch = next(iter(get_dataloader(self.dataset, batch_size=4, shuffle=False))) + + # -- initialisation ------------------------------------------------------- + + def test_initialization(self): + self.assertIsInstance(self.model, BiLSTMECG) + self.assertEqual(self.model.feature_key, "signal") + self.assertEqual(self.model.label_key, "label") + # Default paper variant: 1 layer, hidden_size=64 + self.assertIsInstance(self.model.lstm, torch.nn.LSTM) + self.assertTrue(self.model.lstm.bidirectional) + self.assertEqual(self.model.lstm.hidden_size, 64) + self.assertEqual(self.model.lstm.num_layers, 1) + self.assertEqual(self.model.lstm.input_size, _N_LEADS) + # FC head maps hidden*2 → n_classes + self.assertIsInstance(self.model.fc, torch.nn.Linear) + self.assertEqual(self.model.fc.in_features, 64 * 2) + self.assertEqual(self.model.fc.out_features, _N_LABELS) + + def test_lstm_is_bidirectional(self): + """Bidirectional flag is set and output dim is 2 × hidden_size.""" + self.assertTrue(self.model.lstm.bidirectional) + x = torch.randn(2, _N_LEADS, _LENGTH) + # permute to (B, T, C) as forward does + out, _ = self.model.lstm(x.permute(0, 2, 1)) + self.assertEqual(out.shape, (2, _LENGTH, 64 * 2)) + + def test_pooling_over_all_timesteps(self): + """AdaptiveAvgPool1d(1) reduces the time dimension to a single vector.""" + x = torch.randn(2, _N_LEADS, _LENGTH) + out, _ = self.model.lstm(x.permute(0, 2, 1)) # (B, T, hidden*2) + 
pooled = self.model.pool(out.permute(0, 2, 1)).squeeze(-1) + self.assertEqual(pooled.shape, (2, 64 * 2)) + + # -- forward -------------------------------------------------------------- + + def test_forward_multilabel(self): + with torch.no_grad(): + ret = self.model(**self.batch) + _assert_forward_output(self, ret, batch_size=4, n_classes=_N_LABELS) + # multilabel y_prob must be in [0, 1] (sigmoid output) + self.assertTrue(torch.all(ret["y_prob"] >= 0)) + self.assertTrue(torch.all(ret["y_prob"] <= 1)) + + def test_forward_multiclass(self): + rng = np.random.RandomState(1) + n_classes = 4 + samples = _make_samples(4, rng, "multiclass") + for i, s in enumerate(samples): + s["label"] = i % n_classes + ds = _make_dataset(samples, "multiclass") + model = _make_model(ds) + batch = next(iter(get_dataloader(ds, batch_size=4, shuffle=False))) + with torch.no_grad(): + ret = model(**batch) + _assert_forward_output(self, ret, batch_size=4, n_classes=n_classes) + # multiclass y_prob rows sum to ~1 (softmax output) + self.assertTrue(torch.allclose(ret["y_prob"].sum(dim=1), + torch.ones(4), atol=1e-5)) + + def test_forward_binary(self): + rng = np.random.RandomState(2) + samples = _make_samples(4, rng, "binary") + for i, s in enumerate(samples): + s["label"] = i % 2 + ds = _make_dataset(samples, "binary") + model = _make_model(ds) + batch = next(iter(get_dataloader(ds, batch_size=4, shuffle=False))) + with torch.no_grad(): + ret = model(**batch) + _assert_forward_output(self, ret, batch_size=4, n_classes=1) + self.assertTrue(torch.all(ret["y_prob"] >= 0)) + self.assertTrue(torch.all(ret["y_prob"] <= 1)) + + # -- backward ------------------------------------------------------------- + + def test_backward(self): + ret = self.model(**self.batch) + ret["loss"].backward() + has_grad = any( + p.requires_grad and p.grad is not None + for p in self.model.parameters() + ) + self.assertTrue(has_grad, "No parameters received gradients") + + def test_lstm_weights_receive_gradients(self): + 
"""LSTM weight matrices (not just the FC head) receive gradients.""" + ret = self.model(**self.batch) + ret["loss"].backward() + lstm_params_with_grad = [ + name for name, p in self.model.lstm.named_parameters() + if p.requires_grad and p.grad is not None + ] + self.assertGreater(len(lstm_params_with_grad), 0, + "No LSTM parameters received gradients") + + # -- paper-aligned variants ----------------------------------------------- + + def test_paper_variant_lstm_d1_h64(self): + """lstm_d1_h64: 1 layer, hidden_size=64 (paper best variant).""" + model = _make_model(self.dataset, hidden_size=64, n_layers=1) + self.assertEqual(model.lstm.hidden_size, 64) + self.assertEqual(model.lstm.num_layers, 1) + self.assertEqual(model.fc.in_features, 128) + batch = next(iter(get_dataloader(self.dataset, batch_size=4, shuffle=False))) + with torch.no_grad(): + ret = model(**batch) + _assert_forward_output(self, ret, batch_size=4, n_classes=_N_LABELS) + + def test_paper_variant_lstm_d3_h128(self): + """lstm_d3_h128: 3 layers, hidden_size=128.""" + model = _make_model(self.dataset, hidden_size=128, n_layers=3) + self.assertEqual(model.lstm.hidden_size, 128) + self.assertEqual(model.lstm.num_layers, 3) + self.assertEqual(model.fc.in_features, 256) + batch = next(iter(get_dataloader(self.dataset, batch_size=4, shuffle=False))) + with torch.no_grad(): + ret = model(**batch) + _assert_forward_output(self, ret, batch_size=4, n_classes=_N_LABELS) + + # -- dropout behaviour ---------------------------------------------------- + + def test_dropout_disabled_for_single_layer(self): + """PyTorch raises a UserWarning if dropout > 0 with num_layers=1; + the implementation guards against this by passing 0.0 in that case.""" + model = _make_model(self.dataset, n_layers=1, dropout=0.5) + # PyTorch stores the effective dropout on the module + self.assertEqual(model.lstm.dropout, 0.0) + + def test_dropout_enabled_for_multi_layer(self): + """Dropout is applied between layers when n_layers > 1.""" + 
model = _make_model(self.dataset, n_layers=2, dropout=0.3) + self.assertAlmostEqual(model.lstm.dropout, 0.3) + + # -- custom hyperparameters ----------------------------------------------- + + def test_custom_hyperparameters(self): + model = _make_model(self.dataset, hidden_size=32, n_layers=2, dropout=0.1) + self.assertEqual(model.lstm.hidden_size, 32) + self.assertEqual(model.lstm.num_layers, 2) + self.assertEqual(model.fc.in_features, 64) # 32 * 2 (bidirectional) + batch = next(iter(get_dataloader(self.dataset, batch_size=2, shuffle=False))) + with torch.no_grad(): + ret = model(**batch) + self.assertIn("loss", ret) + self.assertEqual(ret["y_prob"].shape[1], _N_LABELS) + + # -- variable-length input ------------------------------------------------ + + def test_variable_signal_length(self): + """Model handles different signal lengths without retraining + because AdaptiveAvgPool1d(1) is length-agnostic.""" + for length in [500, 1000, 2500]: + signal = torch.randn(2, _N_LEADS, length) + batch = { + "signal": signal, + "label": self.batch["label"][:2], + } + with torch.no_grad(): + ret = self.model(**batch) + self.assertEqual(ret["logit"].shape, (2, _N_LABELS), + f"Wrong shape for signal length {length}") + + def test_high_rate_signal_length(self): + """5000-sample input (10 s @ 500 Hz, the paper's high-rate setting).""" + signal = torch.randn(4, _N_LEADS, 5000) + batch = {"signal": signal, "label": self.batch["label"]} + with torch.no_grad(): + ret = self.model(**batch) + _assert_forward_output(self, ret, batch_size=4, n_classes=_N_LABELS) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/core/test_ptbxl.py b/tests/core/test_ptbxl.py new file mode 100644 index 000000000..46dd2c2e9 --- /dev/null +++ b/tests/core/test_ptbxl.py @@ -0,0 +1,305 @@ +""" +Unit tests for the PTBXLDataset and PTBXLMultilabelClassification classes. 
+ +Authors: + Anurag Dixit - anuragd2@illinois.edu + Kent Spillner - kspillne@illinois.edu + John Wells - jtwells2@illinois.edu +""" +import tempfile +import shutil +import unittest +from pathlib import Path +from dataclasses import dataclass +from typing import List + +import dask.dataframe as dd +import numpy as np +import pandas as pd +import scipy.io + +from pyhealth.datasets import PTBXLDataset +from pyhealth.tasks.ptbxl_multilabel_classification import PTBXLMultilabelClassification + +def write_hea_file(path, record_id, age, sex, dx): + with open(path, "w") as f: + f.write(f"{record_id} 12 500 5000\n") + for lead in ["I", "II", "III", "aVR", "aVL", "aVF", "V1", "V2", "V3", "V4", "V5", "V6"]: + f.write(f"{record_id}.mat 16+24 200/mV 16 0 0 0 0 {lead}\n") + f.write(f"#Age: {age}\n") + f.write(f"#Sex: {sex}\n") + f.write(f"#Dx: {dx}\n") + f.write("#Rx: Unknown\n") + f.write("#Hx: Unknown\n") + f.write("#Sx: Unknown\n") + +def write_mat_file(path): + scipy.io.savemat(str(path), {"val": np.random.randn(12, 1)}) + +def write_database_csv(path, records): + pd.DataFrame({ + "ecg_id": [int(r[0].replace("HR", "")) for r in records], + "strat_fold": [r[4] for r in records], + }).to_csv(path, index=False) + +@dataclass +class _DummyEvent: + """Event stub for task unit tests""" + mat: str + dx_codes: str + + # Override __getattr__ with the dummy data + def __getattr__(self, name): + if name == "ptbxl/mat": + return self.mat + if name == "ptbxl/dx_codes": + return self.dx_codes + raise AttributeError(name) + +class _DummyPatient: + """Patient stub for task unit tests""" + def __init__(self, patient_id: str, events: List[_DummyEvent]): + self.patient_id = patient_id + self._events = events + + def get_events(self, event_type=None) -> List[_DummyEvent]: + return self._events + +class TestPTBXLDataset(unittest.TestCase): + """Test PTBXLDataset with synthetic test data""" + + # Create records with (record_id, age, sex, dx_codes, strat_fold); at minimum need 1, 8, 9, 10 + 
RECORDS = [ + ("HR00001", 56, "Female", "251146004,426783006", 1), # train + ("HR00002", 37, "Female", "426783006", 8), # train + ("HR00003", 24, "Male", "426783006", 9), # val + ("HR00004", 45, "Female", "164889003", 10), # test + ] + + @classmethod + def setUpClass(cls): + """Create a temporary directory with 4 synthetic .hea/.mat pairs and a matching ptbxl_database.csv""" + cls.test_dir = tempfile.mkdtemp() + for record_id, age, sex, dx, _ in cls.RECORDS: + write_hea_file( + Path(cls.test_dir) / f"{record_id}.hea", + record_id, age, sex, dx + ) + write_mat_file(Path(cls.test_dir) / f"{record_id}.mat") + write_database_csv( + Path(cls.test_dir) / "ptbxl_database.csv", + cls.RECORDS + ) + + cls.dataset = PTBXLDataset(root=cls.test_dir) + cls.df = cls.dataset.load_data().compute().set_index("patient_id") + + @classmethod + def tearDownClass(cls): + shutil.rmtree(cls.test_dir) + + def test_dataset_instantiation(self): + """Test 1 - Dataset can be instantiated""" + self.assertIsNotNone(self.dataset) + + def test_dataset_name_default(self): + """Test 2 - Default dataset name is ptbxl""" + self.assertEqual(self.dataset.dataset_name, "ptbxl") + + def test_dataset_name_custom(self): + """Test 3 - Can set a custom dataset name""" + dataset = PTBXLDataset(root=self.test_dir, dataset_name="my_ptbxl") + self.assertEqual(dataset.dataset_name, "my_ptbxl") + + def test_default_task_returns_task_instance(self): + """Test 4 - default_task() returns a PTBXLMultilabelClassification instance and has correct schema""" + task = self.dataset.default_task + self.assertIsInstance(task, PTBXLMultilabelClassification) + self.assertEqual(task.input_schema, {"signal": "tensor"}) + self.assertEqual(task.output_schema, {"labels": "multilabel"}) + self.assertEqual(task.sampling_rate, 100) + self.assertEqual(task.label_type, "superdiagnostic") + + def test_classes_attribute(self): + """Test 5 - The list of strings CLASSES exists and is not empty""" + self.assertIsInstance(PTBXLDataset.CLASSES, 
list) + self.assertGreater(len(PTBXLDataset.CLASSES), 0) + self.assertTrue(all(isinstance(c, str) for c in PTBXLDataset.CLASSES)) + + def test_load_data_returns_dask_dataframe(self): + """Test 6 - load_data() returns a Dask DataFrame""" + self.assertIsInstance(self.dataset.load_data(), dd.DataFrame) + + def test_load_data_row_count(self): + """Test 7 - load_data() returns one row per .hea file""" + self.assertEqual(len(self.df), len(self.RECORDS)) + + def test_load_data_required_columns(self): + """Test 8 - load_data() output contains all required BaseDataset columns""" + for col in ["patient_id", "event_type", "timestamp"]: + self.assertIn(col, self.df.reset_index().columns, f"Missing required column: {col}") + + def test_load_data_attribute_columns(self): + """Test 9 - load_data() output contains all ptbxl/ attribute columns""" + for col in ["ptbxl/mat", "ptbxl/age", "ptbxl/sex", + "ptbxl/dx_codes", "ptbxl/dx_abbreviations", "ptbxl/split"]: + self.assertIn(col, self.df.columns, f"Missing attribute column: {col}") + + def test_load_data_event_type(self): + """Test 10 - All rows have event_type == ptbxl""" + self.assertTrue((self.df["event_type"] == "ptbxl").all()) + + def test_age_parsed_correctly(self): + """Test 11 - Ages are parsed correctly from .hea files""" + self.assertEqual(self.df.loc["HR00001", "ptbxl/age"], 56) + + def test_sex_parsed_correctly(self): + """Test 12 - Sex is parsed correctly from .hea files""" + self.assertEqual(self.df.loc["HR00001", "ptbxl/sex"], "Female") + self.assertEqual(self.df.loc["HR00003", "ptbxl/sex"], "Male") + + def test_dx_codes_parsed_correctly(self): + """Test 13 - SNOMED CT codes are parsed correctly from .hea files.""" + self.assertEqual(self.df.loc["HR00001", "ptbxl/dx_codes"], "251146004,426783006") + self.assertEqual(self.df.loc["HR00003", "ptbxl/dx_codes"], "426783006") + + def test_dx_abbreviations_mapped_correctly(self): + """Test 14 - SNOMED CT codes are mapped to correct abbreviations""" + self.assertIn("NSR", 
self.df.loc["HR00001", "ptbxl/dx_abbreviations"]) + self.assertIn("AF", self.df.loc["HR00004", "ptbxl/dx_abbreviations"]) + + def test_mat_file_path_correct(self): + """Test 15 - .mat file paths point to the correct location""" + expected = str(Path(self.test_dir) / "HR00001.mat") + self.assertEqual(self.df.loc["HR00001", "ptbxl/mat"], expected) + + def test_split_values(self): + """Test 16 - Split column only contains train, val, or test""" + self.assertTrue(self.df["ptbxl/split"].isin(["train", "val", "test"]).all()) + + def test_split_from_strat_fold(self): + """Test 17 - Splits are correctly assigned from strat_fold values""" + self.assertEqual(self.df.loc["HR00001", "ptbxl/split"], "train") # fold 1 + self.assertEqual(self.df.loc["HR00002", "ptbxl/split"], "train") # fold 8 + self.assertEqual(self.df.loc["HR00003", "ptbxl/split"], "val") # fold 9 + self.assertEqual(self.df.loc["HR00004", "ptbxl/split"], "test") # fold 10 + + def test_unknown_snomed_code_skipped(self): + """Test 18 - SNOMED codes not in mapping are skipped without error""" + test_dir = tempfile.mkdtemp() + try: + records = self.RECORDS + [("HR00099", 30, "Male", "999999999,426783006", 1)] + for record_id, age, sex, dx, _ in records: + write_hea_file(Path(test_dir) / f"{record_id}.hea", record_id, age, sex, dx) + write_mat_file(Path(test_dir) / f"{record_id}.mat") + write_database_csv(Path(test_dir) / "ptbxl_database.csv", records) + df = PTBXLDataset(root=test_dir).load_data().compute().set_index("patient_id") + self.assertEqual(df.loc["HR00099", "ptbxl/dx_abbreviations"], "NSR") + finally: + shutil.rmtree(test_dir) + + def test_invalid_age_handled(self): + """Test 19 - Non-integer age values result in None without error""" + test_dir = tempfile.mkdtemp() + try: + records = self.RECORDS + [("HR00098", "NaN", "Male", "426783006", 1)] + for record_id, age, sex, dx, _ in records: + write_hea_file(Path(test_dir) / f"{record_id}.hea", record_id, age, sex, dx) + write_mat_file(Path(test_dir) / 
f"{record_id}.mat") + write_database_csv(Path(test_dir) / "ptbxl_database.csv", records) + df = PTBXLDataset(root=test_dir).load_data().compute().set_index("patient_id") + self.assertTrue(pd.isna(df.loc["HR00098", "ptbxl/age"])) + finally: + shutil.rmtree(test_dir) + + def test_no_hea_files_raises_error(self): + """Test 20 - FileNotFoundError raised if no .hea files found""" + empty_dir = tempfile.mkdtemp() + try: + dataset = PTBXLDataset(root=empty_dir) + with self.assertRaises(FileNotFoundError): + dataset.load_data().compute() + finally: + shutil.rmtree(empty_dir) + + def test_missing_csv_raises_error(self): + """Test 21 - FileNotFoundError raised if ptbxl_database.csv is missing""" + no_csv_dir = tempfile.mkdtemp() + try: + write_hea_file( + Path(no_csv_dir) / "HR00001.hea", + "HR00001", 56, "Female", "426783006" + ) + write_mat_file(Path(no_csv_dir) / "HR00001.mat") + dataset = PTBXLDataset(root=no_csv_dir) + with self.assertRaises(FileNotFoundError): + dataset.load_data().compute() + finally: + shutil.rmtree(no_csv_dir) + +class TestPTBXLMultilabelClassification(unittest.TestCase): + """Test task PTBXLMultilabelClassification with synthetic test data""" + + @classmethod + def setUpClass(cls): + """Create a temporary directory with one test .mat file""" + cls.test_dir = tempfile.mkdtemp() + cls.mat_path = str(Path(cls.test_dir) / "test.mat") + scipy.io.savemat(cls.mat_path, {"val": np.random.randn(12, 5000).astype(np.float32)}) + + @classmethod + def tearDownClass(cls): + shutil.rmtree(cls.test_dir) + + def test_label_type_diagnostic(self): + """Test 22 - Test creating a new task with label_type of diagnostic""" + task = PTBXLMultilabelClassification(label_type="diagnostic", sampling_rate=500) + self.assertIn("diagnostic", task.task_name.lower()) + self.assertEqual(task.sampling_rate, 500) + self.assertEqual(task.label_type, "diagnostic") + + def test_invalid_sampling_rate_raises_error(self): + """Test 23 - Test that a unhandled sampling_rate raises a 
ValueError""" + with self.assertRaises(ValueError): + PTBXLMultilabelClassification(sampling_rate=99) + + def test_invalid_label_type_raises_error(self): + """Test 24 - Test that a unhandled label_type raises a ValueError""" + with self.assertRaises(ValueError): + PTBXLMultilabelClassification(label_type="diag") + + def test_superdiagnostic_abbreviations_mapped_correctly(self): + """Test 25 - Test that a valid superdiagnostic abbreviation returns a valid sample""" + task = PTBXLMultilabelClassification(label_type="superdiagnostic", sampling_rate=500) + patient = _DummyPatient("HR00001", [_DummyEvent(self.mat_path, "164890007")]) + samples = task(patient) + self.assertEqual(len(samples), 1) + self.assertIn("signal", samples[0]) + self.assertIn("labels", samples[0]) + self.assertEqual(samples[0]["signal"].shape, (12, 5000)) + self.assertIn("CD", samples[0]["labels"]) + + def test_signal_decimation(self): + """Test 26 - Test that a sampling rate of 100Hz shrinks the signal shape""" + task = PTBXLMultilabelClassification(sampling_rate=100) + patient = _DummyPatient("HR00001", [_DummyEvent(self.mat_path, "164890007")]) + samples = task(patient) + self.assertEqual(samples[0]["signal"].shape, (12, 1000)) + + def test_unknown_code_produces_no_samples(self): + """Test 27 - Test records with no mappable SNOMED codes produce no samples""" + task = PTBXLMultilabelClassification() + patient = _DummyPatient("HR00001", [_DummyEvent(self.mat_path, "999999999")]) + samples = task(patient) + self.assertEqual(len(samples), 0) + + def test_diagnostic_returns_snomed_codes(self): + """Test 28 - Test diagnostic label_type returns SNOMED codes not superclass names""" + task = PTBXLMultilabelClassification(label_type="diagnostic", sampling_rate=500) + patient = _DummyPatient("HR00001", [_DummyEvent(self.mat_path, "270492004")]) + samples = task(patient) + self.assertEqual(len(samples), 1) + self.assertIn("270492004", samples[0]["labels"]) + +if __name__ == "__main__": + unittest.main() \ 
No newline at end of file diff --git a/tests/core/test_resnet_ecg.py b/tests/core/test_resnet_ecg.py new file mode 100644 index 000000000..4c3a2a221 --- /dev/null +++ b/tests/core/test_resnet_ecg.py @@ -0,0 +1,604 @@ +""" +Tests for 1-D ResNet-based ECG models. + +Covers ResNet18ECG, SEResNet50ECG, and LambdaResNet18ECG, exercising: + - model initialisation and attribute checks + - forward pass output keys and shapes + - backward pass (gradient flow) + - embed flag + - custom hyperparameter variants + - all three output modes (multilabel, multiclass, binary) + - forward_sliding_window evaluation helper + +Authors: + Anurag Dixit - anuragd2@illinois.edu + Kent Spillner - kspillne@illinois.edu + John Wells - jtwells2@illinois.edu +""" + +import unittest + +import numpy as np +import torch + +from pyhealth.datasets import create_sample_dataset, get_dataloader +from pyhealth.models.resnet import ResNet18ECG +from pyhealth.models.se_resnet import SEResNet50ECG +from pyhealth.models.lambda_resnet import LambdaResNet18ECG + +# --------------------------------------------------------------------------- +# Shared fixture helpers +# --------------------------------------------------------------------------- + +_N_LEADS = 12 +_LENGTH = 1250 # 2.5 s @ 500 Hz — matches the paper's window size +_N_LABELS = 5 # number of multilabel classes used in tests + + +def _make_samples(n: int, rng: np.random.RandomState, + label_mode: str = "multilabel") -> list: + """Return ``n`` synthetic ECG samples for the given label mode. + + For multilabel, PyHealth's MultiLabelProcessor expects each label to be a + list of *active class indices* (set-style encoding), not a fixed-length + binary vector. E.g. ``[1, 3]`` means classes 1 and 3 are active. The + processor builds its vocabulary from the union of all class indices seen + across the dataset, so every class in ``range(_N_LABELS)`` must appear at + least once to guarantee a full-size output vector. 
+ """ + samples = [] + for i in range(n): + if label_mode == "multilabel": + # Sample a random subset of class indices; ensure each class + # appears in at least one sample by cycling through them. + active = [j for j in range(_N_LABELS) if rng.randint(0, 2)] + # Guarantee the i-th class (mod _N_LABELS) is always present so + # the full vocabulary is established across the dataset. + forced = i % _N_LABELS + if forced not in active: + active.append(forced) + label = sorted(active) + elif label_mode == "multiclass": + label = int(rng.randint(0, 3)) + else: # binary + label = int(rng.randint(0, 2)) + samples.append({ + "patient_id": f"p{i}", + "visit_id": "v0", + "signal": rng.randn(_N_LEADS, _LENGTH).astype(np.float32), + "label": label, + }) + return samples + + +def _make_dataset(samples: list, label_mode: str): + return create_sample_dataset( + samples=samples, + input_schema={"signal": "tensor"}, + output_schema={"label": label_mode}, + dataset_name=f"test_ecg_{label_mode}", + ) + + +def _assert_forward_output(tc: unittest.TestCase, ret: dict, + batch_size: int, n_classes: int) -> None: + """Assert standard forward-output contract.""" + tc.assertIn("loss", ret) + tc.assertIn("y_prob", ret) + tc.assertIn("y_true", ret) + tc.assertIn("logit", ret) + tc.assertEqual(ret["loss"].dim(), 0) + tc.assertEqual(ret["y_prob"].shape[0], batch_size) + tc.assertEqual(ret["y_prob"].shape[1], n_classes) + tc.assertEqual(ret["y_true"].shape[0], batch_size) + tc.assertEqual(ret["logit"].shape[0], batch_size) + tc.assertEqual(ret["logit"].shape[1], n_classes) + tc.assertTrue(torch.isfinite(ret["loss"])) + + +# --------------------------------------------------------------------------- +# ResNet-18 +# --------------------------------------------------------------------------- + +class TestResNet18ECG(unittest.TestCase): + """Tests for ResNet18ECG.""" + + def setUp(self): + rng = np.random.RandomState(0) + samples = _make_samples(4, rng, "multilabel") + self.dataset = 
_make_dataset(samples, "multilabel") + self.model = ResNet18ECG(dataset=self.dataset) + self.batch = next(iter(get_dataloader(self.dataset, batch_size=4, shuffle=False))) + + # -- initialisation ------------------------------------------------------- + + def test_initialization(self): + self.assertIsInstance(self.model, ResNet18ECG) + self.assertEqual(len(self.model.feature_keys), 1) + self.assertIn("signal", self.model.feature_keys) + self.assertEqual(len(self.model.label_keys), 1) + self.assertIn("label", self.model.label_keys) + # backbone: 4 stages + self.assertEqual(len(self.model.backbone.stages), 4) + # head ends with a linear layer + self.assertIsInstance(list(self.model.head.children())[-1], torch.nn.Linear) + + def test_backbone_output_dim(self): + x = torch.randn(2, _N_LEADS, _LENGTH) + with torch.no_grad(): + out = self.model.backbone(x) + self.assertEqual(out.shape, (2, 256)) + + # -- forward -------------------------------------------------------------- + + def test_forward_multilabel(self): + with torch.no_grad(): + ret = self.model(**self.batch) + _assert_forward_output(self, ret, batch_size=4, n_classes=_N_LABELS) + # multilabel y_prob in [0, 1] + self.assertTrue(torch.all(ret["y_prob"] >= 0)) + self.assertTrue(torch.all(ret["y_prob"] <= 1)) + + def test_forward_multiclass(self): + rng = np.random.RandomState(1) + n_classes = 4 + samples = _make_samples(4, rng, "multiclass") + # ensure all classes represented so tokeniser has correct size + for i, s in enumerate(samples): + s["label"] = i % n_classes + ds = _make_dataset(samples, "multiclass") + model = ResNet18ECG(dataset=ds) + batch = next(iter(get_dataloader(ds, batch_size=4, shuffle=False))) + with torch.no_grad(): + ret = model(**batch) + _assert_forward_output(self, ret, batch_size=4, n_classes=n_classes) + # multiclass y_prob rows sum to ~1 + self.assertTrue(torch.allclose(ret["y_prob"].sum(dim=1), + torch.ones(4), atol=1e-5)) + + def test_forward_binary(self): + rng = 
np.random.RandomState(2) + samples = _make_samples(4, rng, "binary") + for i, s in enumerate(samples): + s["label"] = i % 2 + ds = _make_dataset(samples, "binary") + model = ResNet18ECG(dataset=ds) + batch = next(iter(get_dataloader(ds, batch_size=4, shuffle=False))) + with torch.no_grad(): + ret = model(**batch) + _assert_forward_output(self, ret, batch_size=4, n_classes=1) + self.assertTrue(torch.all(ret["y_prob"] >= 0)) + self.assertTrue(torch.all(ret["y_prob"] <= 1)) + + # -- backward ------------------------------------------------------------- + + def test_backward(self): + ret = self.model(**self.batch) + ret["loss"].backward() + has_grad = any( + p.requires_grad and p.grad is not None + for p in self.model.parameters() + ) + self.assertTrue(has_grad, "No parameters received gradients") + + # -- embed ---------------------------------------------------------------- + + def test_embed_flag(self): + batch = dict(self.batch, embed=True) + with torch.no_grad(): + ret = self.model(**batch) + self.assertIn("embed", ret) + self.assertEqual(ret["embed"].shape, (4, 256)) + + # -- hyperparameters ------------------------------------------------------ + + def test_custom_hyperparameters(self): + model = ResNet18ECG( + dataset=self.dataset, + in_channels=_N_LEADS, + base_channels=32, + backbone_output_dim=128, + dropout=0.1, + ) + x = torch.randn(2, _N_LEADS, _LENGTH) + with torch.no_grad(): + emb = model.backbone(x) + self.assertEqual(emb.shape, (2, 128)) + batch = next(iter(get_dataloader(self.dataset, batch_size=2, shuffle=False))) + with torch.no_grad(): + ret = model(**batch) + self.assertIn("loss", ret) + + # -- sliding window ------------------------------------------------------- + + def test_forward_sliding_window(self): + # Full-length recording is 5 s @ 500 Hz = 2500 samples + signal = torch.randn(2, _N_LEADS, 2500) + self.model.eval() + probs = self.model.forward_sliding_window(signal, window_size=_LENGTH) + self.assertEqual(probs.shape, (2, _N_LABELS)) + 
self.assertTrue(torch.all(probs >= 0)) + self.assertTrue(torch.all(probs <= 1)) + + def test_forward_sliding_window_short_signal(self): + """Signal shorter than one window is zero-padded and processed.""" + signal = torch.randn(2, _N_LEADS, 500) + self.model.eval() + probs = self.model.forward_sliding_window(signal, window_size=_LENGTH) + self.assertEqual(probs.shape, (2, _N_LABELS)) + + def test_forward_sliding_window_custom_step(self): + """Custom step size produces the same output shape.""" + signal = torch.randn(2, _N_LEADS, 2500) + self.model.eval() + probs = self.model.forward_sliding_window( + signal, window_size=_LENGTH, step_size=250) + self.assertEqual(probs.shape, (2, _N_LABELS)) + + +# --------------------------------------------------------------------------- +# SE-ResNet-50 +# --------------------------------------------------------------------------- + +class TestSEResNet50ECG(unittest.TestCase): + """Tests for SEResNet50ECG.""" + + def setUp(self): + rng = np.random.RandomState(3) + samples = _make_samples(4, rng, "multilabel") + self.dataset = _make_dataset(samples, "multilabel") + self.model = SEResNet50ECG(dataset=self.dataset) + self.batch = next(iter(get_dataloader(self.dataset, batch_size=4, shuffle=False))) + + # -- initialisation ------------------------------------------------------- + + def test_initialization(self): + self.assertIsInstance(self.model, SEResNet50ECG) + self.assertIn("signal", self.model.feature_keys) + self.assertIn("label", self.model.label_keys) + # SE-ResNet-50 has 4 stages + self.assertEqual(len(self.model.backbone.stages), 4) + + def test_se_blocks_present(self): + """Every bottleneck block in the backbone contains an SEModule1d.""" + from pyhealth.models.se_resnet import SEResNetBottleneck1d, SEModule1d + for stage in self.model.backbone.stages: + for block in stage.children(): + self.assertIsInstance(block, SEResNetBottleneck1d) + self.assertIsInstance(block.se_module, SEModule1d) + + def 
test_backbone_output_dim(self): + x = torch.randn(2, _N_LEADS, _LENGTH) + with torch.no_grad(): + out = self.model.backbone(x) + self.assertEqual(out.shape, (2, 256)) + + # -- forward -------------------------------------------------------------- + + def test_forward_multilabel(self): + with torch.no_grad(): + ret = self.model(**self.batch) + _assert_forward_output(self, ret, batch_size=4, n_classes=_N_LABELS) + self.assertTrue(torch.all(ret["y_prob"] >= 0)) + self.assertTrue(torch.all(ret["y_prob"] <= 1)) + + def test_forward_multiclass(self): + rng = np.random.RandomState(4) + n_classes = 3 + samples = _make_samples(4, rng, "multiclass") + for i, s in enumerate(samples): + s["label"] = i % n_classes + ds = _make_dataset(samples, "multiclass") + model = SEResNet50ECG(dataset=ds) + batch = next(iter(get_dataloader(ds, batch_size=4, shuffle=False))) + with torch.no_grad(): + ret = model(**batch) + _assert_forward_output(self, ret, batch_size=4, n_classes=n_classes) + self.assertTrue(torch.allclose(ret["y_prob"].sum(dim=1), + torch.ones(4), atol=1e-5)) + + def test_forward_binary(self): + rng = np.random.RandomState(5) + samples = _make_samples(4, rng, "binary") + for i, s in enumerate(samples): + s["label"] = i % 2 + ds = _make_dataset(samples, "binary") + model = SEResNet50ECG(dataset=ds) + batch = next(iter(get_dataloader(ds, batch_size=4, shuffle=False))) + with torch.no_grad(): + ret = model(**batch) + _assert_forward_output(self, ret, batch_size=4, n_classes=1) + + # -- backward ------------------------------------------------------------- + + def test_backward(self): + ret = self.model(**self.batch) + ret["loss"].backward() + has_grad = any( + p.requires_grad and p.grad is not None + for p in self.model.parameters() + ) + self.assertTrue(has_grad, "No parameters received gradients") + + # -- embed ---------------------------------------------------------------- + + def test_embed_flag(self): + batch = dict(self.batch, embed=True) + with torch.no_grad(): + ret 
= self.model(**batch) + self.assertIn("embed", ret) + self.assertEqual(ret["embed"].shape, (4, 256)) + + # -- hyperparameters ------------------------------------------------------ + + def test_custom_reduction_ratio(self): + """SE reduction ratio is forwarded to every SEModule1d.""" + from pyhealth.models.se_resnet import SEModule1d + model = SEResNet50ECG(dataset=self.dataset, reduction=8) + for m in model.backbone.modules(): + if isinstance(m, SEModule1d): + # fc1 maps C → C//8 + in_ch = m.fc1.in_channels + out_ch = m.fc1.out_channels + self.assertEqual(out_ch, in_ch // 8) + + def test_custom_hyperparameters(self): + model = SEResNet50ECG( + dataset=self.dataset, + backbone_output_dim=128, + dropout=0.1, + reduction=8, + ) + batch = next(iter(get_dataloader(self.dataset, batch_size=2, shuffle=False))) + with torch.no_grad(): + ret = model(**batch) + self.assertIn("loss", ret) + self.assertEqual(ret["y_prob"].shape[1], _N_LABELS) + + # -- sliding window ------------------------------------------------------- + + def test_forward_sliding_window(self): + signal = torch.randn(2, _N_LEADS, 2500) + self.model.eval() + probs = self.model.forward_sliding_window(signal, window_size=_LENGTH) + self.assertEqual(probs.shape, (2, _N_LABELS)) + self.assertTrue(torch.all(probs >= 0)) + self.assertTrue(torch.all(probs <= 1)) + + def test_forward_sliding_window_short_signal(self): + signal = torch.randn(2, _N_LEADS, 500) + self.model.eval() + probs = self.model.forward_sliding_window(signal, window_size=_LENGTH) + self.assertEqual(probs.shape, (2, _N_LABELS)) + + +# --------------------------------------------------------------------------- +# Lambda-ResNet-18 +# --------------------------------------------------------------------------- + +class TestLambdaResNet18ECG(unittest.TestCase): + """Tests for LambdaResNet18ECG.""" + + def setUp(self): + rng = np.random.RandomState(6) + samples = _make_samples(4, rng, "multilabel") + self.dataset = _make_dataset(samples, "multilabel") + 
self.model = LambdaResNet18ECG(dataset=self.dataset) + self.batch = next(iter(get_dataloader(self.dataset, batch_size=4, shuffle=False))) + + # -- initialisation ------------------------------------------------------- + + def test_initialization(self): + self.assertIsInstance(self.model, LambdaResNet18ECG) + self.assertIn("signal", self.model.feature_keys) + self.assertIn("label", self.model.label_keys) + # backbone has 4 stages + self.assertIsNotNone(self.model.backbone.layer1) + self.assertIsNotNone(self.model.backbone.layer4) + + def test_lambda_layers_present(self): + """Every block in the backbone contains a LambdaConv1d.""" + from pyhealth.models.lambda_resnet import LambdaBottleneck1d, LambdaConv1d + for layer in [self.model.backbone.layer1, + self.model.backbone.layer2, + self.model.backbone.layer3, + self.model.backbone.layer4]: + for block in layer.children(): + self.assertIsInstance(block, LambdaBottleneck1d) + # The lambda layer is embedded in block.conv2 (an nn.Sequential) + has_lambda = any( + isinstance(m, LambdaConv1d) + for m in block.conv2.modules() + ) + self.assertTrue(has_lambda, + "LambdaConv1d not found inside LambdaBottleneck1d") + + def test_backbone_output_dim(self): + x = torch.randn(2, _N_LEADS, _LENGTH) + with torch.no_grad(): + out = self.model.backbone(x) + self.assertEqual(out.shape, (2, 256)) + + # -- forward -------------------------------------------------------------- + + def test_forward_multilabel(self): + with torch.no_grad(): + ret = self.model(**self.batch) + _assert_forward_output(self, ret, batch_size=4, n_classes=_N_LABELS) + self.assertTrue(torch.all(ret["y_prob"] >= 0)) + self.assertTrue(torch.all(ret["y_prob"] <= 1)) + + def test_forward_multiclass(self): + rng = np.random.RandomState(7) + n_classes = 3 + samples = _make_samples(4, rng, "multiclass") + for i, s in enumerate(samples): + s["label"] = i % n_classes + ds = _make_dataset(samples, "multiclass") + model = LambdaResNet18ECG(dataset=ds) + batch = 
next(iter(get_dataloader(ds, batch_size=4, shuffle=False))) + with torch.no_grad(): + ret = model(**batch) + _assert_forward_output(self, ret, batch_size=4, n_classes=n_classes) + self.assertTrue(torch.allclose(ret["y_prob"].sum(dim=1), + torch.ones(4), atol=1e-5)) + + def test_forward_binary(self): + rng = np.random.RandomState(8) + samples = _make_samples(4, rng, "binary") + for i, s in enumerate(samples): + s["label"] = i % 2 + ds = _make_dataset(samples, "binary") + model = LambdaResNet18ECG(dataset=ds) + batch = next(iter(get_dataloader(ds, batch_size=4, shuffle=False))) + with torch.no_grad(): + ret = model(**batch) + _assert_forward_output(self, ret, batch_size=4, n_classes=1) + + # -- backward ------------------------------------------------------------- + + def test_backward(self): + ret = self.model(**self.batch) + ret["loss"].backward() + has_grad = any( + p.requires_grad and p.grad is not None + for p in self.model.parameters() + ) + self.assertTrue(has_grad, "No parameters received gradients") + + def test_lambda_layer_gradients(self): + """Gradients flow back through LambdaConv1d (embedding parameter).""" + from pyhealth.models.lambda_resnet import LambdaConv1d + ret = self.model(**self.batch) + ret["loss"].backward() + lambda_layers = [ + m for m in self.model.backbone.modules() + if isinstance(m, LambdaConv1d) + ] + self.assertGreater(len(lambda_layers), 0) + for lm in lambda_layers: + self.assertIsNotNone(lm.embedding.grad, + "embedding parameter has no gradient") + + # -- embed ---------------------------------------------------------------- + + def test_embed_flag(self): + batch = dict(self.batch, embed=True) + with torch.no_grad(): + ret = self.model(**batch) + self.assertIn("embed", ret) + self.assertEqual(ret["embed"].shape, (4, 256)) + + # -- hyperparameters ------------------------------------------------------ + + def test_custom_hyperparameters(self): + model = LambdaResNet18ECG( + dataset=self.dataset, + backbone_output_dim=128, + 
dropout=0.1, + ) + batch = next(iter(get_dataloader(self.dataset, batch_size=2, shuffle=False))) + with torch.no_grad(): + ret = model(**batch) + self.assertIn("loss", ret) + self.assertEqual(ret["y_prob"].shape[1], _N_LABELS) + + # -- sliding window ------------------------------------------------------- + + def test_forward_sliding_window(self): + signal = torch.randn(2, _N_LEADS, 2500) + self.model.eval() + probs = self.model.forward_sliding_window(signal, window_size=_LENGTH) + self.assertEqual(probs.shape, (2, _N_LABELS)) + self.assertTrue(torch.all(probs >= 0)) + self.assertTrue(torch.all(probs <= 1)) + + def test_forward_sliding_window_short_signal(self): + signal = torch.randn(2, _N_LEADS, 500) + self.model.eval() + probs = self.model.forward_sliding_window(signal, window_size=_LENGTH) + self.assertEqual(probs.shape, (2, _N_LABELS)) + + def test_clamp_stability(self): + """Extreme input values are clamped and do not produce NaN/Inf.""" + # The backbone clamps input and inter-stage activations to [-20, 20]. + # Use batch size 4 to match self.batch["label"]. 
+ signal = torch.full((4, _N_LEADS, _LENGTH), fill_value=1e6) + batch = { + "signal": signal, + "label": self.batch["label"], + } + with torch.no_grad(): + ret = self.model(**batch) + self.assertTrue(torch.isfinite(ret["loss"]), + "Loss is not finite for clamped extreme input") + self.assertTrue(torch.all(torch.isfinite(ret["y_prob"]))) + + +# --------------------------------------------------------------------------- +# Cross-model consistency tests +# --------------------------------------------------------------------------- + +class TestECGResNetConsistency(unittest.TestCase): + """Sanity checks that hold across all three model classes.""" + + def _make_model_and_batch(self, model_cls, rng_seed=42): + rng = np.random.RandomState(rng_seed) + samples = _make_samples(4, rng, "multilabel") + ds = _make_dataset(samples, "multilabel") + model = model_cls(dataset=ds) + batch = next(iter(get_dataloader(ds, batch_size=4, shuffle=False))) + return model, batch, ds + + def test_all_models_share_head_architecture(self): + """All three models use the same HeadModule architecture.""" + for cls in [ResNet18ECG, SEResNet50ECG, LambdaResNet18ECG]: + model, _, _ = self._make_model_and_batch(cls) + children = list(model.head.children()) + self.assertIsInstance(children[0], torch.nn.Linear, f"{cls.__name__}: head[0]") + self.assertIsInstance(children[1], torch.nn.ReLU, f"{cls.__name__}: head[1]") + self.assertIsInstance(children[2], torch.nn.BatchNorm1d, f"{cls.__name__}: head[2]") + self.assertIsInstance(children[3], torch.nn.Dropout, f"{cls.__name__}: head[3]") + self.assertIsInstance(children[4], torch.nn.Linear, f"{cls.__name__}: head[4]") + # hidden size is 128 (matching HeadModule in reference code) + self.assertEqual(children[0].out_features, 128, + f"{cls.__name__}: head hidden dim should be 128") + + def test_all_models_produce_finite_output(self): + """Forward pass produces finite loss and probabilities for all models.""" + for cls in [ResNet18ECG, SEResNet50ECG, 
LambdaResNet18ECG]: + model, batch, _ = self._make_model_and_batch(cls) + with torch.no_grad(): + ret = model(**batch) + self.assertTrue(torch.isfinite(ret["loss"]), + f"{cls.__name__} loss is not finite") + self.assertTrue(torch.all(torch.isfinite(ret["y_prob"])), + f"{cls.__name__} y_prob contains non-finite values") + + def test_all_models_eval_train_switch(self): + """train() / eval() mode switches do not break forward.""" + for cls in [ResNet18ECG, SEResNet50ECG, LambdaResNet18ECG]: + model, batch, _ = self._make_model_and_batch(cls) + model.train() + ret_train = model(**batch) + model.eval() + with torch.no_grad(): + ret_eval = model(**batch) + self.assertIn("loss", ret_train) + self.assertIn("loss", ret_eval) + + def test_all_models_sliding_window_consistent(self): + """forward_sliding_window output shape is consistent with forward.""" + signal = torch.randn(2, _N_LEADS, 2500) + for cls in [ResNet18ECG, SEResNet50ECG, LambdaResNet18ECG]: + model, batch, _ = self._make_model_and_batch(cls) + model.eval() + probs = model.forward_sliding_window(signal, window_size=_LENGTH) + self.assertEqual(probs.shape[0], 2, + f"{cls.__name__}: sliding window batch dim wrong") + self.assertEqual(probs.shape[1], _N_LABELS, + f"{cls.__name__}: sliding window class dim wrong") + + +if __name__ == "__main__": + unittest.main()