diff --git a/README.md b/README.md
index 07c88dc8850ea472f3a86715b0393aefc6289d87..18c330aba56f117e7c25d4285e41873cdbf24172 100644
--- a/README.md
+++ b/README.md
@@ -1,13 +1,81 @@
----
-title: Pavement Damage Yolo9tr
-emoji: 💻
-colorFrom: pink
-colorTo: gray
-sdk: streamlit
-sdk_version: 1.37.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+# YOLO9tr: YOLOv9 with partial self-attention
+This is the repository for YOLOv9 extended with partial self-attention (PSA). \
+The model was developed for pavement damage detection and is based on the YOLOv9s model.
+### Paper
+YOLO9tr: A Lightweight Model for Pavement Damage Detection Utilizing a Generalized Efficient Layer Aggregation Network and Attention Mechanism [Access](https://arxiv.org/abs/2406.11254)
+
+## Authors
+
+Dr. Sompote Youwai, Achitaphon Chaiyaphat, and Pawarotorn Chaipetch
+
+AI Research Group \
+Department of Civil Engineering \
+King Mongkut's University of Technology Thonburi \
+Thailand
+
+

+[Figure: Picture11223]
+
+[Figure: Picture11]
+
+[Figure: detect_result]
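+
+## Dataset format
+
+Training, validation, and inference expect a dataset in the YOLOv8/Roboflow layout described by a `data.yaml` file, which is what the `--data` argument in the commands below points to. The sketch here is only illustrative: the paths, class count, and class names are placeholders, not the dataset used in the paper.
+
+```yaml
+# Hypothetical data.yaml (Roboflow/YOLOv8-style); adjust paths and names to your own dataset
+train: /workspace/dataset/train/images   # training images (YOLO-format .txt labels alongside)
+val: /workspace/dataset/valid/images     # validation images
+
+nc: 10                                   # number of classes, e.g. 10 damage categories
+names: ['c0', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9']  # placeholder class names
+```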

+
+## Deployment
+
+To deploy this project, run
+
+```bash
+git clone https://github.com/Sompote/YOLO9tr
+pip install -r requirements.txt
+```
+
+It is recommended to use the [YOLOv9s](https://github.com/WongKinYiu/yolov9/releases/download/v0.1/yolov9-s.pt) weights as the initial weights for training.
+
+### Train with Single GPU
+```bash
+python train_dual.py --workers 8 --device 0 --batch 4 --data '/workspace/6400 images/data.yaml' --img 640 \
+  --cfg models/detect/yolov9tr.yaml --weights '../yolov9s' --name yolov9-tr --hyp hyp.scratch-high.yaml \
+  --min-items 0 --epochs 200 --close-mosaic 15
+```
+
+### Train with Dual GPU
+```bash
+torchrun --nproc_per_node 2 --master_port 9527 train_dual.py \
+  --workers 8 --device 0,1 --sync-bn --batch 30 --data '/workspace/road damage/data.yaml' \
+  --img 640 --cfg models/detect/yolov9tr.yaml --weights '../yolov9s' --name yolov9-c --hyp hyp.scratch-high.yaml \
+  --min-items 0 --epochs 200 --close-mosaic 15
+```
+
+### Evaluation
+Trained weights: [YOLO9tr.pt](https://drive.google.com/file/d/1DtXXICCulTPN8DP4HbVLP3T3sk5BP5HI/view?usp=share_link)
+```bash
+python val_dual.py --data data/coco.yaml --img 640 --batch 32 --conf 0.001 \
+  --iou 0.7 --device 0 --weights './yolov9tr.pt' \
+  --save-json --name yolov9_c_640_val
+```
+
+### Inference
+```bash
+python detect_dual.py --source './data/images/horses.jpg' --img 640 --device 0 \
+  --weights './yolov9tr.pt' --name yolov9_c_640_detect
+```
+The dataset follows the same format as YOLOv8 datasets exported from Roboflow (see the example `data.yaml` above).
+
diff --git a/United_States_000062.jpg b/United_States_000062.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..363a5db45d1fb0999ed574e74e14a72a4479a058
Binary files /dev/null and b/United_States_000062.jpg differ
diff --git a/United_States_000502.jpg b/United_States_000502.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5bc0951f1e3f9bffa42f80929f7c3871bb49b09f
Binary files /dev/null and b/United_States_000502.jpg differ
diff --git a/YOLO9_run.ipynb b/YOLO9_run.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..8fb36adb820331dcf52b68b69724800bb582845d
--- /dev/null
+++ b/YOLO9_run.ipynb
@@ -0,0 +1,1123 @@
+{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Mw7RoF7eRJEG", + "outputId": "1aa46197-e525-4227-b78a-5068873ebd65" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Cloning into 'YOLO9_KMUTT'...\n", + "remote: Enumerating objects: 182, done.\u001b[K\n", + "remote: Counting objects: 100% (182/182), done.\u001b[K\n", + "remote: Compressing objects: 100% (132/132), done.\u001b[K\n", + "remote: Total 182 (delta 49), reused 179 (delta 46), pack-reused 0\u001b[K\n", + "Receiving objects: 100% (182/182), 2.27 MiB | 8.47 MiB/s, done.\n", + "Resolving deltas: 100% (49/49), done.\n" + ] + } + ], + "source": [ + "!git clone https://github.com/Sompote/YOLO9_KMUTT" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "6KgJYQKfryz2", + "outputId": "f359b42d-f9d5-41ac-bbc6-d4c623672e92" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/workspace/YOLO9_KMUTT/yolov9\n" + ] + } + ], + "source": [ + "%cd /workspace/YOLO9_KMUTT/yolov9/" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "AnohaOgqq5rQ", + "outputId": 
"d4a78b77-33f3-43dd-af6e-c1ef5e32a8f9" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Collecting gitpython (from -r requirements.txt (line 5))\n", + " Downloading GitPython-3.1.43-py3-none-any.whl.metadata (13 kB)\n", + "Requirement already satisfied: ipython in /opt/conda/lib/python3.10/site-packages (from -r requirements.txt (line 6)) (8.20.0)\n", + "Collecting matplotlib>=3.2.2 (from -r requirements.txt (line 7))\n", + " Downloading matplotlib-3.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (11 kB)\n", + "Requirement already satisfied: numpy>=1.18.5 in /opt/conda/lib/python3.10/site-packages (from -r requirements.txt (line 8)) (1.26.3)\n", + "Collecting opencv-python>=4.1.1 (from -r requirements.txt (line 9))\n", + " Downloading opencv_python-4.10.0.82-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (20 kB)\n", + "Requirement already satisfied: Pillow>=7.1.2 in /opt/conda/lib/python3.10/site-packages (from -r requirements.txt (line 10)) (10.0.1)\n", + "Requirement already satisfied: psutil in /opt/conda/lib/python3.10/site-packages (from -r requirements.txt (line 11)) (5.9.0)\n", + "Requirement already satisfied: PyYAML>=5.3.1 in /opt/conda/lib/python3.10/site-packages (from -r requirements.txt (line 12)) (6.0.1)\n", + "Requirement already satisfied: requests>=2.23.0 in /opt/conda/lib/python3.10/site-packages (from -r requirements.txt (line 13)) (2.31.0)\n", + "Collecting scipy>=1.4.1 (from -r requirements.txt (line 14))\n", + " Downloading scipy-1.13.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (60 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m60.6/60.6 kB\u001b[0m \u001b[31m9.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting thop>=0.1.1 (from -r requirements.txt (line 15))\n", + " Downloading thop-0.1.1.post2209072238-py3-none-any.whl.metadata (2.7 kB)\n", + "Requirement already satisfied: torch>=1.7.0 in /opt/conda/lib/python3.10/site-packages (from -r requirements.txt (line 16)) (2.2.0)\n", + "Requirement already satisfied: torchvision>=0.8.1 in /opt/conda/lib/python3.10/site-packages (from -r requirements.txt (line 17)) (0.17.0)\n", + "Requirement already satisfied: tqdm>=4.64.0 in /opt/conda/lib/python3.10/site-packages (from -r requirements.txt (line 18)) (4.65.0)\n", + "Collecting tensorboard>=2.4.1 (from -r requirements.txt (line 22))\n", + " Downloading tensorboard-2.17.0-py3-none-any.whl.metadata (1.6 kB)\n", + "Collecting pandas>=1.1.4 (from -r requirements.txt (line 27))\n", + " Downloading pandas-2.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (19 kB)\n", + "Collecting seaborn>=0.11.0 (from -r requirements.txt (line 28))\n", + " Downloading seaborn-0.13.2-py3-none-any.whl.metadata (5.4 kB)\n", + "Collecting albumentations>=1.0.3 (from -r requirements.txt (line 46))\n", + " Downloading albumentations-1.4.8-py3-none-any.whl.metadata (37 kB)\n", + "Collecting pycocotools>=2.0 (from -r requirements.txt (line 47))\n", + " Downloading pycocotools-2.0.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (1.1 kB)\n", + "Collecting gitdb<5,>=4.0.1 (from gitpython->-r requirements.txt (line 5))\n", + " Downloading gitdb-4.0.11-py3-none-any.whl.metadata (1.2 kB)\n", + "Requirement already satisfied: decorator in /opt/conda/lib/python3.10/site-packages (from ipython->-r requirements.txt (line 6)) (5.1.1)\n", + "Requirement already satisfied: jedi>=0.16 in 
/opt/conda/lib/python3.10/site-packages (from ipython->-r requirements.txt (line 6)) (0.18.1)\n", + "Requirement already satisfied: matplotlib-inline in /opt/conda/lib/python3.10/site-packages (from ipython->-r requirements.txt (line 6)) (0.1.6)\n", + "Requirement already satisfied: prompt-toolkit<3.1.0,>=3.0.41 in /opt/conda/lib/python3.10/site-packages (from ipython->-r requirements.txt (line 6)) (3.0.43)\n", + "Requirement already satisfied: pygments>=2.4.0 in /opt/conda/lib/python3.10/site-packages (from ipython->-r requirements.txt (line 6)) (2.15.1)\n", + "Requirement already satisfied: stack-data in /opt/conda/lib/python3.10/site-packages (from ipython->-r requirements.txt (line 6)) (0.2.0)\n", + "Requirement already satisfied: traitlets>=5 in /opt/conda/lib/python3.10/site-packages (from ipython->-r requirements.txt (line 6)) (5.7.1)\n", + "Requirement already satisfied: exceptiongroup in /opt/conda/lib/python3.10/site-packages (from ipython->-r requirements.txt (line 6)) (1.2.0)\n", + "Requirement already satisfied: pexpect>4.3 in /opt/conda/lib/python3.10/site-packages (from ipython->-r requirements.txt (line 6)) (4.8.0)\n", + "Collecting contourpy>=1.0.1 (from matplotlib>=3.2.2->-r requirements.txt (line 7))\n", + " Downloading contourpy-1.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (5.8 kB)\n", + "Collecting cycler>=0.10 (from matplotlib>=3.2.2->-r requirements.txt (line 7))\n", + " Downloading cycler-0.12.1-py3-none-any.whl.metadata (3.8 kB)\n", + "Collecting fonttools>=4.22.0 (from matplotlib>=3.2.2->-r requirements.txt (line 7))\n", + " Downloading fonttools-4.53.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (162 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m162.2/162.2 kB\u001b[0m \u001b[31m22.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting kiwisolver>=1.3.1 (from matplotlib>=3.2.2->-r requirements.txt (line 7))\n", + " Downloading kiwisolver-1.4.5-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl.metadata (6.4 kB)\n", + "Requirement already satisfied: packaging>=20.0 in /opt/conda/lib/python3.10/site-packages (from matplotlib>=3.2.2->-r requirements.txt (line 7)) (23.1)\n", + "Collecting pyparsing>=2.3.1 (from matplotlib>=3.2.2->-r requirements.txt (line 7))\n", + " Downloading pyparsing-3.1.2-py3-none-any.whl.metadata (5.1 kB)\n", + "Requirement already satisfied: python-dateutil>=2.7 in /opt/conda/lib/python3.10/site-packages (from matplotlib>=3.2.2->-r requirements.txt (line 7)) (2.9.0.post0)\n", + "Requirement already satisfied: charset-normalizer<4,>=2 in /opt/conda/lib/python3.10/site-packages (from requests>=2.23.0->-r requirements.txt (line 13)) (2.0.4)\n", + "Requirement already satisfied: idna<4,>=2.5 in /opt/conda/lib/python3.10/site-packages (from requests>=2.23.0->-r requirements.txt (line 13)) (3.4)\n", + "Requirement already satisfied: urllib3<3,>=1.21.1 in /opt/conda/lib/python3.10/site-packages (from requests>=2.23.0->-r requirements.txt (line 13)) (1.26.18)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.10/site-packages (from requests>=2.23.0->-r requirements.txt (line 13)) (2023.11.17)\n", + "Requirement already satisfied: filelock in /opt/conda/lib/python3.10/site-packages (from torch>=1.7.0->-r requirements.txt (line 16)) (3.13.1)\n", + "Requirement already satisfied: typing-extensions>=4.8.0 in /opt/conda/lib/python3.10/site-packages (from torch>=1.7.0->-r requirements.txt (line 16)) 
(4.9.0)\n", + "Requirement already satisfied: sympy in /opt/conda/lib/python3.10/site-packages (from torch>=1.7.0->-r requirements.txt (line 16)) (1.12)\n", + "Requirement already satisfied: networkx in /opt/conda/lib/python3.10/site-packages (from torch>=1.7.0->-r requirements.txt (line 16)) (3.1)\n", + "Requirement already satisfied: jinja2 in /opt/conda/lib/python3.10/site-packages (from torch>=1.7.0->-r requirements.txt (line 16)) (3.1.2)\n", + "Requirement already satisfied: fsspec in /opt/conda/lib/python3.10/site-packages (from torch>=1.7.0->-r requirements.txt (line 16)) (2023.12.2)\n", + "Collecting absl-py>=0.4 (from tensorboard>=2.4.1->-r requirements.txt (line 22))\n", + " Downloading absl_py-2.1.0-py3-none-any.whl.metadata (2.3 kB)\n", + "Collecting grpcio>=1.48.2 (from tensorboard>=2.4.1->-r requirements.txt (line 22))\n", + " Downloading grpcio-1.64.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (3.3 kB)\n", + "Collecting markdown>=2.6.8 (from tensorboard>=2.4.1->-r requirements.txt (line 22))\n", + " Downloading Markdown-3.6-py3-none-any.whl.metadata (7.0 kB)\n", + "Collecting protobuf!=4.24.0,<5.0.0,>=3.19.6 (from tensorboard>=2.4.1->-r requirements.txt (line 22))\n", + " Downloading protobuf-4.25.3-cp37-abi3-manylinux2014_x86_64.whl.metadata (541 bytes)\n", + "Requirement already satisfied: setuptools>=41.0.0 in /opt/conda/lib/python3.10/site-packages (from tensorboard>=2.4.1->-r requirements.txt (line 22)) (68.2.2)\n", + "Requirement already satisfied: six>1.9 in /opt/conda/lib/python3.10/site-packages (from tensorboard>=2.4.1->-r requirements.txt (line 22)) (1.16.0)\n", + "Collecting tensorboard-data-server<0.8.0,>=0.7.0 (from tensorboard>=2.4.1->-r requirements.txt (line 22))\n", + " Downloading tensorboard_data_server-0.7.2-py3-none-manylinux_2_31_x86_64.whl.metadata (1.1 kB)\n", + "Collecting werkzeug>=1.0.1 (from tensorboard>=2.4.1->-r requirements.txt (line 22))\n", + " Downloading werkzeug-3.0.3-py3-none-any.whl.metadata (3.7 kB)\n", + "Requirement already satisfied: pytz>=2020.1 in /opt/conda/lib/python3.10/site-packages (from pandas>=1.1.4->-r requirements.txt (line 27)) (2023.3.post1)\n", + "Collecting tzdata>=2022.7 (from pandas>=1.1.4->-r requirements.txt (line 27))\n", + " Downloading tzdata-2024.1-py2.py3-none-any.whl.metadata (1.4 kB)\n", + "Collecting scikit-image>=0.21.0 (from albumentations>=1.0.3->-r requirements.txt (line 46))\n", + " Downloading scikit_image-0.23.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (14 kB)\n", + "Collecting scikit-learn>=1.3.2 (from albumentations>=1.0.3->-r requirements.txt (line 46))\n", + " Downloading scikit_learn-1.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (11 kB)\n", + "Collecting pydantic>=2.7.0 (from albumentations>=1.0.3->-r requirements.txt (line 46))\n", + " Downloading pydantic-2.7.3-py3-none-any.whl.metadata (108 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m109.0/109.0 kB\u001b[0m \u001b[31m31.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting albucore>=0.0.4 (from albumentations>=1.0.3->-r requirements.txt (line 46))\n", + " Downloading albucore-0.0.9-py3-none-any.whl.metadata (3.1 kB)\n", + "Collecting opencv-python-headless>=4.9.0.80 (from albumentations>=1.0.3->-r requirements.txt (line 46))\n", + " Downloading opencv_python_headless-4.10.0.82-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (20 kB)\n", + "Requirement already satisfied: 
tomli>=2.0.1 in /opt/conda/lib/python3.10/site-packages (from albucore>=0.0.4->albumentations>=1.0.3->-r requirements.txt (line 46)) (2.0.1)\n", + "Collecting smmap<6,>=3.0.1 (from gitdb<5,>=4.0.1->gitpython->-r requirements.txt (line 5))\n", + " Downloading smmap-5.0.1-py3-none-any.whl.metadata (4.3 kB)\n", + "Requirement already satisfied: parso<0.9.0,>=0.8.0 in /opt/conda/lib/python3.10/site-packages (from jedi>=0.16->ipython->-r requirements.txt (line 6)) (0.8.3)\n", + "Requirement already satisfied: ptyprocess>=0.5 in /opt/conda/lib/python3.10/site-packages (from pexpect>4.3->ipython->-r requirements.txt (line 6)) (0.7.0)\n", + "Requirement already satisfied: wcwidth in /opt/conda/lib/python3.10/site-packages (from prompt-toolkit<3.1.0,>=3.0.41->ipython->-r requirements.txt (line 6)) (0.2.5)\n", + "Collecting annotated-types>=0.4.0 (from pydantic>=2.7.0->albumentations>=1.0.3->-r requirements.txt (line 46))\n", + " Downloading annotated_types-0.7.0-py3-none-any.whl.metadata (15 kB)\n", + "Collecting pydantic-core==2.18.4 (from pydantic>=2.7.0->albumentations>=1.0.3->-r requirements.txt (line 46))\n", + " Downloading pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (6.5 kB)\n", + "Collecting imageio>=2.33 (from scikit-image>=0.21.0->albumentations>=1.0.3->-r requirements.txt (line 46))\n", + " Downloading imageio-2.34.1-py3-none-any.whl.metadata (4.9 kB)\n", + "Collecting tifffile>=2022.8.12 (from scikit-image>=0.21.0->albumentations>=1.0.3->-r requirements.txt (line 46))\n", + " Downloading tifffile-2024.5.22-py3-none-any.whl.metadata (30 kB)\n", + "Collecting lazy-loader>=0.4 (from scikit-image>=0.21.0->albumentations>=1.0.3->-r requirements.txt (line 46))\n", + " Downloading lazy_loader-0.4-py3-none-any.whl.metadata (7.6 kB)\n", + "Collecting joblib>=1.2.0 (from scikit-learn>=1.3.2->albumentations>=1.0.3->-r requirements.txt (line 46))\n", + " Downloading joblib-1.4.2-py3-none-any.whl.metadata (5.4 kB)\n", + "Collecting threadpoolctl>=3.1.0 (from scikit-learn>=1.3.2->albumentations>=1.0.3->-r requirements.txt (line 46))\n", + " Downloading threadpoolctl-3.5.0-py3-none-any.whl.metadata (13 kB)\n", + "Requirement already satisfied: MarkupSafe>=2.1.1 in /opt/conda/lib/python3.10/site-packages (from werkzeug>=1.0.1->tensorboard>=2.4.1->-r requirements.txt (line 22)) (2.1.3)\n", + "Requirement already satisfied: executing in /opt/conda/lib/python3.10/site-packages (from stack-data->ipython->-r requirements.txt (line 6)) (0.8.3)\n", + "Requirement already satisfied: asttokens in /opt/conda/lib/python3.10/site-packages (from stack-data->ipython->-r requirements.txt (line 6)) (2.0.5)\n", + "Requirement already satisfied: pure-eval in /opt/conda/lib/python3.10/site-packages (from stack-data->ipython->-r requirements.txt (line 6)) (0.2.2)\n", + "Requirement already satisfied: mpmath>=0.19 in /opt/conda/lib/python3.10/site-packages (from sympy->torch>=1.7.0->-r requirements.txt (line 16)) (1.3.0)\n", + "Downloading GitPython-3.1.43-py3-none-any.whl (207 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m207.3/207.3 kB\u001b[0m \u001b[31m35.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading matplotlib-3.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (8.3 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m8.3/8.3 MB\u001b[0m \u001b[31m127.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n", + 
"\u001b[?25hDownloading opencv_python-4.10.0.82-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (62.5 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62.5/62.5 MB\u001b[0m \u001b[31m89.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m:00:01\u001b[0m00:01\u001b[0m\n", + "\u001b[?25hDownloading scipy-1.13.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (38.6 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m38.6/38.6 MB\u001b[0m \u001b[31m100.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n", + "\u001b[?25hDownloading thop-0.1.1.post2209072238-py3-none-any.whl (15 kB)\n", + "Downloading tensorboard-2.17.0-py3-none-any.whl (5.5 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m5.5/5.5 MB\u001b[0m \u001b[31m148.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m\n", + "\u001b[?25hDownloading pandas-2.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (13.0 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m13.0/13.0 MB\u001b[0m \u001b[31m151.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m0:01\u001b[0m\n", + "\u001b[?25hDownloading seaborn-0.13.2-py3-none-any.whl (294 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m294.9/294.9 kB\u001b[0m \u001b[31m50.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading albumentations-1.4.8-py3-none-any.whl (156 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m156.8/156.8 kB\u001b[0m \u001b[31m35.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading pycocotools-2.0.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (426 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m426.2/426.2 kB\u001b[0m \u001b[31m61.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading absl_py-2.1.0-py3-none-any.whl (133 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m133.7/133.7 kB\u001b[0m \u001b[31m33.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading albucore-0.0.9-py3-none-any.whl (7.9 kB)\n", + "Downloading contourpy-1.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (305 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m305.2/305.2 kB\u001b[0m \u001b[31m50.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading cycler-0.12.1-py3-none-any.whl (8.3 kB)\n", + "Downloading fonttools-4.53.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (4.6 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m4.6/4.6 MB\u001b[0m \u001b[31m141.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading gitdb-4.0.11-py3-none-any.whl (62 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62.7/62.7 kB\u001b[0m \u001b[31m13.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading grpcio-1.64.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (5.6 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m5.6/5.6 MB\u001b[0m \u001b[31m140.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m\n", + "\u001b[?25hDownloading 
kiwisolver-1.4.5-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (1.6 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m115.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading Markdown-3.6-py3-none-any.whl (105 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m105.4/105.4 kB\u001b[0m \u001b[31m20.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading opencv_python_headless-4.10.0.82-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (49.9 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m49.9/49.9 MB\u001b[0m \u001b[31m93.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m:00:01\u001b[0m00:01\u001b[0m\n", + "\u001b[?25hDownloading protobuf-4.25.3-cp37-abi3-manylinux2014_x86_64.whl (294 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m294.6/294.6 kB\u001b[0m \u001b[31m49.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading pydantic-2.7.3-py3-none-any.whl (409 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m409.6/409.6 kB\u001b[0m \u001b[31m70.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (2.0 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.0/2.0 MB\u001b[0m \u001b[31m117.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading pyparsing-3.1.2-py3-none-any.whl (103 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m103.2/103.2 kB\u001b[0m \u001b[31m21.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading scikit_image-0.23.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (14.7 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m14.7/14.7 MB\u001b[0m \u001b[31m145.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n", + "\u001b[?25hDownloading scikit_learn-1.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (13.3 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m13.3/13.3 MB\u001b[0m \u001b[31m137.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m \u001b[36m0:00:01\u001b[0m\n", + "\u001b[?25hDownloading tensorboard_data_server-0.7.2-py3-none-manylinux_2_31_x86_64.whl (6.6 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m6.6/6.6 MB\u001b[0m \u001b[31m96.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0mta \u001b[36m0:00:01\u001b[0m\n", + "\u001b[?25hDownloading tzdata-2024.1-py2.py3-none-any.whl (345 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m345.4/345.4 kB\u001b[0m \u001b[31m51.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading werkzeug-3.0.3-py3-none-any.whl (227 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m227.3/227.3 kB\u001b[0m \u001b[31m42.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading annotated_types-0.7.0-py3-none-any.whl (13 kB)\n", + "Downloading imageio-2.34.1-py3-none-any.whl (313 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m313.5/313.5 kB\u001b[0m \u001b[31m56.0 MB/s\u001b[0m eta 
\u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading joblib-1.4.2-py3-none-any.whl (301 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m301.8/301.8 kB\u001b[0m \u001b[31m49.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading lazy_loader-0.4-py3-none-any.whl (12 kB)\n", + "Downloading smmap-5.0.1-py3-none-any.whl (24 kB)\n", + "Downloading threadpoolctl-3.5.0-py3-none-any.whl (18 kB)\n", + "Downloading tifffile-2024.5.22-py3-none-any.whl (225 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m225.5/225.5 kB\u001b[0m \u001b[31m49.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hInstalling collected packages: werkzeug, tzdata, tifffile, threadpoolctl, tensorboard-data-server, smmap, scipy, pyparsing, pydantic-core, protobuf, opencv-python-headless, opencv-python, markdown, lazy-loader, kiwisolver, joblib, imageio, grpcio, fonttools, cycler, contourpy, annotated-types, absl-py, tensorboard, scikit-learn, scikit-image, pydantic, pandas, matplotlib, gitdb, albucore, thop, seaborn, pycocotools, gitpython, albumentations\n", + "Successfully installed absl-py-2.1.0 albucore-0.0.9 albumentations-1.4.8 annotated-types-0.7.0 contourpy-1.2.1 cycler-0.12.1 fonttools-4.53.0 gitdb-4.0.11 gitpython-3.1.43 grpcio-1.64.1 imageio-2.34.1 joblib-1.4.2 kiwisolver-1.4.5 lazy-loader-0.4 markdown-3.6 matplotlib-3.9.0 opencv-python-4.10.0.82 opencv-python-headless-4.10.0.82 pandas-2.2.2 protobuf-4.25.3 pycocotools-2.0.7 pydantic-2.7.3 pydantic-core-2.18.4 pyparsing-3.1.2 scikit-image-0.23.2 scikit-learn-1.5.0 scipy-1.13.1 seaborn-0.13.2 smmap-5.0.1 tensorboard-2.17.0 tensorboard-data-server-0.7.2 thop-0.1.1.post2209072238 threadpoolctl-3.5.0 tifffile-2024.5.22 tzdata-2024.1 werkzeug-3.0.3\n", + "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001b[0m\u001b[33m\n", + "\u001b[0m" + ] + } + ], + "source": [ + "!pip install -r requirements.txt" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Hit:1 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64 InRelease\n", + "Hit:2 http://archive.ubuntu.com/ubuntu jammy InRelease \n", + "Hit:3 http://security.ubuntu.com/ubuntu jammy-security InRelease\n", + "Hit:4 http://archive.ubuntu.com/ubuntu jammy-updates InRelease\n", + "Hit:5 http://archive.ubuntu.com/ubuntu jammy-backports InRelease\n", + "Reading package lists... Done\n", + "Reading package lists... Done\n", + "Building dependency tree... Done\n", + "Reading state information... 
Done\n", + "The following additional packages will be installed:\n", + " libdrm-amdgpu1 libdrm-common libdrm-intel1 libdrm-nouveau2 libdrm-radeon1\n", + " libdrm2 libgl1 libgl1-amber-dri libgl1-mesa-dri libglapi-mesa libglvnd0\n", + " libglx-mesa0 libglx0 libllvm15 libpciaccess0 libsensors-config libsensors5\n", + " libx11-6 libx11-data libx11-xcb1 libxau6 libxcb-dri2-0 libxcb-dri3-0\n", + " libxcb-glx0 libxcb-present0 libxcb-randr0 libxcb-shm0 libxcb-sync1\n", + " libxcb-xfixes0 libxcb1 libxdmcp6 libxext6 libxfixes3 libxshmfence1\n", + " libxxf86vm1\n", + "Suggested packages:\n", + " pciutils lm-sensors\n", + "The following NEW packages will be installed:\n", + " libdrm-amdgpu1 libdrm-common libdrm-intel1 libdrm-nouveau2 libdrm-radeon1\n", + " libdrm2 libgl1 libgl1-amber-dri libgl1-mesa-dri libgl1-mesa-glx\n", + " libglapi-mesa libglvnd0 libglx-mesa0 libglx0 libllvm15 libpciaccess0\n", + " libsensors-config libsensors5 libx11-6 libx11-data libx11-xcb1 libxau6\n", + " libxcb-dri2-0 libxcb-dri3-0 libxcb-glx0 libxcb-present0 libxcb-randr0\n", + " libxcb-shm0 libxcb-sync1 libxcb-xfixes0 libxcb1 libxdmcp6 libxext6\n", + " libxfixes3 libxshmfence1 libxxf86vm1\n", + "0 upgraded, 36 newly installed, 0 to remove and 56 not upgraded.\n", + "Need to get 40.1 MB of archives.\n", + "After this operation, 173 MB of additional disk space will be used.\n", + "Get:1 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 libdrm-common all 2.4.113-2~ubuntu0.22.04.1 [5450 B]\n", + "Get:2 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 libdrm2 amd64 2.4.113-2~ubuntu0.22.04.1 [38.1 kB]\n", + "Get:3 http://archive.ubuntu.com/ubuntu jammy/main amd64 libxau6 amd64 1:1.0.9-1build5 [7634 B]\n", + "Get:4 http://archive.ubuntu.com/ubuntu jammy/main amd64 libxdmcp6 amd64 1:1.1.3-0ubuntu5 [10.9 kB]\n", + "Get:5 http://archive.ubuntu.com/ubuntu jammy/main amd64 libxcb1 amd64 1.14-3ubuntu3 [49.0 kB]\n", + "Get:6 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 libx11-data all 2:1.7.5-1ubuntu0.3 [120 kB]\n", + "Get:7 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 libx11-6 amd64 2:1.7.5-1ubuntu0.3 [667 kB]\n", + "Get:8 http://archive.ubuntu.com/ubuntu jammy/main amd64 libxext6 amd64 2:1.3.4-1build1 [31.8 kB]\n", + "Get:9 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 libdrm-amdgpu1 amd64 2.4.113-2~ubuntu0.22.04.1 [19.9 kB]\n", + "Get:10 http://archive.ubuntu.com/ubuntu jammy/main amd64 libpciaccess0 amd64 0.16-3 [19.1 kB]\n", + "Get:11 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 libdrm-intel1 amd64 2.4.113-2~ubuntu0.22.04.1 [66.7 kB]\n", + "Get:12 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 libdrm-nouveau2 amd64 2.4.113-2~ubuntu0.22.04.1 [17.5 kB]\n", + "Get:13 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 libdrm-radeon1 amd64 2.4.113-2~ubuntu0.22.04.1 [21.6 kB]\n", + "Get:14 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 libglapi-mesa amd64 23.2.1-1ubuntu3.1~22.04.2 [37.1 kB]\n", + "Get:15 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 libgl1-amber-dri amd64 21.3.9-0ubuntu1~22.04.1 [4218 kB]\n", + "Get:16 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 libllvm15 amd64 1:15.0.7-0ubuntu0.22.04.3 [25.4 MB]\n", + "Get:17 http://archive.ubuntu.com/ubuntu jammy/main amd64 libsensors-config all 1:3.6.0-7ubuntu1 [5274 B]\n", + "Get:18 http://archive.ubuntu.com/ubuntu jammy/main amd64 libsensors5 amd64 1:3.6.0-7ubuntu1 [26.3 kB]\n", + "Get:19 http://archive.ubuntu.com/ubuntu jammy/main amd64 libxcb-dri3-0 amd64 
1.14-3ubuntu3 [6968 B]\n", + "Get:20 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 libgl1-mesa-dri amd64 23.2.1-1ubuntu3.1~22.04.2 [8860 kB]\n", + "Get:21 http://archive.ubuntu.com/ubuntu jammy/main amd64 libglvnd0 amd64 1.4.0-1 [73.6 kB]\n", + "Get:22 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 libx11-xcb1 amd64 2:1.7.5-1ubuntu0.3 [7802 B]\n", + "Get:23 http://archive.ubuntu.com/ubuntu jammy/main amd64 libxcb-dri2-0 amd64 1.14-3ubuntu3 [7206 B]\n", + "Get:24 http://archive.ubuntu.com/ubuntu jammy/main amd64 libxcb-glx0 amd64 1.14-3ubuntu3 [25.9 kB]\n", + "Get:25 http://archive.ubuntu.com/ubuntu jammy/main amd64 libxcb-present0 amd64 1.14-3ubuntu3 [5734 B]\n", + "Get:26 http://archive.ubuntu.com/ubuntu jammy/main amd64 libxcb-randr0 amd64 1.14-3ubuntu3 [18.3 kB]\n", + "Get:27 http://archive.ubuntu.com/ubuntu jammy/main amd64 libxcb-shm0 amd64 1.14-3ubuntu3 [5780 B]\n", + "Get:28 http://archive.ubuntu.com/ubuntu jammy/main amd64 libxcb-sync1 amd64 1.14-3ubuntu3 [9416 B]\n", + "Get:29 http://archive.ubuntu.com/ubuntu jammy/main amd64 libxcb-xfixes0 amd64 1.14-3ubuntu3 [9996 B]\n", + "Get:30 http://archive.ubuntu.com/ubuntu jammy/main amd64 libxfixes3 amd64 1:6.0.0-1 [11.7 kB]\n", + "Get:31 http://archive.ubuntu.com/ubuntu jammy/main amd64 libxshmfence1 amd64 1.3-1build4 [5394 B]\n", + "Get:32 http://archive.ubuntu.com/ubuntu jammy/main amd64 libxxf86vm1 amd64 1:1.1.4-1build3 [10.4 kB]\n", + "Get:33 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 libglx-mesa0 amd64 23.2.1-1ubuntu3.1~22.04.2 [158 kB]\n", + "Get:34 http://archive.ubuntu.com/ubuntu jammy/main amd64 libglx0 amd64 1.4.0-1 [41.0 kB]\n", + "Get:35 http://archive.ubuntu.com/ubuntu jammy/main amd64 libgl1 amd64 1.4.0-1 [110 kB]\n", + "Get:36 http://archive.ubuntu.com/ubuntu jammy-updates/universe amd64 libgl1-mesa-glx amd64 23.0.4-0ubuntu1~22.04.1 [5584 B]\n", + "Fetched 40.1 MB in 3s (14.7 MB/s)\n", + "debconf: delaying package configuration, since apt-utils is not installed\n", + "Selecting previously unselected package libdrm-common.\n", + "(Reading database ... 
22469 files and directories currently installed.)\n", + "Preparing to unpack .../00-libdrm-common_2.4.113-2~ubuntu0.22.04.1_all.deb ...\n", + "Unpacking libdrm-common (2.4.113-2~ubuntu0.22.04.1) ...\n", + "Selecting previously unselected package libdrm2:amd64.\n", + "Preparing to unpack .../01-libdrm2_2.4.113-2~ubuntu0.22.04.1_amd64.deb ...\n", + "Unpacking libdrm2:amd64 (2.4.113-2~ubuntu0.22.04.1) ...\n", + "Selecting previously unselected package libxau6:amd64.\n", + "Preparing to unpack .../02-libxau6_1%3a1.0.9-1build5_amd64.deb ...\n", + "Unpacking libxau6:amd64 (1:1.0.9-1build5) ...\n", + "Selecting previously unselected package libxdmcp6:amd64.\n", + "Preparing to unpack .../03-libxdmcp6_1%3a1.1.3-0ubuntu5_amd64.deb ...\n", + "Unpacking libxdmcp6:amd64 (1:1.1.3-0ubuntu5) ...\n", + "Selecting previously unselected package libxcb1:amd64.\n", + "Preparing to unpack .../04-libxcb1_1.14-3ubuntu3_amd64.deb ...\n", + "Unpacking libxcb1:amd64 (1.14-3ubuntu3) ...\n", + "Selecting previously unselected package libx11-data.\n", + "Preparing to unpack .../05-libx11-data_2%3a1.7.5-1ubuntu0.3_all.deb ...\n", + "Unpacking libx11-data (2:1.7.5-1ubuntu0.3) ...\n", + "Selecting previously unselected package libx11-6:amd64.\n", + "Preparing to unpack .../06-libx11-6_2%3a1.7.5-1ubuntu0.3_amd64.deb ...\n", + "Unpacking libx11-6:amd64 (2:1.7.5-1ubuntu0.3) ...\n", + "Selecting previously unselected package libxext6:amd64.\n", + "Preparing to unpack .../07-libxext6_2%3a1.3.4-1build1_amd64.deb ...\n", + "Unpacking libxext6:amd64 (2:1.3.4-1build1) ...\n", + "Selecting previously unselected package libdrm-amdgpu1:amd64.\n", + "Preparing to unpack .../08-libdrm-amdgpu1_2.4.113-2~ubuntu0.22.04.1_amd64.deb ...\n", + "Unpacking libdrm-amdgpu1:amd64 (2.4.113-2~ubuntu0.22.04.1) ...\n", + "Selecting previously unselected package libpciaccess0:amd64.\n", + "Preparing to unpack .../09-libpciaccess0_0.16-3_amd64.deb ...\n", + "Unpacking libpciaccess0:amd64 (0.16-3) ...\n", + "Selecting previously unselected package libdrm-intel1:amd64.\n", + "Preparing to unpack .../10-libdrm-intel1_2.4.113-2~ubuntu0.22.04.1_amd64.deb ...\n", + "Unpacking libdrm-intel1:amd64 (2.4.113-2~ubuntu0.22.04.1) ...\n", + "Selecting previously unselected package libdrm-nouveau2:amd64.\n", + "Preparing to unpack .../11-libdrm-nouveau2_2.4.113-2~ubuntu0.22.04.1_amd64.deb ...\n", + "Unpacking libdrm-nouveau2:amd64 (2.4.113-2~ubuntu0.22.04.1) ...\n", + "Selecting previously unselected package libdrm-radeon1:amd64.\n", + "Preparing to unpack .../12-libdrm-radeon1_2.4.113-2~ubuntu0.22.04.1_amd64.deb ...\n", + "Unpacking libdrm-radeon1:amd64 (2.4.113-2~ubuntu0.22.04.1) ...\n", + "Selecting previously unselected package libglapi-mesa:amd64.\n", + "Preparing to unpack .../13-libglapi-mesa_23.2.1-1ubuntu3.1~22.04.2_amd64.deb ...\n", + "Unpacking libglapi-mesa:amd64 (23.2.1-1ubuntu3.1~22.04.2) ...\n", + "Selecting previously unselected package libgl1-amber-dri:amd64.\n", + "Preparing to unpack .../14-libgl1-amber-dri_21.3.9-0ubuntu1~22.04.1_amd64.deb ...\n", + "Unpacking libgl1-amber-dri:amd64 (21.3.9-0ubuntu1~22.04.1) ...\n", + "Selecting previously unselected package libllvm15:amd64.\n", + "Preparing to unpack .../15-libllvm15_1%3a15.0.7-0ubuntu0.22.04.3_amd64.deb ...\n", + "Unpacking libllvm15:amd64 (1:15.0.7-0ubuntu0.22.04.3) ...\n", + "Selecting previously unselected package libsensors-config.\n", + "Preparing to unpack .../16-libsensors-config_1%3a3.6.0-7ubuntu1_all.deb ...\n", + "Unpacking libsensors-config (1:3.6.0-7ubuntu1) ...\n", + "Selecting 
previously unselected package libsensors5:amd64.\n", + "Preparing to unpack .../17-libsensors5_1%3a3.6.0-7ubuntu1_amd64.deb ...\n", + "Unpacking libsensors5:amd64 (1:3.6.0-7ubuntu1) ...\n", + "Selecting previously unselected package libxcb-dri3-0:amd64.\n", + "Preparing to unpack .../18-libxcb-dri3-0_1.14-3ubuntu3_amd64.deb ...\n", + "Unpacking libxcb-dri3-0:amd64 (1.14-3ubuntu3) ...\n", + "Selecting previously unselected package libgl1-mesa-dri:amd64.\n", + "Preparing to unpack .../19-libgl1-mesa-dri_23.2.1-1ubuntu3.1~22.04.2_amd64.deb ...\n", + "Unpacking libgl1-mesa-dri:amd64 (23.2.1-1ubuntu3.1~22.04.2) ...\n", + "Selecting previously unselected package libglvnd0:amd64.\n", + "Preparing to unpack .../20-libglvnd0_1.4.0-1_amd64.deb ...\n", + "Unpacking libglvnd0:amd64 (1.4.0-1) ...\n", + "Selecting previously unselected package libx11-xcb1:amd64.\n", + "Preparing to unpack .../21-libx11-xcb1_2%3a1.7.5-1ubuntu0.3_amd64.deb ...\n", + "Unpacking libx11-xcb1:amd64 (2:1.7.5-1ubuntu0.3) ...\n", + "Selecting previously unselected package libxcb-dri2-0:amd64.\n", + "Preparing to unpack .../22-libxcb-dri2-0_1.14-3ubuntu3_amd64.deb ...\n", + "Unpacking libxcb-dri2-0:amd64 (1.14-3ubuntu3) ...\n", + "Selecting previously unselected package libxcb-glx0:amd64.\n", + "Preparing to unpack .../23-libxcb-glx0_1.14-3ubuntu3_amd64.deb ...\n", + "Unpacking libxcb-glx0:amd64 (1.14-3ubuntu3) ...\n", + "Selecting previously unselected package libxcb-present0:amd64.\n", + "Preparing to unpack .../24-libxcb-present0_1.14-3ubuntu3_amd64.deb ...\n", + "Unpacking libxcb-present0:amd64 (1.14-3ubuntu3) ...\n", + "Selecting previously unselected package libxcb-randr0:amd64.\n", + "Preparing to unpack .../25-libxcb-randr0_1.14-3ubuntu3_amd64.deb ...\n", + "Unpacking libxcb-randr0:amd64 (1.14-3ubuntu3) ...\n", + "Selecting previously unselected package libxcb-shm0:amd64.\n", + "Preparing to unpack .../26-libxcb-shm0_1.14-3ubuntu3_amd64.deb ...\n", + "Unpacking libxcb-shm0:amd64 (1.14-3ubuntu3) ...\n", + "Selecting previously unselected package libxcb-sync1:amd64.\n", + "Preparing to unpack .../27-libxcb-sync1_1.14-3ubuntu3_amd64.deb ...\n", + "Unpacking libxcb-sync1:amd64 (1.14-3ubuntu3) ...\n", + "Selecting previously unselected package libxcb-xfixes0:amd64.\n", + "Preparing to unpack .../28-libxcb-xfixes0_1.14-3ubuntu3_amd64.deb ...\n", + "Unpacking libxcb-xfixes0:amd64 (1.14-3ubuntu3) ...\n", + "Selecting previously unselected package libxfixes3:amd64.\n", + "Preparing to unpack .../29-libxfixes3_1%3a6.0.0-1_amd64.deb ...\n", + "Unpacking libxfixes3:amd64 (1:6.0.0-1) ...\n", + "Selecting previously unselected package libxshmfence1:amd64.\n", + "Preparing to unpack .../30-libxshmfence1_1.3-1build4_amd64.deb ...\n", + "Unpacking libxshmfence1:amd64 (1.3-1build4) ...\n", + "Selecting previously unselected package libxxf86vm1:amd64.\n", + "Preparing to unpack .../31-libxxf86vm1_1%3a1.1.4-1build3_amd64.deb ...\n", + "Unpacking libxxf86vm1:amd64 (1:1.1.4-1build3) ...\n", + "Selecting previously unselected package libglx-mesa0:amd64.\n", + "Preparing to unpack .../32-libglx-mesa0_23.2.1-1ubuntu3.1~22.04.2_amd64.deb ...\n", + "Unpacking libglx-mesa0:amd64 (23.2.1-1ubuntu3.1~22.04.2) ...\n", + "Selecting previously unselected package libglx0:amd64.\n", + "Preparing to unpack .../33-libglx0_1.4.0-1_amd64.deb ...\n", + "Unpacking libglx0:amd64 (1.4.0-1) ...\n", + "Selecting previously unselected package libgl1:amd64.\n", + "Preparing to unpack .../34-libgl1_1.4.0-1_amd64.deb ...\n", + "Unpacking libgl1:amd64 (1.4.0-1) ...\n", + 
"Selecting previously unselected package libgl1-mesa-glx:amd64.\n", + "Preparing to unpack .../35-libgl1-mesa-glx_23.0.4-0ubuntu1~22.04.1_amd64.deb ...\n", + "Unpacking libgl1-mesa-glx:amd64 (23.0.4-0ubuntu1~22.04.1) ...\n", + "Setting up libpciaccess0:amd64 (0.16-3) ...\n", + "Setting up libxau6:amd64 (1:1.0.9-1build5) ...\n", + "Setting up libxdmcp6:amd64 (1:1.1.3-0ubuntu5) ...\n", + "Setting up libxcb1:amd64 (1.14-3ubuntu3) ...\n", + "Setting up libxcb-xfixes0:amd64 (1.14-3ubuntu3) ...\n", + "Setting up libglvnd0:amd64 (1.4.0-1) ...\n", + "Setting up libxcb-glx0:amd64 (1.14-3ubuntu3) ...\n", + "Setting up libsensors-config (1:3.6.0-7ubuntu1) ...\n", + "Setting up libxcb-shm0:amd64 (1.14-3ubuntu3) ...\n", + "Setting up libxcb-present0:amd64 (1.14-3ubuntu3) ...\n", + "Setting up libx11-data (2:1.7.5-1ubuntu0.3) ...\n", + "Setting up libxcb-sync1:amd64 (1.14-3ubuntu3) ...\n", + "Setting up libsensors5:amd64 (1:3.6.0-7ubuntu1) ...\n", + "Setting up libglapi-mesa:amd64 (23.2.1-1ubuntu3.1~22.04.2) ...\n", + "Setting up libxcb-dri2-0:amd64 (1.14-3ubuntu3) ...\n", + "Setting up libxshmfence1:amd64 (1.3-1build4) ...\n", + "Setting up libxcb-randr0:amd64 (1.14-3ubuntu3) ...\n", + "Setting up libllvm15:amd64 (1:15.0.7-0ubuntu0.22.04.3) ...\n", + "Setting up libx11-6:amd64 (2:1.7.5-1ubuntu0.3) ...\n", + "Setting up libdrm-common (2.4.113-2~ubuntu0.22.04.1) ...\n", + "Setting up libxcb-dri3-0:amd64 (1.14-3ubuntu3) ...\n", + "Setting up libx11-xcb1:amd64 (2:1.7.5-1ubuntu0.3) ...\n", + "Setting up libxext6:amd64 (2:1.3.4-1build1) ...\n", + "Setting up libxxf86vm1:amd64 (1:1.1.4-1build3) ...\n", + "Setting up libxfixes3:amd64 (1:6.0.0-1) ...\n", + "Setting up libdrm2:amd64 (2.4.113-2~ubuntu0.22.04.1) ...\n", + "Setting up libdrm-amdgpu1:amd64 (2.4.113-2~ubuntu0.22.04.1) ...\n", + "Setting up libdrm-nouveau2:amd64 (2.4.113-2~ubuntu0.22.04.1) ...\n", + "Setting up libdrm-radeon1:amd64 (2.4.113-2~ubuntu0.22.04.1) ...\n", + "Setting up libdrm-intel1:amd64 (2.4.113-2~ubuntu0.22.04.1) ...\n", + "Setting up libgl1-mesa-dri:amd64 (23.2.1-1ubuntu3.1~22.04.2) ...\n", + "Setting up libgl1-amber-dri:amd64 (21.3.9-0ubuntu1~22.04.1) ...\n", + "Setting up libglx-mesa0:amd64 (23.2.1-1ubuntu3.1~22.04.2) ...\n", + "Setting up libglx0:amd64 (1.4.0-1) ...\n", + "Setting up libgl1:amd64 (1.4.0-1) ...\n", + "Setting up libgl1-mesa-glx:amd64 (23.0.4-0ubuntu1~22.04.1) ...\n", + "Processing triggers for libc-bin (2.35-0ubuntu3.4) ...\n", + "yes: standard output: Broken pipe\n" + ] + } + ], + "source": [ + "!sudo apt-get update\n", + "!yes | sudo apt-get install libgl1-mesa-glx" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/workspace/yolov9\n" + ] + } + ], + "source": [ + "%cd /workspace/yolov9" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Collecting Pillow==9.5.0\n", + " Downloading Pillow-9.5.0-cp310-cp310-manylinux_2_28_x86_64.whl.metadata (9.5 kB)\n", + "Downloading Pillow-9.5.0-cp310-cp310-manylinux_2_28_x86_64.whl (3.4 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.4/3.4 MB\u001b[0m \u001b[31m54.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n", + "\u001b[?25hInstalling collected packages: Pillow\n", + " Attempting uninstall: Pillow\n", + " Found existing installation: Pillow 10.0.1\n", + " Uninstalling Pillow-10.0.1:\n", + 
" Successfully uninstalled Pillow-10.0.1\n", + "Successfully installed Pillow-9.5.0\n", + "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001b[0m\u001b[33m\n", + "\u001b[0mNote: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "pip install Pillow==9.5.0" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "jjB9EA9yRCyR", + "outputId": "2e388fee-6ec5-492d-b698-55d2d1cec2f2" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mtrain_dual: \u001b[0mweights=, cfg=/workspace/YOLO9_KMUTT/yolov9/models/detect/yolov9-kmuttX.yaml, data=/workspace/6400 images/data.yaml, hyp=hyp.scratch-high.yaml, epochs=500, batch_size=4, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=None, image_weights=False, device=0, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=yolov9-c, exist_ok=False, quad=False, cos_lr=False, flat_cos_lr=False, fixed_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, min_items=0, close_mosaic=15, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", + "YOLO 🚀 2024-6-7 Python-3.10.13 torch-2.2.0 CUDA:0 (NVIDIA GeForce RTX 4090, 24210MiB)\n", + "\n", + "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=7.5, cls=0.5, cls_pw=1.0, obj=0.7, obj_pw=1.0, dfl=1.5, iou_t=0.2, anchor_t=5.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.9, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.15, copy_paste=0.3\n", + "\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and remotely train YOLO 🚀 in ClearML\n", + "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLO 🚀 runs in Comet\n", + "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", + "Overriding model.yaml nc=80 with nc=10\n", + "\n", + " from n params module arguments \n", + " 0 -1 1 0 models.common.Silence [] \n", + " 1 -1 1 1856 models.common.Conv [3, 64, 3, 2] \n", + " 2 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", + " 3 -1 1 252160 models.common.RepNCSPELAN4 [128, 256, 128, 64, 2] \n", + " 4 -1 1 164352 models.common.ADown [256, 256] \n", + " 5 -1 1 1004032 models.common.RepNCSPELAN4 [256, 512, 256, 128, 2] \n", + " 6 -1 1 656384 models.common.ADown [512, 512] \n", + " 7 -1 1 4006912 models.common.RepNCSPELAN4 [512, 1024, 512, 256, 2] \n", + " 8 -1 1 2623488 models.common.ADown [1024, 1024] \n", + " 9 -1 1 4269056 models.common.RepNCSPELAN4 [1024, 1024, 512, 256, 2] \n", + " 10 1 1 4160 models.common.CBLinear [64, [64]] \n", + " 11 3 1 49344 models.common.CBLinear [256, [64, 128]] \n", + " 12 5 1 229824 models.common.CBLinear [512, [64, 128, 256]] \n", + " 13 7 1 984000 models.common.CBLinear [1024, [64, 128, 256, 512]] \n", + " 14 9 1 2033600 models.common.CBLinear [1024, [64, 128, 256, 512, 1024]]\n", + " 15 0 1 1856 
models.common.Conv [3, 64, 3, 2] \n", + " 16[10, 11, 12, 13, 14, -1] 1 0 models.common.CBFuse [[0, 0, 0, 0, 0]] \n", + " 17 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", + " 18[11, 12, 13, 14, -1] 1 0 models.common.CBFuse [[1, 1, 1, 1]] \n", + " 19 -1 1 252160 models.common.RepNCSPELAN4 [128, 256, 128, 64, 2] \n", + " 20 -1 1 164352 models.common.ADown [256, 256] \n", + " 21 [12, 13, 14, -1] 1 0 models.common.CBFuse [[2, 2, 2]] \n", + " 22 -1 1 1004032 models.common.RepNCSPELAN4 [256, 512, 256, 128, 2] \n", + " 23 -1 1 656384 models.common.ADown [512, 512] \n", + " 24 [13, 14, -1] 1 0 models.common.CBFuse [[3, 3]] \n", + " 25 -1 1 4006912 models.common.RepNCSPELAN4 [512, 1024, 512, 256, 2] \n", + " 26 -1 1 2623488 models.common.ADown [1024, 1024] \n", + " 27 [14, -1] 1 0 models.common.CBFuse [[4]] \n", + " 28 -1 1 4269056 models.common.RepNCSPELAN4 [1024, 1024, 512, 256, 2] \n", + " 29 9 1 787968 models.common.SPPELAN [1024, 512, 256] \n", + " 30 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 31 [-1, 7] 1 0 models.common.Concat [1] \n", + " 32 -1 1 4005888 models.common.RepNCSPELAN4 [1536, 512, 512, 256, 2] \n", + " 33 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 34 [-1, 5] 1 0 models.common.Concat [1] \n", + " 35 -1 1 1069056 models.common.RepNCSPELAN4 [1024, 256, 256, 128, 2] \n", + " 36 28 1 787968 models.common.SPPELAN [1024, 512, 256] \n", + " 37 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 38 [-1, 25] 1 0 models.common.Concat [1] \n", + " 39 -1 1 4005888 models.common.RepNCSPELAN4 [1536, 512, 512, 256, 2] \n", + " 40 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 41 [-1, 22] 1 0 models.common.Concat [1] \n", + " 42 -1 1 1069056 models.common.RepNCSPELAN4 [1024, 256, 256, 128, 2] \n", + " 43 -1 1 164352 models.common.ADown [256, 256] \n", + " 44 [-1, 39] 1 0 models.common.Concat [1] \n", + " 45 -1 1 3612672 models.common.RepNCSPELAN4 [768, 512, 512, 256, 2] \n", + " 46 -1 1 656384 models.common.ADown [512, 512] \n", + " 47 [-1, 36] 1 0 models.common.Concat [1] \n", + " 48 -1 1 13910016 models.common.RepNCSPELAN4 [1024, 1024, 1024, 512, 2] \n", + " 49 -1 1 3948032 models.common.PSA [1024, 1024] \n", + " 50 35 1 249728 models.common.PSA [256, 256] \n", + " 51 32 1 990976 models.common.PSA [512, 512] \n", + " 52 29 1 990976 models.common.PSA [512, 512] \n", + " 53 42 1 249728 models.common.PSA [256, 256] \n", + " 54 45 1 990976 models.common.PSA [512, 512] \n", + " 55[50, 51, 52, 53, 54, 49] 1 12471260 models.yolo.DualDDetect [10, [256, 512, 512, 256, 512, 1024]]\n", + "yolov9-kmuttX summary: 1643 layers, 79366300 parameters, 79366268 gradients, 263.8 GFLOPs\n", + "\n", + "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", + "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 398 weight(decay=0.0), 417 weight(decay=0.0005), 415 bias\n", + "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /workspace/6400 images/train/labels.cache... 5120 images, 7 back\u001b[0m\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning /workspace/6400 images/valid/labels.cache... 640 images, 0 backgro\u001b[0m\n", + "Plotting labels to runs/train/yolov9-c10/labels.jpg... 
\n", + "Image sizes 640 train, 640 val\n", + "Using 4 dataloader workers\n", + "Logging results to \u001b[1mruns/train/yolov9-c10\u001b[0m\n", + "Starting training for 500 epochs...\n", + "\n", + " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n", + " 0/499 16.5G 4.939 7.156 5.232 11 640: WARNING ⚠️ TensorBoard graph visualization failure Only tensors, lists, tuples of tensors, or dictionary of tensors can be output from traced functions\n", + " 0/499 16.7G 5.236 7.764 5.234 23 640: ^C\n", + " 0/499 16.7G 5.236 7.764 5.234 23 640: \n", + "Traceback (most recent call last):\n", + " File \"/workspace/YOLO9_KMUTT/yolov9/train_dual.py\", line 644, in \n", + " main(opt)\n", + " File \"/workspace/YOLO9_KMUTT/yolov9/train_dual.py\", line 538, in main\n", + " train(opt.hyp, opt, device, callbacks)\n", + " File \"/workspace/YOLO9_KMUTT/yolov9/train_dual.py\", line 314, in train\n", + " pred = model(imgs) # forward\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n", + " return self._call_impl(*args, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n", + " return forward_call(*args, **kwargs)\n", + " File \"/workspace/YOLO9_KMUTT/yolov9/models/yolo.py\", line 633, in forward\n", + " return self._forward_once(x, profile, visualize) # single-scale inference, train\n", + " File \"/workspace/YOLO9_KMUTT/yolov9/models/yolo.py\", line 533, in _forward_once\n", + " x = m(x) # run\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n", + " return self._call_impl(*args, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n", + " return forward_call(*args, **kwargs)\n", + " File \"/workspace/YOLO9_KMUTT/yolov9/models/common.py\", line 613, in forward\n", + " y.extend((m(y[-1])) for m in [self.cv2, self.cv3])\n", + " File \"/workspace/YOLO9_KMUTT/yolov9/models/common.py\", line 613, in \n", + " y.extend((m(y[-1])) for m in [self.cv2, self.cv3])\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n", + " return self._call_impl(*args, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n", + " return forward_call(*args, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/container.py\", line 217, in forward\n", + " input = module(input)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n", + " return self._call_impl(*args, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n", + " return forward_call(*args, **kwargs)\n", + " File \"/workspace/YOLO9_KMUTT/yolov9/models/common.py\", line 384, in forward\n", + " return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1))\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n", + " return self._call_impl(*args, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n", + " return forward_call(*args, **kwargs)\n", + " File \"/workspace/YOLO9_KMUTT/yolov9/models/common.py\", line 54, in forward\n", + " return self.act(self.bn(self.conv(x)))\n", + " File 
\"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n", + " return self._call_impl(*args, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n", + " return forward_call(*args, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/batchnorm.py\", line 175, in forward\n", + " return F.batch_norm(\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/functional.py\", line 2482, in batch_norm\n", + " return torch.batch_norm(\n", + "KeyboardInterrupt\n" + ] + } + ], + "source": [ + "!python train_dual.py --workers 8 --device 0 --batch 4 --data '/workspace/6400 images/data.yaml' --img 640 --cfg /workspace/YOLO9_KMUTT/yolov9/models/detect/yolov9-kmuttX.yaml --weights '' --name yolov9-c --hyp hyp.scratch-high.yaml --min-items 0 --epochs 500 --close-mosaic 15" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "id": "VRMBngDDsIok" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2024-06-01 23:55:21,192] torch.distributed.run: [WARNING] \n", + "[2024-06-01 23:55:21,192] torch.distributed.run: [WARNING] *****************************************\n", + "[2024-06-01 23:55:21,192] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. \n", + "[2024-06-01 23:55:21,192] torch.distributed.run: [WARNING] *****************************************\n", + "\u001b[34m\u001b[1mtrain_dual: \u001b[0mweights=, cfg=models/detect/yolov9-c.yaml, data=/workspace/norway/data.yaml, hyp=hyp.scratch-high.yaml, epochs=500, batch_size=30, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=None, image_weights=False, device=0,1, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=True, workers=8, project=runs/train, name=yolov9-c, exist_ok=False, quad=False, cos_lr=False, flat_cos_lr=False, fixed_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, min_items=0, close_mosaic=15, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", + "Traceback (most recent call last):\n", + " File \"/workspace/yolov9/train_dual.py\", line 644, in \n", + " main(opt)\n", + " File \"/workspace/yolov9/train_dual.py\", line 530, in main\n", + " assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'\n", + "AssertionError: --batch-size 30 must be multiple of WORLD_SIZE\n", + "Traceback (most recent call last):\n", + " File \"/workspace/yolov9/train_dual.py\", line 644, in \n", + " main(opt)\n", + " File \"/workspace/yolov9/train_dual.py\", line 530, in main\n", + " assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'\n", + "AssertionError: --batch-size 30 must be multiple of WORLD_SIZE\n", + "Traceback (most recent call last):\n", + " File \"/workspace/yolov9/train_dual.py\", line 644, in \n", + " main(opt)\n", + " File \"/workspace/yolov9/train_dual.py\", line 530, in main\n", + " assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'\n", + "AssertionError: --batch-size 30 must be multiple of WORLD_SIZE\n", + "YOLO 🚀 v0.1-94-gcd88016 Python-3.10.13 
torch-2.2.0 CUDA:0 (NVIDIA GeForce RTX 4090, 24217MiB)\n", + " CUDA:1 (NVIDIA GeForce RTX 4090, 24217MiB)\n", + "\n", + "Traceback (most recent call last):\n", + " File \"/workspace/yolov9/train_dual.py\", line 644, in \n", + " main(opt)\n", + " File \"/workspace/yolov9/train_dual.py\", line 530, in main\n", + " assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'\n", + "AssertionError: --batch-size 30 must be multiple of WORLD_SIZE\n", + "Traceback (most recent call last):\n", + " File \"/workspace/yolov9/train_dual.py\", line 644, in \n", + " main(opt)\n", + " File \"/workspace/yolov9/train_dual.py\", line 530, in main\n", + " assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'\n", + "AssertionError: --batch-size 30 must be multiple of WORLD_SIZE\n", + "Traceback (most recent call last):\n", + " File \"/workspace/yolov9/train_dual.py\", line 644, in \n", + " main(opt)\n", + " File \"/workspace/yolov9/train_dual.py\", line 530, in main\n", + " assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'\n", + "AssertionError: --batch-size 30 must be multiple of WORLD_SIZE\n", + "Traceback (most recent call last):\n", + " File \"/workspace/yolov9/train_dual.py\", line 644, in \n", + " main(opt)\n", + " File \"/workspace/yolov9/train_dual.py\", line 530, in main\n", + " assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'\n", + "AssertionError: --batch-size 30 must be multiple of WORLD_SIZE\n", + "Traceback (most recent call last):\n", + " File \"/workspace/yolov9/train_dual.py\", line 644, in \n", + " main(opt)\n", + " File \"/workspace/yolov9/train_dual.py\", line 530, in main\n", + " assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'\n", + "AssertionError: --batch-size 30 must be multiple of WORLD_SIZE\n", + "[2024-06-01 23:55:26,209] torch.distributed.elastic.multiprocessing.api: [ERROR] failed (exitcode: 1) local_rank: 0 (pid: 4360) of binary: /opt/conda/bin/python\n", + "Traceback (most recent call last):\n", + " File \"/opt/conda/bin/torchrun\", line 33, in \n", + " sys.exit(load_entry_point('torch==2.2.0', 'console_scripts', 'torchrun')())\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py\", line 347, in wrapper\n", + " return f(*args, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/distributed/run.py\", line 812, in main\n", + " run(args)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/distributed/run.py\", line 803, in run\n", + " elastic_launch(\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/distributed/launcher/api.py\", line 135, in __call__\n", + " return launch_agent(self._config, self._entrypoint, list(args))\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/distributed/launcher/api.py\", line 268, in launch_agent\n", + " raise ChildFailedError(\n", + "torch.distributed.elastic.multiprocessing.errors.ChildFailedError: \n", + "============================================================\n", + "train_dual.py FAILED\n", + "------------------------------------------------------------\n", + "Failures:\n", + "[1]:\n", + " time : 2024-06-01_23:55:26\n", + " host : de3bed18af84\n", + " rank : 1 (local_rank: 1)\n", + " exitcode : 1 (pid: 4361)\n", + " error_file: \n", + " traceback : To enable traceback see: 
https://pytorch.org/docs/stable/elastic/errors.html\n", + "[2]:\n", + " time : 2024-06-01_23:55:26\n", + " host : de3bed18af84\n", + " rank : 2 (local_rank: 2)\n", + " exitcode : 1 (pid: 4362)\n", + " error_file: \n", + " traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html\n", + "[3]:\n", + " time : 2024-06-01_23:55:26\n", + " host : de3bed18af84\n", + " rank : 3 (local_rank: 3)\n", + " exitcode : 1 (pid: 4363)\n", + " error_file: \n", + " traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html\n", + "[4]:\n", + " time : 2024-06-01_23:55:26\n", + " host : de3bed18af84\n", + " rank : 4 (local_rank: 4)\n", + " exitcode : 1 (pid: 4364)\n", + " error_file: \n", + " traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html\n", + "[5]:\n", + " time : 2024-06-01_23:55:26\n", + " host : de3bed18af84\n", + " rank : 5 (local_rank: 5)\n", + " exitcode : 1 (pid: 4365)\n", + " error_file: \n", + " traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html\n", + "[6]:\n", + " time : 2024-06-01_23:55:26\n", + " host : de3bed18af84\n", + " rank : 6 (local_rank: 6)\n", + " exitcode : 1 (pid: 4366)\n", + " error_file: \n", + " traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html\n", + "[7]:\n", + " time : 2024-06-01_23:55:26\n", + " host : de3bed18af84\n", + " rank : 7 (local_rank: 7)\n", + " exitcode : 1 (pid: 4367)\n", + " error_file: \n", + " traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html\n", + "------------------------------------------------------------\n", + "Root Cause (first observed failure):\n", + "[0]:\n", + " time : 2024-06-01_23:55:26\n", + " host : de3bed18af84\n", + " rank : 0 (local_rank: 0)\n", + " exitcode : 1 (pid: 4360)\n", + " error_file: \n", + " traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html\n", + "============================================================\n" + ] + } + ], + "source": [ + "!torchrun --nproc_per_node 2 --master_port 9527 train_dual.py \\\n", + "--workers 8 --device 0,1 --sync-bn --batch 30 --data '/workspace/norway/data.yaml' \\\n", + "--img 640 --cfg models/detect/yolov9-c.yaml --weights '' --name yolov9-c --hyp hyp.scratch-high.yaml \\\n", + "--min-items 0 --epochs 500 --close-mosaic 15" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2024-06-02 02:12:23,563] torch.distributed.run: [WARNING] \n", + "[2024-06-02 02:12:23,563] torch.distributed.run: [WARNING] *****************************************\n", + "[2024-06-02 02:12:23,563] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. 
\n", + "[2024-06-02 02:12:23,563] torch.distributed.run: [WARNING] *****************************************\n", + "\u001b[34m\u001b[1mtrain_dual: \u001b[0mweights=, cfg=models/detect/yolov9-c.yaml, data=/workspace/road damage/data.yaml, hyp=hyp.scratch-high.yaml, epochs=500, batch_size=30, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=None, image_weights=False, device=0,1, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=True, workers=8, project=runs/train, name=yolov9-c, exist_ok=False, quad=False, cos_lr=False, flat_cos_lr=False, fixed_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, min_items=0, close_mosaic=15, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", + "YOLO 🚀 v0.1-94-gcd88016 Python-3.10.13 torch-2.2.0 CUDA:0 (NVIDIA GeForce RTX 4090, 24217MiB)\n", + " CUDA:1 (NVIDIA GeForce RTX 4090, 24217MiB)\n", + "\n", + "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=7.5, cls=0.5, cls_pw=1.0, obj=0.7, obj_pw=1.0, dfl=1.5, iou_t=0.2, anchor_t=5.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.9, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.15, copy_paste=0.3\n", + "\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and remotely train YOLO 🚀 in ClearML\n", + "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLO 🚀 runs in Comet\n", + "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", + "Overriding model.yaml nc=80 with nc=7\n", + "\n", + " from n params module arguments \n", + " 0 -1 1 0 models.common.Silence [] \n", + " 1 -1 1 1856 models.common.Conv [3, 64, 3, 2] \n", + " 2 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", + " 3 -1 1 291456 models.common.RepNCSPELAN4 [128, 256, 128, 64, 3] \n", + " 4 -1 1 164352 models.common.ADown [256, 256] \n", + " 5 -1 1 1160448 models.common.RepNCSPELAN4 [256, 512, 256, 128, 3] \n", + " 6 -1 1 656384 models.common.ADown [512, 512] \n", + " 7 -1 1 4105728 models.common.RepNCSPELAN4 [512, 512, 512, 256, 3] \n", + " 8 -1 1 656384 models.common.ADown [512, 512] \n", + " 9 -1 1 4105728 models.common.RepNCSPELAN4 [512, 512, 512, 256, 3] \n", + " 10 -1 1 656896 models.common.SPPELAN [512, 512, 256] \n", + " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 12 [-1, 7] 1 0 models.common.Concat [1] \n", + " 13 -1 1 3119616 models.common.RepNCSPELAN4 [1024, 512, 512, 256, 1] \n", + " 14 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 15 [-1, 5] 1 0 models.common.Concat [1] \n", + " 16 -1 1 912640 models.common.RepNCSPELAN4 [1024, 256, 256, 128, 1] \n", + " 17 -1 1 164352 models.common.ADown [256, 256] \n", + " 18 [-1, 13] 1 0 models.common.Concat [1] \n", + " 19 -1 1 2988544 models.common.RepNCSPELAN4 [768, 512, 512, 256, 1] \n", + " 20 -1 1 656384 models.common.ADown [512, 512] \n", + " 21 [-1, 10] 1 0 models.common.Concat [1] \n", + " 22 -1 1 3119616 models.common.RepNCSPELAN4 [1024, 512, 512, 256, 1] \n", + " 23 5 1 131328 models.common.CBLinear [512, [256]] \n", + " 24 7 1 393984 models.common.CBLinear [512, [256, 512]] \n", + " 25 9 1 656640 models.common.CBLinear [512, [256, 512, 512]] 
\n", + " 26 0 1 1856 models.common.Conv [3, 64, 3, 2] \n", + " 27 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", + " 28 -1 1 212864 models.common.RepNCSPELAN4 [128, 256, 128, 64, 1] \n", + " 29 -1 1 164352 models.common.ADown [256, 256] \n", + " 30 [23, 24, 25, -1] 1 0 models.common.CBFuse [[0, 0, 0]] \n", + " 31 -1 1 847616 models.common.RepNCSPELAN4 [256, 512, 256, 128, 1] \n", + " 32 -1 1 656384 models.common.ADown [512, 512] \n", + " 33 [24, 25, -1] 1 0 models.common.CBFuse [[1, 1]] \n", + " 34 -1 1 2857472 models.common.RepNCSPELAN4 [512, 512, 512, 256, 1] \n", + " 35 -1 1 656384 models.common.ADown [512, 512] \n", + " 36 [25, -1] 1 0 models.common.CBFuse [[2]] \n", + " 37 -1 1 2857472 models.common.RepNCSPELAN4 [512, 512, 512, 256, 1] \n", + " 38[31, 34, 37, 16, 19, 22] 1 21556682 models.yolo.DualDDetect [7, [512, 512, 512, 256, 512, 512]]\n", + "yolov9-c summary: 1170 layers, 53901386 parameters, 53901354 gradients, 252.0 GFLOPs\n", + "\n", + "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", + "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 286 weight(decay=0.0), 303 weight(decay=0.00046875), 301 bias\n", + "Using SyncBatchNorm()\n", + "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /workspace/road damage/train/labels... 4672 images, 345 backgrou\u001b[0m\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mWARNING ⚠️ /workspace/road damage/train/images/Japan_006916_jpg.rf.5dc73c813f7053289379ab9cf4e81173.jpg: 1 duplicate labels removed\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /workspace/road damage/train/labels.cache\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning /workspace/road damage/valid/labels... 450 images, 34 backgrounds,\u001b[0m\n", + "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /workspace/road damage/valid/labels.cache\n", + "Plotting labels to runs/train/yolov9-c4/labels.jpg... 
\n", + "Image sizes 640 train, 640 val\n", + "Using 16 dataloader workers\n", + "Logging results to \u001b[1mruns/train/yolov9-c4\u001b[0m\n", + "Starting training for 500 epochs...\n", + "\n", + " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n", + " 0/499 20.4G 4.8 6.503 5.217 74 640: Exception in thread Thread-11 (plot_images):\n", + "Traceback (most recent call last):\n", + " File \"/opt/conda/lib/python3.10/threading.py\", line 1016, in _bootstrap_inner\n", + " self.run()\n", + " File \"/opt/conda/lib/python3.10/threading.py\", line 953, in run\n", + " self._target(*self._args, **self._kwargs)\n", + " File \"/workspace/yolov9/utils/plots.py\", line 300, in plot_images\n", + " annotator.box_label(box, label, color=color)\n", + " File \"/workspace/yolov9/utils/plots.py\", line 86, in box_label\n", + " w, h = self.font.getsize(label) # text width, height\n", + "AttributeError: 'FreeTypeFont' object has no attribute 'getsize'\n", + " 0/499 20.4G 4.903 6.657 5.212 61 640: Exception in thread Thread-12 (plot_images):\n", + "Traceback (most recent call last):\n", + " File \"/opt/conda/lib/python3.10/threading.py\", line 1016, in _bootstrap_inner\n", + " self.run()\n", + " File \"/opt/conda/lib/python3.10/threading.py\", line 953, in run\n", + " self._target(*self._args, **self._kwargs)\n", + " File \"/workspace/yolov9/utils/plots.py\", line 300, in plot_images\n", + " annotator.box_label(box, label, color=color)\n", + " File \"/workspace/yolov9/utils/plots.py\", line 86, in box_label\n", + " w, h = self.font.getsize(label) # text width, height\n", + "AttributeError: 'FreeTypeFont' object has no attribute 'getsize'\n", + " 0/499 20.4G 4.703 6.511 5.215 57 640: Exception in thread Thread-13 (plot_images):\n", + "Traceback (most recent call last):\n", + " File \"/opt/conda/lib/python3.10/threading.py\", line 1016, in _bootstrap_inner\n", + " self.run()\n", + " File \"/opt/conda/lib/python3.10/threading.py\", line 953, in run\n", + " self._target(*self._args, **self._kwargs)\n", + " File \"/workspace/yolov9/utils/plots.py\", line 300, in plot_images\n", + " annotator.box_label(box, label, color=color)\n", + " File \"/workspace/yolov9/utils/plots.py\", line 86, in box_label\n", + " w, h = self.font.getsize(label) # text width, height\n", + "AttributeError: 'FreeTypeFont' object has no attribute 'getsize'\n", + " 0/499 20.5G 4.674 6.804 5.251 49 640: ^C\n", + "[2024-06-02 02:12:52,085] torch.distributed.elastic.agent.server.api: [WARNING] Received Signals.SIGINT death signal, shutting down workers\n", + "[2024-06-02 02:12:52,086] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 11363 closing signal SIGINT\n", + "[2024-06-02 02:12:52,086] torch.distributed.elastic.multiprocessing.api: [WARNING] Sending process 11364 closing signal SIGINT\n", + " 0/499 20.5G 4.674 6.804 5.251 49 640: \n", + "Traceback (most recent call last):\n", + " File \"/workspace/yolov9/train_dual.py\", line 644, in \n", + " main(opt)\n", + " File \"/workspace/yolov9/train_dual.py\", line 538, in main\n", + " train(opt.hyp, opt, device, callbacks)\n", + " File \"/workspace/yolov9/train_dual.py\", line 314, in train\n", + " pred = model(imgs) # forward\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n", + " return self._call_impl(*args, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n", + " return forward_call(*args, **kwargs)\n", + " File 
\"/opt/conda/lib/python3.10/site-packages/torch/nn/parallel/distributed.py\", line 1523, in forward\n", + " else self._run_ddp_forward(*inputs, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/parallel/distributed.py\", line 1359, in _run_ddp_forward\n", + " return self.module(*inputs, **kwargs) # type: ignore[index]\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n", + " return self._call_impl(*args, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n", + " return forward_call(*args, **kwargs)\n", + " File \"/workspace/yolov9/models/yolo.py\", line 633, in forward\n", + " return self._forward_once(x, profile, visualize) # single-scale inference, train\n", + " File \"/workspace/yolov9/models/yolo.py\", line 533, in _forward_once\n", + " x = m(x) # run\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n", + " return self._call_impl(*args, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n", + " return forward_call(*args, **kwargs)\n", + " File \"/workspace/yolov9/models/yolo.py\", line 225, in forward\n", + " d1.append(torch.cat((self.cv2[i](x[i]), self.cv3[i](x[i])), 1))\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n", + " return self._call_impl(*args, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n", + " return forward_call(*args, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/container.py\", line 217, in forward\n", + " input = module(input)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n", + " return self._call_impl(*args, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n", + " return forward_call(*args, **kwargs)\n", + " File \"/workspace/yolov9/models/common.py\", line 54, in forward\n", + " return self.act(self.bn(self.conv(x)))\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n", + " return self._call_impl(*args, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n", + " return forward_call(*args, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/batchnorm.py\", line 767, in forward\n", + " return sync_batch_norm.apply(\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/autograd/function.py\", line 553, in apply\n", + " return super().apply(*args, **kwargs) # type: ignore[misc]\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/_functions.py\", line 89, in forward\n", + " counts = count_all.view(-1)\n", + "KeyboardInterrupt\n", + "Traceback (most recent call last):\n", + " File \"/workspace/yolov9/train_dual.py\", line 644, in \n", + " main(opt)\n", + " File \"/workspace/yolov9/train_dual.py\", line 538, in main\n", + " train(opt.hyp, opt, device, callbacks)\n", + " File \"/workspace/yolov9/train_dual.py\", line 314, in train\n", + " pred = model(imgs) # forward\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1511, in 
_wrapped_call_impl\n", + " return self._call_impl(*args, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n", + " return forward_call(*args, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/parallel/distributed.py\", line 1523, in forward\n", + " else self._run_ddp_forward(*inputs, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/parallel/distributed.py\", line 1359, in _run_ddp_forward\n", + " return self.module(*inputs, **kwargs) # type: ignore[index]\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n", + " return self._call_impl(*args, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n", + " return forward_call(*args, **kwargs)\n", + " File \"/workspace/yolov9/models/yolo.py\", line 633, in forward\n", + " return self._forward_once(x, profile, visualize) # single-scale inference, train\n", + " File \"/workspace/yolov9/models/yolo.py\", line 533, in _forward_once\n", + " x = m(x) # run\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n", + " return self._call_impl(*args, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n", + " return forward_call(*args, **kwargs)\n", + " File \"/workspace/yolov9/models/yolo.py\", line 225, in forward\n", + " d1.append(torch.cat((self.cv2[i](x[i]), self.cv3[i](x[i])), 1))\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n", + " return self._call_impl(*args, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n", + " return forward_call(*args, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/container.py\", line 217, in forward\n", + " input = module(input)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n", + " return self._call_impl(*args, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n", + " return forward_call(*args, **kwargs)\n", + " File \"/workspace/yolov9/models/common.py\", line 54, in forward\n", + " return self.act(self.bn(self.conv(x)))\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n", + " return self._call_impl(*args, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n", + " return forward_call(*args, **kwargs)\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/batchnorm.py\", line 767, in forward\n", + " return sync_batch_norm.apply(\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/autograd/function.py\", line 553, in apply\n", + " return super().apply(*args, **kwargs) # type: ignore[misc]\n", + " File \"/opt/conda/lib/python3.10/site-packages/torch/nn/modules/_functions.py\", line 89, in forward\n", + " counts = count_all.view(-1)\n", + "KeyboardInterrupt\n" + ] + } + ], + "source": [ + "!torchrun --nproc_per_node 2 --master_port 9527 train_dual.py \\\n", + "--workers 8 --device 0,1 --sync-bn --batch 30 --data '/workspace/road damage/data.yaml' \\\n", + "--img 640 
--cfg models/detect/yolov9-c.yaml --weights '' --name yolov9-c --hyp hyp.scratch-high.yaml \\\n", + "--min-items 0 --epochs 500 --close-mosaic 15" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!torchrun --nproc_per_node 2 --master_port 9527 train_dual.py \\\n", + "--workers 8 --device 0,1 --sync-bn --batch 30 --data '/workspace/road damage/data.yaml' \\\n", + "--img 640 --cfg models/detect/yolov9-c.yaml --weights '' --name yolov9-c --hyp hyp.scratch-high.yaml \\\n", + "--min-items 0 --epochs 500 --close-mosaic 15" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "L4", + "machine_shape": "hm", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/panoptic/predict.py b/panoptic/predict.py new file mode 100644 index 0000000000000000000000000000000000000000..9d7d2d800efd3e53ecee8dc87d108ef5faf0e010 --- /dev/null +++ b/panoptic/predict.py @@ -0,0 +1,246 @@ +import argparse +import os +import platform +import sys +from pathlib import Path + +import torch + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLO root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from models.common import DetectMultiBackend +from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams +from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, + increment_path, non_max_suppression, print_args, scale_boxes, scale_segments, + strip_optimizer, xyxy2xywh) +from utils.plots import Annotator, colors, save_one_box +from utils.segment.general import masks2segments, process_mask +from utils.torch_utils import select_device, smart_inference_mode + + +@smart_inference_mode() +def run( + weights=ROOT / 'yolo-pan.pt', # model.pt path(s) + source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) + data=ROOT / 'data/coco128.yaml', # dataset.yaml path + imgsz=(640, 640), # inference size (height, width) + conf_thres=0.25, # confidence threshold + iou_thres=0.45, # NMS IOU threshold + max_det=1000, # maximum detections per image + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + view_img=False, # show results + save_txt=False, # save results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_crop=False, # save cropped prediction boxes + nosave=False, # do not save images/videos + classes=None, # filter by class: --class 0, or --class 0 2 3 + agnostic_nms=False, # class-agnostic NMS + augment=False, # augmented inference + visualize=False, # visualize features + update=False, # update all models + project=ROOT / 'runs/predict-seg', # save results to project/name + name='exp', # save results to project/name + exist_ok=False, # existing project/name ok, do not increment + line_thickness=3, # bounding box thickness (pixels) + hide_labels=False, # hide labels + hide_conf=False, # hide confidences + half=False, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + vid_stride=1, # video frame-rate stride + retina_masks=False, +): + source = str(source) + save_img = not nosave and not source.endswith('.txt') # save inference images + is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) + is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) + webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + screenshot = source.lower().startswith('screen') + if is_url and is_file: + source = check_file(source) # download + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Load model + device = select_device(device) + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, names, pt = model.stride, model.names, model.pt + imgsz = check_img_size(imgsz, s=stride) # check image size + + # Dataloader + bs = 1 # batch_size + if webcam: + view_img = check_imshow(warn=True) + dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + bs = len(dataset) + elif screenshot: + dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) + else: + dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + vid_path, vid_writer = [None] * bs, [None] * bs + + # Run inference + model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup + seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) + for path, im, im0s, vid_cap, s in dataset: + with dt[0]: + im = torch.from_numpy(im).to(model.device) + im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + if len(im.shape) == 3: + im = im[None] # expand for batch dim + + # Inference + with dt[1]: + visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False + pred, proto = model(im, augment=augment, visualize=visualize)[:2] + + # NMS + with dt[2]: + pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det, nm=32) + + # Second-stage classifier (optional) + # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) + + # Process predictions + for i, det in enumerate(pred): # per image + seen += 1 + if webcam: # batch_size >= 1 + p, im0, frame = path[i], im0s[i].copy(), dataset.count + s += f'{i}: ' + else: + p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) + + p = Path(p) # to Path + save_path = str(save_dir / p.name) # im.jpg + txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 
'image' else f'_{frame}') # im.txt + s += '%gx%g ' % im.shape[2:] # print string + imc = im0.copy() if save_crop else im0 # for save_crop + annotator = Annotator(im0, line_width=line_thickness, example=str(names)) + if len(det): + masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC + det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size + + # Segments + if save_txt: + segments = reversed(masks2segments(masks)) + segments = [scale_segments(im.shape[2:], x, im0.shape, normalize=True) for x in segments] + + # Print results + for c in det[:, 5].unique(): + n = (det[:, 5] == c).sum() # detections per class + s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string + + # Mask plotting + annotator.masks(masks, + colors=[colors(x, True) for x in det[:, 5]], + im_gpu=None if retina_masks else im[i]) + + # Write results + for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])): + if save_txt: # Write to file + segj = segments[j].reshape(-1) # (n,2) to (n*2) + line = (cls, *segj, conf) if save_conf else (cls, *segj) # label format + with open(f'{txt_path}.txt', 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + if save_img or save_crop or view_img: # Add bbox to image + c = int(cls) # integer class + label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') + annotator.box_label(xyxy, label, color=colors(c, True)) + # annotator.draw.polygon(segments[j], outline=colors(c, True), width=3) + if save_crop: + save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) + + # Stream results + im0 = annotator.result() + if view_img: + if platform.system() == 'Linux' and p not in windows: + windows.append(p) + cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) + cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) + cv2.imshow(str(p), im0) + if cv2.waitKey(1) == ord('q'): # 1 millisecond + exit() + + # Save results (image with detections) + if save_img: + if dataset.mode == 'image': + cv2.imwrite(save_path, im0) + else: # 'video' or 'stream' + if vid_path[i] != save_path: # new video + vid_path[i] = save_path + if isinstance(vid_writer[i], cv2.VideoWriter): + vid_writer[i].release() # release previous video writer + if vid_cap: # video + fps = vid_cap.get(cv2.CAP_PROP_FPS) + w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + else: # stream + fps, w, h = 30, im0.shape[1], im0.shape[0] + save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos + vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) + vid_writer[i].write(im0) + + # Print time (inference-only) + LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") + + # Print results + t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) + if save_txt or save_img: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") + if update: + strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning) + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolo-pan.pt', help='model path(s)') + 
parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') + parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') + parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--view-img', action='store_true', help='show results') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') + parser.add_argument('--nosave', action='store_true', help='do not save images/videos') + parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') + parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--visualize', action='store_true', help='visualize features') + parser.add_argument('--update', action='store_true', help='update all models') + parser.add_argument('--project', default=ROOT / 'runs/predict-seg', help='save results to project/name') + parser.add_argument('--name', default='exp', help='save results to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') + parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') + parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride') + parser.add_argument('--retina-masks', action='store_true', help='whether to plot masks in native resolution') + opt = parser.parse_args() + opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand + print_args(vars(opt)) + return opt + + +def main(opt): + check_requirements(exclude=('tensorboard', 'thop')) + run(**vars(opt)) + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/panoptic/train.py b/panoptic/train.py new file mode 100644 index 0000000000000000000000000000000000000000..e20244c9949ac801f6040a5b5a19037bf98592bc --- /dev/null +++ b/panoptic/train.py @@ -0,0 +1,662 @@ +import argparse +import math +import os +import random +import sys +import time +from copy import deepcopy +from datetime import datetime +from pathlib import Path + +import numpy as np +import torch +import torch.distributed as dist +import torch.nn as nn +import yaml +from torch.optim import lr_scheduler +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLO root directory +if str(ROOT) not in sys.path: + 
sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +import panoptic.val as validate # for end-of-epoch mAP +from models.experimental import attempt_load +from models.yolo import SegmentationModel +from utils.autoanchor import check_anchors +from utils.autobatch import check_train_batch_size +from utils.callbacks import Callbacks +from utils.downloads import attempt_download, is_url +from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info, + check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, + get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, + labels_to_image_weights, one_cycle, one_flat_cycle, print_args, print_mutation, strip_optimizer, yaml_save) +from utils.loggers import GenericLogger +from utils.plots import plot_evolve, plot_labels +from utils.panoptic.dataloaders import create_dataloader +from utils.panoptic.loss_tal import ComputeLoss +from utils.panoptic.metrics import KEYS, fitness +from utils.panoptic.plots import plot_images_and_masks, plot_results_with_masks +from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer, + smart_resume, torch_distributed_zero_first) + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) +GIT_INFO = None#check_git_info() + + +def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary + save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze, mask_ratio = \ + Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ + opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze, opt.mask_ratio + # callbacks.run('on_pretrain_routine_start') + + # Directories + w = save_dir / 'weights' # weights dir + (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir + last, best = w / 'last.pt', w / 'best.pt' + + # Hyperparameters + if isinstance(hyp, str): + with open(hyp, errors='ignore') as f: + hyp = yaml.safe_load(f) # load hyps dict + LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) + opt.hyp = hyp.copy() # for saving hyps to checkpoints + + # Save run settings + if not evolve: + yaml_save(save_dir / 'hyp.yaml', hyp) + yaml_save(save_dir / 'opt.yaml', vars(opt)) + + # Loggers + data_dict = None + if RANK in {-1, 0}: + logger = GenericLogger(opt=opt, console_logger=LOGGER) + + # Config + plots = not evolve and not opt.noplots # create plots + overlap = not opt.no_overlap + cuda = device.type != 'cpu' + init_seeds(opt.seed + 1 + RANK, deterministic=True) + with torch_distributed_zero_first(LOCAL_RANK): + data_dict = data_dict or check_dataset(data) # check if None + train_path, val_path = data_dict['train'], data_dict['val'] + nc = 1 if single_cls else int(data_dict['nc']) # number of classes + names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names + #is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset + is_coco = isinstance(val_path, str) and val_path.endswith('val2017.txt') # COCO dataset + + # Model + check_suffix(weights, '.pt') # check weights + pretrained = weights.endswith('.pt') + if pretrained: + with 
torch_distributed_zero_first(LOCAL_RANK): + weights = attempt_download(weights) # download if not found locally + ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak + model = SegmentationModel(cfg or ckpt['model'].yaml, ch=3, nc=nc).to(device) + exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys + csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 + csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect + model.load_state_dict(csd, strict=False) # load + LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report + else: + model = SegmentationModel(cfg, ch=3, nc=nc).to(device) # create + amp = check_amp(model) # check AMP + + # Freeze + freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze + for k, v in model.named_parameters(): + #v.requires_grad = True # train all layers + # v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results) + if any(x in k for x in freeze): + LOGGER.info(f'freezing {k}') + v.requires_grad = False + + # Image size + gs = max(int(model.stride.max()), 32) # grid size (max stride) + imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple + + # Batch size + if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size + batch_size = check_train_batch_size(model, imgsz, amp) + logger.update_params({"batch_size": batch_size}) + # loggers.on_params_update({"batch_size": batch_size}) + + # Optimizer + nbs = 64 # nominal batch size + accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing + hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay + optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay']) + + # Scheduler + if opt.cos_lr: + lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] + elif opt.flat_cos_lr: + lf = one_flat_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] + elif opt.fixed_lr: + lf = lambda x: 1.0 + elif opt.poly_lr: + power = 0.9 + lf = lambda x: ((1 - (x / epochs)) ** power) * (1.0 - hyp['lrf']) + hyp['lrf'] + else: + lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) + + # EMA + ema = ModelEMA(model) if RANK in {-1, 0} else None + + # Resume + best_fitness, start_epoch = 0.0, 0 + if pretrained: + if resume: + best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume) + del ckpt, csd + + # DP mode + if cuda and RANK == -1 and torch.cuda.device_count() > 1: + LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.') + model = torch.nn.DataParallel(model) + + # SyncBatchNorm + if opt.sync_bn and cuda and RANK != -1: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) + LOGGER.info('Using SyncBatchNorm()') + + # Trainloader + train_loader, dataset = create_dataloader( + train_path, + imgsz, + batch_size // WORLD_SIZE, + gs, + single_cls, + hyp=hyp, + augment=True, + cache=None if opt.cache == 'val' else opt.cache, + rect=opt.rect, + rank=LOCAL_RANK, + workers=workers, + image_weights=opt.image_weights, + close_mosaic=opt.close_mosaic != 0, + quad=opt.quad, + prefix=colorstr('train: '), + shuffle=True, + 
mask_downsample_ratio=mask_ratio, + overlap_mask=overlap, + ) + labels = np.concatenate(dataset.labels, 0) + mlc = int(labels[:, 0].max()) # max label class + assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}' + + # Process 0 + if RANK in {-1, 0}: + val_loader = create_dataloader(val_path, + imgsz, + batch_size // WORLD_SIZE * 2, + gs, + single_cls, + hyp=hyp, + cache=None if noval else opt.cache, + rect=True, + rank=-1, + workers=workers * 2, + pad=0.5, + mask_downsample_ratio=mask_ratio, + overlap_mask=overlap, + prefix=colorstr('val: '))[0] + + if not resume: + #if not opt.noautoanchor: + # check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # run AutoAnchor + model.half().float() # pre-reduce anchor precision + + if plots: + plot_labels(labels, names, save_dir) + # callbacks.run('on_pretrain_routine_end', labels, names) + + # DDP mode + if cuda and RANK != -1: + model = smart_DDP(model) + + # Model attributes + nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps) + #hyp['box'] *= 3 / nl # scale to layers + #hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers + #hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers + hyp['label_smoothing'] = opt.label_smoothing + model.nc = nc # attach number of classes to model + model.hyp = hyp # attach hyperparameters to model + model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights + model.names = names + + # Start training + t0 = time.time() + nb = len(train_loader) # number of batches + nw = max(round(hyp['warmup_epochs'] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations) + # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training + last_opt_step = -1 + maps = np.zeros(nc) # mAP per class + results = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) + scheduler.last_epoch = start_epoch - 1 # do not move + scaler = torch.cuda.amp.GradScaler(enabled=amp) + stopper, stop = EarlyStopping(patience=opt.patience), False + compute_loss = ComputeLoss(model, overlap=overlap) # init loss class + # callbacks.run('on_train_start') + LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' + f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' + f"Logging results to {colorstr('bold', save_dir)}\n" + f'Starting training for {epochs} epochs...') + for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ + # callbacks.run('on_train_epoch_start') + model.train() + + # Update image weights (optional, single-GPU only) + if opt.image_weights: + cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights + iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights + dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx + if epoch == (epochs - opt.close_mosaic): + LOGGER.info("Closing dataloader mosaic") + dataset.mosaic = False + + # Update mosaic border (optional) + # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) + # dataset.mosaic_border = [b - imgsz, -b] # height, width borders + + mloss = torch.zeros(6, device=device) # mean losses + if RANK != -1: + train_loader.sampler.set_epoch(epoch) + pbar = enumerate(train_loader) + LOGGER.info(('\n' + '%11s' * 10) % + ('Epoch', 'GPU_mem', 'box_loss', 'seg_loss', 'cls_loss', 
'dfl_loss', 'fcl_loss', 'dic_loss', 'Instances', 'Size')) + if RANK in {-1, 0}: + pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar + optimizer.zero_grad() + for i, (imgs, targets, paths, _, masks, semasks) in pbar: # batch ------------------------------------------------------ + # callbacks.run('on_train_batch_start') + #print(imgs.shape) + #print(semasks.shape) + #print(masks.shape) + ni = i + nb * epoch # number integrated batches (since train start) + imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0 + + # Warmup + if ni <= nw: + xi = [0, nw] # x interp + # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) + accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round()) + for j, x in enumerate(optimizer.param_groups): + # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 + x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)]) + if 'momentum' in x: + x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) + + # Multi-scale + if opt.multi_scale: + sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size + sf = sz / max(imgs.shape[2:]) # scale factor + if sf != 1: + ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) + imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) + + # Forward + with torch.cuda.amp.autocast(amp): + pred = model(imgs) # forward + loss, loss_items = compute_loss(pred, targets.to(device), masks=masks.to(device).float(), + semasks=semasks.to(device).float()) + if RANK != -1: + loss *= WORLD_SIZE # gradient averaged between devices in DDP mode + if opt.quad: + loss *= 4. 
+ + # Backward + torch.use_deterministic_algorithms(False) + scaler.scale(loss).backward() + + # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html + if ni - last_opt_step >= accumulate: + scaler.unscale_(optimizer) # unscale gradients + torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients + scaler.step(optimizer) # optimizer.step + scaler.update() + optimizer.zero_grad() + if ema: + ema.update(model) + last_opt_step = ni + + # Log + if RANK in {-1, 0}: + mloss = (mloss * i + loss_items) / (i + 1) # update mean losses + mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) + pbar.set_description(('%11s' * 2 + '%11.4g' * 8) % + (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) + # callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths) + # if callbacks.stop_training: + # return + + # Mosaic plots + if plots: + if ni < 10: + plot_images_and_masks(imgs, targets, masks, semasks, paths, save_dir / f"train_batch{ni}.jpg") + if ni == 10: + files = sorted(save_dir.glob('train*.jpg')) + logger.log_images(files, "Mosaics", epoch) + # end batch ------------------------------------------------------------------------------------------------ + + # Scheduler + lr = [x['lr'] for x in optimizer.param_groups] # for loggers + scheduler.step() + + if RANK in {-1, 0}: + # mAP + # callbacks.run('on_train_epoch_end', epoch=epoch) + ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) + final_epoch = (epoch + 1 == epochs) or stopper.possible_stop + if not noval or final_epoch: # Calculate mAP + if (opt.save_period > 0 and epoch % opt.save_period == 0) or (epoch > (epochs - 2 * opt.close_mosaic)): + results, maps, _ = validate.run(data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + half=amp, + model=ema.ema, + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + plots=False, + callbacks=callbacks, + compute_loss=compute_loss, + mask_downsample_ratio=mask_ratio, + overlap=overlap) + + # Update best mAP + fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] + stop = stopper(epoch=epoch, fitness=fi) # early stop check + if fi > best_fitness: + best_fitness = fi + log_vals = list(mloss) + list(results) + lr + # callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi) + # Log val metrics and media + metrics_dict = dict(zip(KEYS, log_vals)) + logger.log_metrics(metrics_dict, epoch) + + # Save model + if (not nosave) or (final_epoch and not evolve): # if save + ckpt = { + 'epoch': epoch, + 'best_fitness': best_fitness, + 'model': deepcopy(de_parallel(model)).half(), + 'ema': deepcopy(ema.ema).half(), + 'updates': ema.updates, + 'optimizer': optimizer.state_dict(), + 'opt': vars(opt), + 'git': GIT_INFO, # {remote, branch, commit} if a git repo + 'date': datetime.now().isoformat()} + + # Save last, best and delete + torch.save(ckpt, last) + if best_fitness == fi: + torch.save(ckpt, best) + if opt.save_period > 0 and epoch % opt.save_period == 0: + torch.save(ckpt, w / f'epoch{epoch}.pt') + logger.log_model(w / f'epoch{epoch}.pt') + del ckpt + # callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) + + # EarlyStopping + if RANK != -1: # if DDP training + broadcast_list = [stop if RANK == 0 else None] + dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks + if RANK != 0: + stop = broadcast_list[0] + if stop: + break # must 
break all DDP ranks + + # end epoch ---------------------------------------------------------------------------------------------------- + # end training ----------------------------------------------------------------------------------------------------- + if RANK in {-1, 0}: + LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') + for f in last, best: + if f.exists(): + strip_optimizer(f) # strip optimizers + if f is best: + LOGGER.info(f'\nValidating {f}...') + results, _, _ = validate.run( + data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + model=attempt_load(f, device).half(), + iou_thres=0.65 if is_coco else 0.60, # best pycocotools at iou 0.65 + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + save_json=is_coco, + verbose=True, + plots=plots, + callbacks=callbacks, + compute_loss=compute_loss, + mask_downsample_ratio=mask_ratio, + overlap=overlap) # val best model with plots + if is_coco: + # callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi) + metrics_dict = dict(zip(KEYS, list(mloss) + list(results) + lr)) + logger.log_metrics(metrics_dict, epoch) + + # callbacks.run('on_train_end', last, best, epoch, results) + # on train end callback using genericLogger + logger.log_metrics(dict(zip(KEYS[6:22], results)), epochs) + if not opt.evolve: + logger.log_model(best, epoch) + if plots: + plot_results_with_masks(file=save_dir / 'results.csv') # save results.png + files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] + files = [(save_dir / f) for f in files if (save_dir / f).exists()] # filter + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") + logger.log_images(files, "Results", epoch + 1) + logger.log_images(sorted(save_dir.glob('val*.jpg')), "Validation", epoch + 1) + torch.cuda.empty_cache() + return results + + +def parse_opt(known=False): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default=ROOT / 'yolo-pan.pt', help='initial weights path') + parser.add_argument('--cfg', type=str, default='', help='model.yaml path') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path') + parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') + parser.add_argument('--epochs', type=int, default=100, help='total training epochs') + parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') + parser.add_argument('--rect', action='store_true', help='rectangular training') + parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') + parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') + parser.add_argument('--noval', action='store_true', help='only validate final epoch') + parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') + parser.add_argument('--noplots', action='store_true', help='save no plot files') + parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') + parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') + parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image 
--cache ram/disk')
+    parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
+    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+    parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
+    parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
+    parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW', 'LION'], default='SGD', help='optimizer')
+    parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
+    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
+    parser.add_argument('--project', default=ROOT / 'runs/train-pan', help='save to project/name')
+    parser.add_argument('--name', default='exp', help='save to project/name')
+    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+    parser.add_argument('--quad', action='store_true', help='quad dataloader')
+    parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler')
+    parser.add_argument('--flat-cos-lr', action='store_true', help='flat cosine LR scheduler')
+    parser.add_argument('--fixed-lr', action='store_true', help='fixed LR scheduler')
+    parser.add_argument('--poly-lr', action='store_true', help='polynomial LR scheduler')
+    parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
+    parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
+    parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')
+    parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
+    parser.add_argument('--seed', type=int, default=0, help='Global training seed')
+    parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify')
+    parser.add_argument('--close-mosaic', type=int, default=0, help='disable mosaic augmentation for the final x epochs (experimental)')
+
+    # Instance Segmentation Args
+    parser.add_argument('--mask-ratio', type=int, default=4, help='Downsample the truth masks to save memory')
+    parser.add_argument('--no-overlap', action='store_true', help='Overlap masks train faster at slightly less mAP')
+
+    return parser.parse_known_args()[0] if known else parser.parse_args()
+
+
+def main(opt, callbacks=Callbacks()):
+    # Checks
+    if RANK in {-1, 0}:
+        print_args(vars(opt))
+        #check_git_status()
+        #check_requirements()
+
+    # Resume
+    if opt.resume and not opt.evolve:  # resume from specified or most recent last.pt
+        last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run())
+        opt_yaml = last.parent.parent / 'opt.yaml'  # train options yaml
+        opt_data = opt.data  # original dataset
+        if opt_yaml.is_file():
+            with open(opt_yaml, errors='ignore') as f:
+                d = yaml.safe_load(f)
+        else:
+            d = torch.load(last, map_location='cpu')['opt']
+        opt = argparse.Namespace(**d)  # replace
+        opt.cfg, opt.weights, opt.resume = '', str(last), True  # reinstate
+        if is_url(opt_data):
+            opt.data = check_file(opt_data)  # avoid HUB resume auth timeout
+    else:
+        opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \
+            check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project)  # checks
+        assert len(opt.cfg) or len(opt.weights), 'either --cfg or
--weights must be specified' + if opt.evolve: + if opt.project == str(ROOT / 'runs/train'): # if default project name, rename to runs/evolve + opt.project = str(ROOT / 'runs/evolve') + opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume + if opt.name == 'cfg': + opt.name = Path(opt.cfg).stem # use model.yaml as name + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) + + # DDP mode + device = select_device(opt.device, batch_size=opt.batch_size) + if LOCAL_RANK != -1: + msg = 'is not compatible with YOLO Multi-GPU DDP training' + assert not opt.image_weights, f'--image-weights {msg}' + assert not opt.evolve, f'--evolve {msg}' + assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size' + assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE' + assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + torch.cuda.set_device(LOCAL_RANK) + device = torch.device('cuda', LOCAL_RANK) + dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + + # Train + if not opt.evolve: + train(opt.hyp, opt, device, callbacks) + + # Evolve hyperparameters (optional) + else: + # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit) + meta = { + 'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) + 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) + 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1 + 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay + 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok) + 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum + 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr + 'box': (1, 0.02, 0.2), # box loss gain + 'cls': (1, 0.2, 4.0), # cls loss gain + 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight + 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels) + 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight + 'iou_t': (0, 0.1, 0.7), # IoU training threshold + 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold + 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore) + 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5) + 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction) + 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction) + 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction) + 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg) + 'translate': (1, 0.0, 0.9), # image translation (+/- fraction) + 'scale': (1, 0.0, 0.9), # image scale (+/- gain) + 'shear': (1, 0.0, 10.0), # image shear (+/- deg) + 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 + 'flipud': (1, 0.0, 1.0), # image flip up-down (probability) + 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability) + 'mosaic': (1, 0.0, 1.0), # image mixup (probability) + 'mixup': (1, 0.0, 1.0), # image mixup (probability) + 'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability) + + with open(opt.hyp, errors='ignore') as f: + hyp = yaml.safe_load(f) # load hyps dict + if 'anchors' not in hyp: # anchors commented in hyp.yaml + hyp['anchors'] = 3 + if opt.noautoanchor: + del hyp['anchors'], meta['anchors'] + opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch + # ei = [isinstance(x, (int, float)) for x in 
hyp.values()] # evolvable indices + evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' + if opt.bucket: + os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}') # download evolve.csv if exists + + for _ in range(opt.evolve): # generations to evolve + if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate + # Select parent(s) + parent = 'single' # parent selection method: 'single' or 'weighted' + x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1) + n = min(5, len(x)) # number of previous results to consider + x = x[np.argsort(-fitness(x))][:n] # top n mutations + w = fitness(x) - fitness(x).min() + 1E-6 # weights (sum > 0) + if parent == 'single' or len(x) == 1: + # x = x[random.randint(0, n - 1)] # random selection + x = x[random.choices(range(n), weights=w)[0]] # weighted selection + elif parent == 'weighted': + x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination + + # Mutate + mp, s = 0.8, 0.2 # mutation probability, sigma + npr = np.random + npr.seed(int(time.time())) + g = np.array([meta[k][0] for k in hyp.keys()]) # gains 0-1 + ng = len(meta) + v = np.ones(ng) + while all(v == 1): # mutate until a change occurs (prevent duplicates) + v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0) + for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300) + hyp[k] = float(x[i + 7] * v[i]) # mutate + + # Constrain to limits + for k, v in meta.items(): + hyp[k] = max(hyp[k], v[1]) # lower limit + hyp[k] = min(hyp[k], v[2]) # upper limit + hyp[k] = round(hyp[k], 5) # significant digits + + # Train mutation + results = train(hyp.copy(), opt, device, callbacks) + callbacks = Callbacks() + # Write mutation results + print_mutation(KEYS, results, hyp.copy(), save_dir, opt.bucket) + + # Plot results + plot_evolve(evolve_csv) + LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n' + f"Results saved to {colorstr('bold', save_dir)}\n" + f'Usage example: $ python train.py --hyp {evolve_yaml}') + + +def run(**kwargs): + # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolo.pt') + opt = parse_opt(True) + for k, v in kwargs.items(): + setattr(opt, k, v) + main(opt) + return opt + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/panoptic/val.py b/panoptic/val.py new file mode 100644 index 0000000000000000000000000000000000000000..569b7efe0c50e58739d3c85ece94079e28796c10 --- /dev/null +++ b/panoptic/val.py @@ -0,0 +1,597 @@ +import argparse +import json +import os +import sys +from multiprocessing.pool import ThreadPool +from pathlib import Path + +import numpy as np +import torch +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLO root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +import torch.nn.functional as F +import torchvision.transforms as transforms +from pycocotools import mask as maskUtils +from models.common import DetectMultiBackend +from models.yolo import SegmentationModel +from utils.callbacks import Callbacks +from utils.coco_utils import getCocoIds, getMappingId, getMappingIndex +from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, + check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, + non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh) +from utils.metrics import 
ConfusionMatrix, box_iou +from utils.plots import output_to_target, plot_val_study +from utils.panoptic.dataloaders import create_dataloader +from utils.panoptic.general import mask_iou, process_mask, process_mask_upsample, scale_image +from utils.panoptic.metrics import Metrics, ap_per_class_box_and_mask, Semantic_Metrics +from utils.panoptic.plots import plot_images_and_masks +from utils.torch_utils import de_parallel, select_device, smart_inference_mode + + +def save_one_txt(predn, save_conf, shape, file): + # Save one txt result + gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh + for *xyxy, conf, cls in predn.tolist(): + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format + with open(file, 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + +def save_one_json(predn, jdict, path, class_map, pred_masks): + # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} + from pycocotools.mask import encode + + def single_encode(x): + rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0] + rle["counts"] = rle["counts"].decode("utf-8") + return rle + + image_id = int(path.stem) if path.stem.isnumeric() else path.stem + box = xyxy2xywh(predn[:, :4]) # xywh + box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner + pred_masks = np.transpose(pred_masks, (2, 0, 1)) + with ThreadPool(NUM_THREADS) as pool: + rles = pool.map(single_encode, pred_masks) + for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())): + jdict.append({ + 'image_id': image_id, + 'category_id': class_map[int(p[5])], + 'bbox': [round(x, 3) for x in b], + 'score': round(p[4], 5), + 'segmentation': rles[i]}) + + +def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False): + """ + Return correct prediction matrix + Arguments: + detections (array[N, 6]), x1, y1, x2, y2, conf, class + labels (array[M, 5]), class, x1, y1, x2, y2 + Returns: + correct (array[N, 10]), for 10 IoU levels + """ + if masks: + if overlap: + nl = len(labels) + index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1 + gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) + gt_masks = torch.where(gt_masks == index, 1.0, 0.0) + if gt_masks.shape[1:] != pred_masks.shape[1:]: + gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0] + gt_masks = gt_masks.gt_(0.5) + iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) + else: # boxes + iou = box_iou(labels[:, 1:], detections[:, :4]) + + correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) + correct_class = labels[:, 0:1] == detections[:, 5] + for i in range(len(iouv)): + x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + # matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + correct[matches[:, 1].astype(int), i] = True + return torch.tensor(correct, dtype=torch.bool, device=iouv.device) + + +@smart_inference_mode() +def run( + data, + 
weights=None, # model.pt path(s) + batch_size=32, # batch size + imgsz=640, # inference size (pixels) + conf_thres=0.001, # confidence threshold + iou_thres=0.6, # NMS IoU threshold + max_det=300, # maximum detections per image + task='val', # train, val, test, speed or study + device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu + workers=8, # max dataloader workers (per RANK in DDP mode) + single_cls=False, # treat as single-class dataset + augment=False, # augmented inference + verbose=False, # verbose output + save_txt=False, # save results to *.txt + save_hybrid=False, # save label+prediction hybrid results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_json=False, # save a COCO-JSON results file + project=ROOT / 'runs/val-pan', # save to project/name + name='exp', # save to project/name + exist_ok=False, # existing project/name ok, do not increment + half=True, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + model=None, + dataloader=None, + save_dir=Path(''), + plots=True, + overlap=False, + mask_downsample_ratio=1, + compute_loss=None, + callbacks=Callbacks(), +): + if save_json: + check_requirements(['pycocotools']) + process = process_mask_upsample # more accurate + else: + process = process_mask # faster + + # Initialize/load model and set device + training = model is not None + if training: # called by train.py + device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model + half &= device.type != 'cpu' # half precision only supported on CUDA + model.half() if half else model.float() + nm = de_parallel(model).model[-1].nm # number of masks + else: # called directly + device = select_device(device, batch_size=batch_size) + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Load model + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine + imgsz = check_img_size(imgsz, s=stride) # check image size + half = model.fp16 # FP16 supported on limited backends with CUDA + nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32 # number of masks + if engine: + batch_size = model.batch_size + else: + device = model.device + if not (pt or jit): + batch_size = 1 # export.py models default to batch-size 1 + LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') + + # Data + data = check_dataset(data) # check + + # Configure + model.eval() + cuda = device.type != 'cpu' + #is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset + is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'val2017.txt') # COCO dataset + nc = 1 if single_cls else int(data['nc']) # number of classes + stuff_names = data.get('stuff_names', []) # names of stuff classes + stuff_nc = len(stuff_names) # number of stuff classes + iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95 + niou = iouv.numel() + + # Semantic Segmentation + img_id_list = [] + + # Dataloader + if not training: + if pt and not single_cls: # check --weights are trained on --data + ncm = model.model.nc + assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ + 
f'classes). Pass correct combination of --weights and --data that are trained together.' + model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup + pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks + task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images + dataloader = create_dataloader(data[task], + imgsz, + batch_size, + stride, + single_cls, + pad=pad, + rect=rect, + workers=workers, + prefix=colorstr(f'{task}: '), + overlap_mask=overlap, + mask_downsample_ratio=mask_downsample_ratio)[0] + + seen = 0 + confusion_matrix = ConfusionMatrix(nc=nc) + names = model.names if hasattr(model, 'names') else model.module.names # get class names + if isinstance(names, (list, tuple)): # old format + names = dict(enumerate(names)) + class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) + s = ('%22s' + '%11s' * 12) % ('Class', 'Images', 'Instances', 'Box(P', "R", "mAP50", "mAP50-95)", "Mask(P", "R", + "mAP50", "mAP50-95)", 'S(MIoU', 'FWIoU)') + dt = Profile(), Profile(), Profile() + metrics = Metrics() + semantic_metrics = Semantic_Metrics(nc = (nc + stuff_nc), device = device) + loss = torch.zeros(6, device=device) + jdict, stats = [], [] + semantic_jdict = [] + # callbacks.run('on_val_start') + pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar + for batch_i, (im, targets, paths, shapes, masks, semasks) in enumerate(pbar): + # callbacks.run('on_val_batch_start') + with dt[0]: + if cuda: + im = im.to(device, non_blocking=True) + targets = targets.to(device) + masks = masks.to(device) + semasks = semasks.to(device) + masks = masks.float() + semasks = semasks.float() + im = im.half() if half else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + nb, _, height, width = im.shape # batch size, channels, height, width + + # Inference + with dt[1]: + preds, train_out = model(im)# if compute_loss else (*model(im, augment=augment)[:2], None) + #train_out, preds, protos = p if len(p) == 3 else p[1] + #preds = p + #train_out = p[1][0] if len(p[1]) == 3 else p[0] + # protos = train_out[-1] + #print(preds.shape) + #print(train_out[0].shape) + #print(train_out[1].shape) + #print(train_out[2].shape) + _, pred_masks, protos, psemasks = train_out + + # Loss + if compute_loss: + loss += compute_loss(train_out, targets, masks, semasks = semasks)[1] # box, obj, cls + + # NMS + targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels + lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling + with dt[2]: + preds = non_max_suppression(preds, + conf_thres, + iou_thres, + labels=lb, + multi_label=True, + agnostic=single_cls, + max_det=max_det, + nm=nm) + + # Metrics + plot_masks = [] # masks for plotting + plot_semasks = [] # masks for plotting + + if training: + semantic_metrics.update(psemasks, semasks) + else: + _, _, smh, smw = semasks.shape + semantic_metrics.update(torch.nn.functional.interpolate(psemasks, size = (smh, smw), mode = 'bilinear', align_corners = False), semasks) + + if plots and batch_i < 3: + plot_semasks.append(psemasks.clone().detach().cpu()) + + for si, (pred, proto, psemask) in enumerate(zip(preds, protos, psemasks)): + labels = targets[targets[:, 0] == si, 1:] + nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions + path, shape = Path(paths[si]), shapes[si][0] + image_id = path.stem + img_id_list.append(image_id) + correct_masks = torch.zeros(npr, 
niou, dtype=torch.bool, device=device) # init + correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init + seen += 1 + + if npr == 0: + if nl: + stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0])) + if plots: + confusion_matrix.process_batch(detections=None, labels=labels[:, 0]) + else: + # Masks + midx = [si] if overlap else targets[:, 0] == si + gt_masks = masks[midx] + pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:]) + + # Predictions + if single_cls: + pred[:, 5] = 0 + predn = pred.clone() + scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred + + # Evaluate + if nl: + tbox = xywh2xyxy(labels[:, 1:5]) # target boxes + scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels + labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels + correct_bboxes = process_batch(predn, labelsn, iouv) + correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True) + if plots: + confusion_matrix.process_batch(predn, labelsn) + stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0])) # (conf, pcls, tcls) + + pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8) + if plots and batch_i < 3: + plot_masks.append(pred_masks[:15].cpu()) # filter top 15 to plot + + # Save/log + if save_txt: + save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') + if save_json: + pred_masks = scale_image(im[si].shape[1:], + pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1]) + save_one_json(predn, jdict, path, class_map, pred_masks) # append to COCO-JSON dictionary + # callbacks.run('on_val_image_end', pred, predn, path, names, im[si]) + + # Semantic Segmentation + h0, w0 = shape + + # resize + _, mask_h, mask_w = psemask.shape + h_ratio = mask_h / h0 + w_ratio = mask_w / w0 + + if h_ratio == w_ratio: + psemask = torch.nn.functional.interpolate(psemask[None, :], size = (h0, w0), mode = 'bilinear', align_corners = False) + else: + transform = transforms.CenterCrop((h0, w0)) + + if (1 != h_ratio) and (1 != w_ratio): + h_new = h0 if (h_ratio < w_ratio) else int(mask_h / w_ratio) + w_new = w0 if (h_ratio > w_ratio) else int(mask_w / h_ratio) + psemask = torch.nn.functional.interpolate(psemask[None, :], size = (h_new, w_new), mode = 'bilinear', align_corners = False) + + psemask = transform(psemask) + + psemask = torch.squeeze(psemask) + + nc, h, w = psemask.shape + + semantic_mask = torch.flatten(psemask, start_dim = 1).permute(1, 0) # class x h x w -> (h x w) x class + + max_idx = semantic_mask.argmax(1) + output_masks = torch.zeros(semantic_mask.shape).scatter(1, max_idx.cpu().unsqueeze(1), 1.0) # one hot: (h x w) x class + output_masks = torch.reshape(output_masks.permute(1, 0), (nc, h, w)) # (h x w) x class -> class x h x w + psemask = output_masks.to(device = device) + + # TODO: check is_coco + instances_ids = getCocoIds(name = 'instances') + stuff_mask = torch.zeros((h, w), device = device) + check_semantic_mask = False + for idx, pred_semantic_mask in enumerate(psemask): + category_id = int(getMappingId(idx)) + if 183 == category_id: + # set all non-stuff pixels to other + pred_semantic_mask = (torch.logical_xor(stuff_mask, torch.ones((h, w), device = device))).int() + + # ignore the classes which all zeros / unlabeled class + if (0 >= torch.max(pred_semantic_mask)) or (0 >= category_id): + continue + + if category_id not in 
instances_ids: + # record all stuff mask + stuff_mask = torch.logical_or(stuff_mask, pred_semantic_mask) + + if (category_id not in instances_ids): + rle = maskUtils.encode(np.asfortranarray(pred_semantic_mask.cpu(), dtype = np.uint8)) + rle['counts'] = rle['counts'].decode('utf-8') + + temp_d = { + 'image_id': int(image_id) if image_id.isnumeric() else image_id, + 'category_id': category_id, + 'segmentation': rle, + 'score': 1 + } + + semantic_jdict.append(temp_d) + check_semantic_mask = True + + if not check_semantic_mask: + # append a other mask for evaluation if the image without any mask + other_mask = (torch.ones((h, w), device = device)).int() + + rle = maskUtils.encode(np.asfortranarray(other_mask.cpu(), dtype = np.uint8)) + rle['counts'] = rle['counts'].decode('utf-8') + + temp_d = { + 'image_id': int(image_id) if image_id.isnumeric() else image_id, + 'category_id': 183, + 'segmentation': rle, + 'score': 1 + } + + semantic_jdict.append(temp_d) + + # Plot images + if plots and batch_i < 3: + if len(plot_masks): + plot_masks = torch.cat(plot_masks, dim=0) + if len(plot_semasks): + plot_semasks = torch.cat(plot_semasks, dim = 0) + plot_images_and_masks(im, targets, masks, semasks, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names) + plot_images_and_masks(im, output_to_target(preds, max_det=15), plot_masks, plot_semasks, paths, + save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred + + # callbacks.run('on_val_batch_end') + + # Compute metrics + stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy + if len(stats) and stats[0].any(): + results = ap_per_class_box_and_mask(*stats, plot=plots, save_dir=save_dir, names=names) + metrics.update(results) + nt = np.bincount(stats[4].astype(int), minlength=nc) # number of targets per class + + # Print results + pf = '%22s' + '%11i' * 2 + '%11.3g' * 10 # print format + LOGGER.info(pf % ("all", seen, nt.sum(), *metrics.mean_results(), *semantic_metrics.results())) + if nt.sum() == 0: + LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels') + + # Print results per class + if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): + for i, c in enumerate(metrics.ap_class_index): + LOGGER.info(pf % (names[c], seen, nt[c], *metrics.class_result(i), *semantic_metrics.results())) + + # Print speeds + t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image + if not training: + shape = (batch_size, 3, imgsz, imgsz) + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) + + # Plots + if plots: + confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) + # callbacks.run('on_val_end') + + mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask = metrics.mean_results() + miou_sem, fwiou_sem = semantic_metrics.results() + semantic_metrics.reset() + + # Save JSON + if save_json and len(jdict): + w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights + anno_path = Path(data.get('path', '../coco')) + anno_json = str(anno_path / 'annotations/instances_val2017.json') # annotations json + pred_json = str(save_dir / f"{w}_predictions.json") # predictions json + LOGGER.info(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') + with open(pred_json, 'w') as f: + json.dump(jdict, f) + + semantic_anno_json = str(anno_path / 'annotations/stuff_val2017.json') # annotations json + semantic_pred_json = str(save_dir / f"{w}_predictions_stuff.json") # predictions json + LOGGER.info(f'\nsaving {semantic_pred_json}...') + with open(semantic_pred_json, 'w') as f: + json.dump(semantic_jdict, f) + + try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb + from pycocotools.coco import COCO + from pycocotools.cocoeval import COCOeval + + anno = COCO(anno_json) # init annotations api + pred = anno.loadRes(pred_json) # init predictions api + results = [] + for eval in COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm'): + if is_coco: + eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # img ID to evaluate + eval.evaluate() + eval.accumulate() + eval.summarize() + results.extend(eval.stats[:2]) # update results (mAP@0.5:0.95, mAP@0.5) + map_bbox, map50_bbox, map_mask, map50_mask = results + + # Semantic Segmentation + from utils.stuff_seg.cocostuffeval import COCOStuffeval + + LOGGER.info(f'\nEvaluating pycocotools stuff... ') + imgIds = [int(x) for x in img_id_list] + + stuffGt = COCO(semantic_anno_json) # initialize COCO ground truth api + stuffDt = stuffGt.loadRes(semantic_pred_json) # initialize COCO pred api + + cocoStuffEval = COCOStuffeval(stuffGt, stuffDt) + cocoStuffEval.params.imgIds = imgIds # image IDs to evaluate + cocoStuffEval.evaluate() + stats, statsClass = cocoStuffEval.summarize() + stuffIds = getCocoIds(name = 'stuff') + title = ' {:<5} | {:^6} | {:^6} '.format('class', 'iou', 'macc') if (0 >= len(stuff_names)) else \ + ' {:<5} | {:<20} | {:^6} | {:^6} '.format('class', 'class name', 'iou', 'macc') + print(title) + for idx, (iou, macc) in enumerate(zip(statsClass['ious'], statsClass['maccs'])): + id = (idx + 1) + if id not in stuffIds: + continue + content = ' {:<5} | {:0.4f} | {:0.4f} '.format(str(id), iou, macc) if (0 >= len(stuff_names)) else \ + ' {:<5} | {:<20} | {:0.4f} | {:0.4f} '.format(str(id), str(stuff_names[getMappingIndex(id, name = 'stuff')]), iou, macc) + print(content) + + except Exception as e: + LOGGER.info(f'pycocotools unable to run: {e}') + + # Return results + model.float() # for training + if not training: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") + final_metric = mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask, miou_sem, fwiou_sem + return (*final_metric, *(loss.cpu() / len(dataloader)).tolist()), metrics.get_maps(nc), t + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-pan.yaml', help='dataset.yaml path') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolo-pan.pt', help='model path(s)') + parser.add_argument('--batch-size', type=int, default=32, help='batch size') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image') + parser.add_argument('--task', default='val', help='train, val, test, speed or 
study') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--verbose', action='store_true', help='report mAP by class') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') + parser.add_argument('--project', default=ROOT / 'runs/val-pan', help='save results to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + opt = parser.parse_args() + opt.data = check_yaml(opt.data) # check YAML + # opt.save_json |= opt.data.endswith('coco.yaml') + opt.save_txt |= opt.save_hybrid + print_args(vars(opt)) + return opt + + +def main(opt): + #check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) + + if opt.task in ('train', 'val', 'test'): # run normally + if opt.conf_thres > 0.001: # https://github.com/ + LOGGER.warning(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results') + if opt.save_hybrid: + LOGGER.warning('WARNING ⚠️ --save-hybrid returns high mAP from hybrid labels, not from predictions alone') + run(**vars(opt)) + + else: + weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] + opt.half = torch.cuda.is_available() and opt.device != 'cpu' # FP16 for fastest results + if opt.task == 'speed': # speed benchmarks + # python val.py --task speed --data coco.yaml --batch 1 --weights yolo.pt... + opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False + for opt.weights in weights: + run(**vars(opt), plots=False) + + elif opt.task == 'study': # speed vs mAP benchmarks + # python val.py --task study --data coco.yaml --iou 0.7 --weights yolo.pt... 
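+            # The loop below sweeps --imgsz over range(256, 1536 + 128, 128), runs one
+            # validation pass per size, appends the (metrics, speed) row for each size to
+            # study_<data-stem>_<weights-stem>.txt, zips the study files and finally calls
+            # plot_val_study() to chart speed vs. mAP.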
+ for opt.weights in weights: + f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt' # filename to save to + x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis + for opt.imgsz in x: # img-size + LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...') + r, _, t = run(**vars(opt), plots=False) + y.append(r + t) # results and times + np.savetxt(f, y, fmt='%10.4g') # save + os.system('zip -r study.zip study_*.txt') + plot_val_study(x=x) # plot + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..26a22827a1f6356b25794547a8d924d723d7b9a1 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,49 @@ +# requirements +# Usage: pip install -r requirements.txt + +# Base ------------------------------------------------------------------------ +gitpython +ipython +matplotlib>=3.2.2 +numpy>=1.18.5 +opencv-python>=4.1.1 +Pillow>=7.1.2 +psutil +PyYAML>=5.3.1 +requests>=2.23.0 +scipy>=1.4.1 +thop>=0.1.1 +opencv-python +opencv-contrib-python +torch +torchvision +tqdm>=4.64.0 +# protobuf<=3.20.1 + +# Logging --------------------------------------------------------------------- +tensorboard>=2.4.1 +# clearml>=1.2.0 +# comet + +# Plotting -------------------------------------------------------------------- +pandas>=1.1.4 +seaborn>=0.11.0 + +# Export ---------------------------------------------------------------------- +# coremltools>=6.0 +# onnx>=1.9.0 +# onnx-simplifier>=0.4.1 +# nvidia-pyindex +# nvidia-tensorrt +# scikit-learn<=1.1.2 +# tensorflow>=2.4.1 +# tensorflowjs>=3.9.0 +# openvino-dev + +# Deploy ---------------------------------------------------------------------- +# tritonclient[all]~=2.24.0 + +# Extras ---------------------------------------------------------------------- +# mss +albumentations>=1.0.3 +pycocotools>=2.0 diff --git a/runs/detect/yolov9_c_640_detect/United_States_000062.jpg b/runs/detect/yolov9_c_640_detect/United_States_000062.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6a52412fa66337f2b163d67ac211cce9f9f383ae Binary files /dev/null and b/runs/detect/yolov9_c_640_detect/United_States_000062.jpg differ diff --git a/runs/detect/yolov9_c_640_detect/United_States_000502.jpg b/runs/detect/yolov9_c_640_detect/United_States_000502.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2b45a6591c25b82486f4a433f5ddae838e3107cc Binary files /dev/null and b/runs/detect/yolov9_c_640_detect/United_States_000502.jpg differ diff --git a/runs/detect/yolov9_c_640_detect/temp_image.jpg b/runs/detect/yolov9_c_640_detect/temp_image.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6edb989c4a1c65b213e9170c3f9c474e92f16849 Binary files /dev/null and b/runs/detect/yolov9_c_640_detect/temp_image.jpg differ diff --git a/scripts/get_coco.sh b/scripts/get_coco.sh new file mode 100644 index 0000000000000000000000000000000000000000..524f8dd9e2cae992a4047476520a7e4e1402e6de --- /dev/null +++ b/scripts/get_coco.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# COCO 2017 dataset http://cocodataset.org +# Download command: bash ./scripts/get_coco.sh + +# Download/unzip labels +d='./' # unzip directory +url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ +f='coco2017labels-segments.zip' # or 'coco2017labels.zip', 68 MB +echo 'Downloading' $url$f ' ...' 
+curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background + +# Download/unzip images +d='./coco/images' # unzip directory +url=http://images.cocodataset.org/zips/ +f1='train2017.zip' # 19G, 118k images +f2='val2017.zip' # 1G, 5k images +f3='test2017.zip' # 7G, 41k images (optional) +for f in $f1 $f2 $f3; do + echo 'Downloading' $url$f '...' + curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background +done +wait # finish background tasks diff --git a/segment/predict.py b/segment/predict.py new file mode 100644 index 0000000000000000000000000000000000000000..aeab78781748d70452eaa6292ab3e601639ce32d --- /dev/null +++ b/segment/predict.py @@ -0,0 +1,246 @@ +import argparse +import os +import platform +import sys +from pathlib import Path + +import torch + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLO root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from models.common import DetectMultiBackend +from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams +from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, + increment_path, non_max_suppression, print_args, scale_boxes, scale_segments, + strip_optimizer, xyxy2xywh) +from utils.plots import Annotator, colors, save_one_box +from utils.segment.general import masks2segments, process_mask +from utils.torch_utils import select_device, smart_inference_mode + + +@smart_inference_mode() +def run( + weights=ROOT / 'yolo-seg.pt', # model.pt path(s) + source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) + data=ROOT / 'data/coco.yaml', # dataset.yaml path + imgsz=(640, 640), # inference size (height, width) + conf_thres=0.25, # confidence threshold + iou_thres=0.45, # NMS IOU threshold + max_det=1000, # maximum detections per image + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + view_img=False, # show results + save_txt=False, # save results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_crop=False, # save cropped prediction boxes + nosave=False, # do not save images/videos + classes=None, # filter by class: --class 0, or --class 0 2 3 + agnostic_nms=False, # class-agnostic NMS + augment=False, # augmented inference + visualize=False, # visualize features + update=False, # update all models + project=ROOT / 'runs/predict-seg', # save results to project/name + name='exp', # save results to project/name + exist_ok=False, # existing project/name ok, do not increment + line_thickness=3, # bounding box thickness (pixels) + hide_labels=False, # hide labels + hide_conf=False, # hide confidences + half=False, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + vid_stride=1, # video frame-rate stride + retina_masks=False, +): + source = str(source) + save_img = not nosave and not source.endswith('.txt') # save inference images + is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) + is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) + webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + screenshot = source.lower().startswith('screen') + if is_url and is_file: + source = check_file(source) # download + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Load model + device = select_device(device) + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, names, pt = model.stride, model.names, model.pt + imgsz = check_img_size(imgsz, s=stride) # check image size + + # Dataloader + bs = 1 # batch_size + if webcam: + view_img = check_imshow(warn=True) + dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + bs = len(dataset) + elif screenshot: + dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) + else: + dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + vid_path, vid_writer = [None] * bs, [None] * bs + + # Run inference + model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup + seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) + for path, im, im0s, vid_cap, s in dataset: + with dt[0]: + im = torch.from_numpy(im).to(model.device) + im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + if len(im.shape) == 3: + im = im[None] # expand for batch dim + + # Inference + with dt[1]: + visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False + pred, proto = model(im, augment=augment, visualize=visualize)[:2] + + # NMS + with dt[2]: + pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det, nm=32) + + # Second-stage classifier (optional) + # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) + + # Process predictions + for i, det in enumerate(pred): # per image + seen += 1 + if webcam: # batch_size >= 1 + p, im0, frame = path[i], im0s[i].copy(), dataset.count + s += f'{i}: ' + else: + p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) + + p = Path(p) # to Path + save_path = str(save_dir / p.name) # im.jpg + txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 
'image' else f'_{frame}') # im.txt + s += '%gx%g ' % im.shape[2:] # print string + imc = im0.copy() if save_crop else im0 # for save_crop + annotator = Annotator(im0, line_width=line_thickness, example=str(names)) + if len(det): + masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC + det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size + + # Segments + if save_txt: + segments = reversed(masks2segments(masks)) + segments = [scale_segments(im.shape[2:], x, im0.shape, normalize=True) for x in segments] + + # Print results + for c in det[:, 5].unique(): + n = (det[:, 5] == c).sum() # detections per class + s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string + + # Mask plotting + annotator.masks(masks, + colors=[colors(x, True) for x in det[:, 5]], + im_gpu=None if retina_masks else im[i]) + + # Write results + for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])): + if save_txt: # Write to file + segj = segments[j].reshape(-1) # (n,2) to (n*2) + line = (cls, *segj, conf) if save_conf else (cls, *segj) # label format + with open(f'{txt_path}.txt', 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + if save_img or save_crop or view_img: # Add bbox to image + c = int(cls) # integer class + label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') + annotator.box_label(xyxy, label, color=colors(c, True)) + # annotator.draw.polygon(segments[j], outline=colors(c, True), width=3) + if save_crop: + save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) + + # Stream results + im0 = annotator.result() + if view_img: + if platform.system() == 'Linux' and p not in windows: + windows.append(p) + cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) + cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) + cv2.imshow(str(p), im0) + if cv2.waitKey(1) == ord('q'): # 1 millisecond + exit() + + # Save results (image with detections) + if save_img: + if dataset.mode == 'image': + cv2.imwrite(save_path, im0) + else: # 'video' or 'stream' + if vid_path[i] != save_path: # new video + vid_path[i] = save_path + if isinstance(vid_writer[i], cv2.VideoWriter): + vid_writer[i].release() # release previous video writer + if vid_cap: # video + fps = vid_cap.get(cv2.CAP_PROP_FPS) + w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + else: # stream + fps, w, h = 30, im0.shape[1], im0.shape[0] + save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos + vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) + vid_writer[i].write(im0) + + # Print time (inference-only) + LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") + + # Print results + t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) + if save_txt or save_img: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") + if update: + strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning) + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolo-seg.pt', help='model path(s)') + 
parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') + parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') + parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--view-img', action='store_true', help='show results') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') + parser.add_argument('--nosave', action='store_true', help='do not save images/videos') + parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') + parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--visualize', action='store_true', help='visualize features') + parser.add_argument('--update', action='store_true', help='update all models') + parser.add_argument('--project', default=ROOT / 'runs/predict-seg', help='save results to project/name') + parser.add_argument('--name', default='exp', help='save results to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') + parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') + parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride') + parser.add_argument('--retina-masks', action='store_true', help='whether to plot masks in native resolution') + opt = parser.parse_args() + opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand + print_args(vars(opt)) + return opt + + +def main(opt): + check_requirements(exclude=('tensorboard', 'thop')) + run(**vars(opt)) + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/segment/train.py b/segment/train.py new file mode 100644 index 0000000000000000000000000000000000000000..311f21d9d3a73ad9bf5d8644a17987994cbae989 --- /dev/null +++ b/segment/train.py @@ -0,0 +1,646 @@ +import argparse +import math +import os +import random +import sys +import time +from copy import deepcopy +from datetime import datetime +from pathlib import Path + +import numpy as np +import torch +import torch.distributed as dist +import torch.nn as nn +import yaml +from torch.optim import lr_scheduler +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLO root directory +if str(ROOT) not in sys.path: + 
sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +import segment.val as validate # for end-of-epoch mAP +from models.experimental import attempt_load +from models.yolo import SegmentationModel +from utils.autoanchor import check_anchors +from utils.autobatch import check_train_batch_size +from utils.callbacks import Callbacks +from utils.downloads import attempt_download, is_url +from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info, + check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, + get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, + labels_to_image_weights, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) +from utils.loggers import GenericLogger +from utils.plots import plot_evolve, plot_labels +from utils.segment.dataloaders import create_dataloader +from utils.segment.loss_tal import ComputeLoss +from utils.segment.metrics import KEYS, fitness +from utils.segment.plots import plot_images_and_masks, plot_results_with_masks +from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer, + smart_resume, torch_distributed_zero_first) + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) +GIT_INFO = None#check_git_info() + + +def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary + save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze, mask_ratio = \ + Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ + opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze, opt.mask_ratio + # callbacks.run('on_pretrain_routine_start') + + # Directories + w = save_dir / 'weights' # weights dir + (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir + last, best = w / 'last.pt', w / 'best.pt' + + # Hyperparameters + if isinstance(hyp, str): + with open(hyp, errors='ignore') as f: + hyp = yaml.safe_load(f) # load hyps dict + LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) + opt.hyp = hyp.copy() # for saving hyps to checkpoints + + # Save run settings + if not evolve: + yaml_save(save_dir / 'hyp.yaml', hyp) + yaml_save(save_dir / 'opt.yaml', vars(opt)) + + # Loggers + data_dict = None + if RANK in {-1, 0}: + logger = GenericLogger(opt=opt, console_logger=LOGGER) + + # Config + plots = not evolve and not opt.noplots # create plots + overlap = not opt.no_overlap + cuda = device.type != 'cpu' + init_seeds(opt.seed + 1 + RANK, deterministic=True) + with torch_distributed_zero_first(LOCAL_RANK): + data_dict = data_dict or check_dataset(data) # check if None + train_path, val_path = data_dict['train'], data_dict['val'] + nc = 1 if single_cls else int(data_dict['nc']) # number of classes + names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names + #is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset + is_coco = isinstance(val_path, str) and val_path.endswith('val2017.txt') # COCO dataset + + # Model + check_suffix(weights, '.pt') # check weights + pretrained = weights.endswith('.pt') + if pretrained: + with torch_distributed_zero_first(LOCAL_RANK): 
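+            # Transfer-learning path: the checkpoint is loaded on CPU to avoid a CUDA memory
+            # leak, a fresh SegmentationModel is built from --cfg (or the checkpoint yaml),
+            # the two state_dicts are intersected (anchor keys excluded when a new cfg/anchor
+            # hyp is supplied and we are not resuming) and loaded with strict=False.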
+ weights = attempt_download(weights) # download if not found locally + ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak + model = SegmentationModel(cfg or ckpt['model'].yaml, ch=3, nc=nc).to(device) + exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys + csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 + csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect + model.load_state_dict(csd, strict=False) # load + LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report + else: + model = SegmentationModel(cfg, ch=3, nc=nc).to(device) # create + amp = check_amp(model) # check AMP + + # Freeze + freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze + for k, v in model.named_parameters(): + #v.requires_grad = True # train all layers + # v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results) + if any(x in k for x in freeze): + LOGGER.info(f'freezing {k}') + v.requires_grad = False + + # Image size + gs = max(int(model.stride.max()), 32) # grid size (max stride) + imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple + + # Batch size + if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size + batch_size = check_train_batch_size(model, imgsz, amp) + logger.update_params({"batch_size": batch_size}) + # loggers.on_params_update({"batch_size": batch_size}) + + # Optimizer + nbs = 64 # nominal batch size + accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing + hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay + optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay']) + + # Scheduler + if opt.cos_lr: + lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] + else: + lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) + + # EMA + ema = ModelEMA(model) if RANK in {-1, 0} else None + + # Resume + best_fitness, start_epoch = 0.0, 0 + if pretrained: + if resume: + best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume) + del ckpt, csd + + # DP mode + if cuda and RANK == -1 and torch.cuda.device_count() > 1: + LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.') + model = torch.nn.DataParallel(model) + + # SyncBatchNorm + if opt.sync_bn and cuda and RANK != -1: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) + LOGGER.info('Using SyncBatchNorm()') + + # Trainloader + train_loader, dataset = create_dataloader( + train_path, + imgsz, + batch_size // WORLD_SIZE, + gs, + single_cls, + hyp=hyp, + augment=True, + cache=None if opt.cache == 'val' else opt.cache, + rect=opt.rect, + rank=LOCAL_RANK, + workers=workers, + image_weights=opt.image_weights, + close_mosaic=opt.close_mosaic != 0, + quad=opt.quad, + prefix=colorstr('train: '), + shuffle=True, + mask_downsample_ratio=mask_ratio, + overlap_mask=overlap, + ) + labels = np.concatenate(dataset.labels, 0) + mlc = int(labels[:, 0].max()) # max label class + assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. 
Possible class labels are 0-{nc - 1}' + + # Process 0 + if RANK in {-1, 0}: + val_loader = create_dataloader(val_path, + imgsz, + batch_size // WORLD_SIZE * 2, + gs, + single_cls, + hyp=hyp, + cache=None if noval else opt.cache, + rect=True, + rank=-1, + workers=workers * 2, + pad=0.5, + mask_downsample_ratio=mask_ratio, + overlap_mask=overlap, + prefix=colorstr('val: '))[0] + + if not resume: + #if not opt.noautoanchor: + # check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # run AutoAnchor + model.half().float() # pre-reduce anchor precision + + if plots: + plot_labels(labels, names, save_dir) + # callbacks.run('on_pretrain_routine_end', labels, names) + + # DDP mode + if cuda and RANK != -1: + model = smart_DDP(model) + + # Model attributes + nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps) + #hyp['box'] *= 3 / nl # scale to layers + #hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers + #hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers + hyp['label_smoothing'] = opt.label_smoothing + model.nc = nc # attach number of classes to model + model.hyp = hyp # attach hyperparameters to model + model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights + model.names = names + + # Start training + t0 = time.time() + nb = len(train_loader) # number of batches + nw = max(round(hyp['warmup_epochs'] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations) + # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training + last_opt_step = -1 + maps = np.zeros(nc) # mAP per class + results = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) + scheduler.last_epoch = start_epoch - 1 # do not move + scaler = torch.cuda.amp.GradScaler(enabled=amp) + stopper, stop = EarlyStopping(patience=opt.patience), False + compute_loss = ComputeLoss(model, overlap=overlap) # init loss class + # callbacks.run('on_train_start') + LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' + f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' + f"Logging results to {colorstr('bold', save_dir)}\n" + f'Starting training for {epochs} epochs...') + for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ + # callbacks.run('on_train_epoch_start') + model.train() + + # Update image weights (optional, single-GPU only) + if opt.image_weights: + cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights + iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights + dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx + if epoch == (epochs - opt.close_mosaic): + LOGGER.info("Closing dataloader mosaic") + dataset.mosaic = False + + # Update mosaic border (optional) + # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) + # dataset.mosaic_border = [b - imgsz, -b] # height, width borders + + mloss = torch.zeros(4, device=device) # mean losses + if RANK != -1: + train_loader.sampler.set_epoch(epoch) + pbar = enumerate(train_loader) + LOGGER.info(('\n' + '%11s' * 8) % + ('Epoch', 'GPU_mem', 'box_loss', 'seg_loss', 'cls_loss', 'dfl_loss', 'Instances', 'Size')) + if RANK in {-1, 0}: + pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar + optimizer.zero_grad() + for i, (imgs, targets, paths, _, masks) in pbar: # batch 
------------------------------------------------------ + # callbacks.run('on_train_batch_start') + ni = i + nb * epoch # number integrated batches (since train start) + imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0 + + # Warmup + if ni <= nw: + xi = [0, nw] # x interp + # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) + accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round()) + for j, x in enumerate(optimizer.param_groups): + # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 + x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)]) + if 'momentum' in x: + x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) + + # Multi-scale + if opt.multi_scale: + sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size + sf = sz / max(imgs.shape[2:]) # scale factor + if sf != 1: + ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) + imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) + + # Forward + with torch.cuda.amp.autocast(amp): + pred = model(imgs) # forward + loss, loss_items = compute_loss(pred, targets.to(device), masks=masks.to(device).float()) + if RANK != -1: + loss *= WORLD_SIZE # gradient averaged between devices in DDP mode + if opt.quad: + loss *= 4. + + # Backward + scaler.scale(loss).backward() + + # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html + if ni - last_opt_step >= accumulate: + scaler.unscale_(optimizer) # unscale gradients + torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients + scaler.step(optimizer) # optimizer.step + scaler.update() + optimizer.zero_grad() + if ema: + ema.update(model) + last_opt_step = ni + + # Log + if RANK in {-1, 0}: + mloss = (mloss * i + loss_items) / (i + 1) # update mean losses + mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) + pbar.set_description(('%11s' * 2 + '%11.4g' * 6) % + (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) + # callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths) + # if callbacks.stop_training: + # return + + # Mosaic plots + if plots: + if ni < 3: + plot_images_and_masks(imgs, targets, masks, paths, save_dir / f"train_batch{ni}.jpg") + if ni == 10: + files = sorted(save_dir.glob('train*.jpg')) + logger.log_images(files, "Mosaics", epoch) + # end batch ------------------------------------------------------------------------------------------------ + + # Scheduler + lr = [x['lr'] for x in optimizer.param_groups] # for loggers + scheduler.step() + + if RANK in {-1, 0}: + # mAP + # callbacks.run('on_train_epoch_end', epoch=epoch) + ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) + final_epoch = (epoch + 1 == epochs) or stopper.possible_stop + if not noval or final_epoch: # Calculate mAP + results, maps, _ = validate.run(data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + half=amp, + model=ema.ema, + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + plots=False, + callbacks=callbacks, + compute_loss=compute_loss, + mask_downsample_ratio=mask_ratio, + overlap=overlap) + + # Update best mAP + fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] + stop = stopper(epoch=epoch, fitness=fi) # early 
stop check + if fi > best_fitness: + best_fitness = fi + log_vals = list(mloss) + list(results) + lr + # callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi) + # Log val metrics and media + metrics_dict = dict(zip(KEYS, log_vals)) + logger.log_metrics(metrics_dict, epoch) + + # Save model + if (not nosave) or (final_epoch and not evolve): # if save + ckpt = { + 'epoch': epoch, + 'best_fitness': best_fitness, + 'model': deepcopy(de_parallel(model)).half(), + 'ema': deepcopy(ema.ema).half(), + 'updates': ema.updates, + 'optimizer': optimizer.state_dict(), + 'opt': vars(opt), + 'git': GIT_INFO, # {remote, branch, commit} if a git repo + 'date': datetime.now().isoformat()} + + # Save last, best and delete + torch.save(ckpt, last) + if best_fitness == fi: + torch.save(ckpt, best) + if opt.save_period > 0 and epoch % opt.save_period == 0: + torch.save(ckpt, w / f'epoch{epoch}.pt') + logger.log_model(w / f'epoch{epoch}.pt') + del ckpt + # callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) + + # EarlyStopping + if RANK != -1: # if DDP training + broadcast_list = [stop if RANK == 0 else None] + dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks + if RANK != 0: + stop = broadcast_list[0] + if stop: + break # must break all DDP ranks + + # end epoch ---------------------------------------------------------------------------------------------------- + # end training ----------------------------------------------------------------------------------------------------- + if RANK in {-1, 0}: + LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') + for f in last, best: + if f.exists(): + strip_optimizer(f) # strip optimizers + if f is best: + LOGGER.info(f'\nValidating {f}...') + results, _, _ = validate.run( + data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + model=attempt_load(f, device).half(), + iou_thres=0.65 if is_coco else 0.60, # best pycocotools at iou 0.65 + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + save_json=is_coco, + verbose=True, + plots=plots, + callbacks=callbacks, + compute_loss=compute_loss, + mask_downsample_ratio=mask_ratio, + overlap=overlap) # val best model with plots + if is_coco: + # callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi) + metrics_dict = dict(zip(KEYS, list(mloss) + list(results) + lr)) + logger.log_metrics(metrics_dict, epoch) + + # callbacks.run('on_train_end', last, best, epoch, results) + # on train end callback using genericLogger + logger.log_metrics(dict(zip(KEYS[4:16], results)), epochs) + if not opt.evolve: + logger.log_model(best, epoch) + if plots: + plot_results_with_masks(file=save_dir / 'results.csv') # save results.png + files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] + files = [(save_dir / f) for f in files if (save_dir / f).exists()] # filter + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") + logger.log_images(files, "Results", epoch + 1) + logger.log_images(sorted(save_dir.glob('val*.jpg')), "Validation", epoch + 1) + torch.cuda.empty_cache() + return results + + +def parse_opt(known=False): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default=ROOT / 'yolo-seg.pt', help='initial weights path') + parser.add_argument('--cfg', type=str, default='', help='model.yaml path') + parser.add_argument('--data', type=str, default=ROOT / 
'data/coco128-seg.yaml', help='dataset.yaml path') + parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') + parser.add_argument('--epochs', type=int, default=100, help='total training epochs') + parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') + parser.add_argument('--rect', action='store_true', help='rectangular training') + parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') + parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') + parser.add_argument('--noval', action='store_true', help='only validate final epoch') + parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') + parser.add_argument('--noplots', action='store_true', help='save no plot files') + parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') + parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') + parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk') + parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') + parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') + parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW', 'LION'], default='SGD', help='optimizer') + parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--project', default=ROOT / 'runs/train-seg', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--quad', action='store_true', help='quad dataloader') + parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') + parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') + parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') + parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') + parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') + parser.add_argument('--seed', type=int, default=0, help='Global training seed') + parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') + parser.add_argument('--close-mosaic', type=int, default=0, help='Experimental') + + # Instance Segmentation Args + parser.add_argument('--mask-ratio', type=int, default=4, help='Downsample the truth masks to saving memory') + parser.add_argument('--no-overlap', action='store_true', help='Overlap masks train faster at slightly less mAP') + + return parser.parse_known_args()[0] if known else 
parser.parse_args() + + +def main(opt, callbacks=Callbacks()): + # Checks + if RANK in {-1, 0}: + print_args(vars(opt)) + #check_git_status() + #check_requirements() + + # Resume + if opt.resume and not opt.evolve: # resume from specified or most recent last.pt + last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run()) + opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml + opt_data = opt.data # original dataset + if opt_yaml.is_file(): + with open(opt_yaml, errors='ignore') as f: + d = yaml.safe_load(f) + else: + d = torch.load(last, map_location='cpu')['opt'] + opt = argparse.Namespace(**d) # replace + opt.cfg, opt.weights, opt.resume = '', str(last), True # reinstate + if is_url(opt_data): + opt.data = check_file(opt_data) # avoid HUB resume auth timeout + else: + opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \ + check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks + assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' + if opt.evolve: + if opt.project == str(ROOT / 'runs/train'): # if default project name, rename to runs/evolve + opt.project = str(ROOT / 'runs/evolve') + opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume + if opt.name == 'cfg': + opt.name = Path(opt.cfg).stem # use model.yaml as name + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) + + # DDP mode + device = select_device(opt.device, batch_size=opt.batch_size) + if LOCAL_RANK != -1: + msg = 'is not compatible with YOLO Multi-GPU DDP training' + assert not opt.image_weights, f'--image-weights {msg}' + assert not opt.evolve, f'--evolve {msg}' + assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size' + assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE' + assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + torch.cuda.set_device(LOCAL_RANK) + device = torch.device('cuda', LOCAL_RANK) + dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + + # Train + if not opt.evolve: + train(opt.hyp, opt, device, callbacks) + + # Evolve hyperparameters (optional) + else: + # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit) + meta = { + 'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) + 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) + 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1 + 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay + 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok) + 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum + 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr + 'box': (1, 0.02, 0.2), # box loss gain + 'cls': (1, 0.2, 4.0), # cls loss gain + 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight + 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels) + 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight + 'iou_t': (0, 0.1, 0.7), # IoU training threshold + 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold + 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore) + 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5) + 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction) + 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction) + 'hsv_v': 
(1, 0.0, 0.9), # image HSV-Value augmentation (fraction) + 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg) + 'translate': (1, 0.0, 0.9), # image translation (+/- fraction) + 'scale': (1, 0.0, 0.9), # image scale (+/- gain) + 'shear': (1, 0.0, 10.0), # image shear (+/- deg) + 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 + 'flipud': (1, 0.0, 1.0), # image flip up-down (probability) + 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability) + 'mosaic': (1, 0.0, 1.0), # image mixup (probability) + 'mixup': (1, 0.0, 1.0), # image mixup (probability) + 'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability) + + with open(opt.hyp, errors='ignore') as f: + hyp = yaml.safe_load(f) # load hyps dict + if 'anchors' not in hyp: # anchors commented in hyp.yaml + hyp['anchors'] = 3 + if opt.noautoanchor: + del hyp['anchors'], meta['anchors'] + opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch + # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices + evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' + if opt.bucket: + os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}') # download evolve.csv if exists + + for _ in range(opt.evolve): # generations to evolve + if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate + # Select parent(s) + parent = 'single' # parent selection method: 'single' or 'weighted' + x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1) + n = min(5, len(x)) # number of previous results to consider + x = x[np.argsort(-fitness(x))][:n] # top n mutations + w = fitness(x) - fitness(x).min() + 1E-6 # weights (sum > 0) + if parent == 'single' or len(x) == 1: + # x = x[random.randint(0, n - 1)] # random selection + x = x[random.choices(range(n), weights=w)[0]] # weighted selection + elif parent == 'weighted': + x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination + + # Mutate + mp, s = 0.8, 0.2 # mutation probability, sigma + npr = np.random + npr.seed(int(time.time())) + g = np.array([meta[k][0] for k in hyp.keys()]) # gains 0-1 + ng = len(meta) + v = np.ones(ng) + while all(v == 1): # mutate until a change occurs (prevent duplicates) + v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0) + for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300) + hyp[k] = float(x[i + 7] * v[i]) # mutate + + # Constrain to limits + for k, v in meta.items(): + hyp[k] = max(hyp[k], v[1]) # lower limit + hyp[k] = min(hyp[k], v[2]) # upper limit + hyp[k] = round(hyp[k], 5) # significant digits + + # Train mutation + results = train(hyp.copy(), opt, device, callbacks) + callbacks = Callbacks() + # Write mutation results + print_mutation(KEYS, results, hyp.copy(), save_dir, opt.bucket) + + # Plot results + plot_evolve(evolve_csv) + LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n' + f"Results saved to {colorstr('bold', save_dir)}\n" + f'Usage example: $ python train.py --hyp {evolve_yaml}') + + +def run(**kwargs): + # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolo.pt') + opt = parse_opt(True) + for k, v in kwargs.items(): + setattr(opt, k, v) + main(opt) + return opt + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/segment/train_dual.py b/segment/train_dual.py new file mode 100644 index 0000000000000000000000000000000000000000..1411f245b498fb2fb0cb7bd2ad94461f93fbf8e4 --- /dev/null +++ 
b/segment/train_dual.py @@ -0,0 +1,647 @@ +import argparse +import math +import os +import random +import sys +import time +from copy import deepcopy +from datetime import datetime +from pathlib import Path + +import numpy as np +import torch +import torch.distributed as dist +import torch.nn as nn +import yaml +from torch.optim import lr_scheduler +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLO root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +import segment.val_dual as validate # for end-of-epoch mAP +from models.experimental import attempt_load +from models.yolo import SegmentationModel +from utils.autoanchor import check_anchors +from utils.autobatch import check_train_batch_size +from utils.callbacks import Callbacks +from utils.downloads import attempt_download, is_url +from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info, + check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, + get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, + labels_to_image_weights, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) +from utils.loggers import GenericLogger +from utils.plots import plot_evolve, plot_labels +from utils.segment.dataloaders import create_dataloader +from utils.segment.loss_tal_dual import ComputeLoss +#from utils.segment.loss_tal_dual import ComputeLossLH as ComputeLoss +from utils.segment.metrics import KEYS, fitness +from utils.segment.plots import plot_images_and_masks, plot_results_with_masks +from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer, + smart_resume, torch_distributed_zero_first) + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) +GIT_INFO = None#check_git_info() + + +def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary + save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze, mask_ratio = \ + Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ + opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze, opt.mask_ratio + # callbacks.run('on_pretrain_routine_start') + + # Directories + w = save_dir / 'weights' # weights dir + (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir + last, best = w / 'last.pt', w / 'best.pt' + + # Hyperparameters + if isinstance(hyp, str): + with open(hyp, errors='ignore') as f: + hyp = yaml.safe_load(f) # load hyps dict + LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) + opt.hyp = hyp.copy() # for saving hyps to checkpoints + + # Save run settings + if not evolve: + yaml_save(save_dir / 'hyp.yaml', hyp) + yaml_save(save_dir / 'opt.yaml', vars(opt)) + + # Loggers + data_dict = None + if RANK in {-1, 0}: + logger = GenericLogger(opt=opt, console_logger=LOGGER) + + # Config + plots = not evolve and not opt.noplots # create plots + overlap = not opt.no_overlap + cuda = device.type != 'cpu' + init_seeds(opt.seed + 1 + RANK, deterministic=True) + with torch_distributed_zero_first(LOCAL_RANK): + data_dict = data_dict or check_dataset(data) # check if None + train_path, 
val_path = data_dict['train'], data_dict['val'] + nc = 1 if single_cls else int(data_dict['nc']) # number of classes + names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names + #is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset + is_coco = isinstance(val_path, str) and val_path.endswith('val2017.txt') # COCO dataset + + # Model + check_suffix(weights, '.pt') # check weights + pretrained = weights.endswith('.pt') + if pretrained: + with torch_distributed_zero_first(LOCAL_RANK): + weights = attempt_download(weights) # download if not found locally + ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak + model = SegmentationModel(cfg or ckpt['model'].yaml, ch=3, nc=nc).to(device) + exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys + csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 + csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect + model.load_state_dict(csd, strict=False) # load + LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report + else: + model = SegmentationModel(cfg, ch=3, nc=nc).to(device) # create + amp = check_amp(model) # check AMP + + # Freeze + freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze + for k, v in model.named_parameters(): + #v.requires_grad = True # train all layers + # v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results) + if any(x in k for x in freeze): + LOGGER.info(f'freezing {k}') + v.requires_grad = False + + # Image size + gs = max(int(model.stride.max()), 32) # grid size (max stride) + imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple + + # Batch size + if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size + batch_size = check_train_batch_size(model, imgsz, amp) + logger.update_params({"batch_size": batch_size}) + # loggers.on_params_update({"batch_size": batch_size}) + + # Optimizer + nbs = 64 # nominal batch size + accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing + hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay + optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay']) + + # Scheduler + if opt.cos_lr: + lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] + else: + lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) + + # EMA + ema = ModelEMA(model) if RANK in {-1, 0} else None + + # Resume + best_fitness, start_epoch = 0.0, 0 + if pretrained: + if resume: + best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume) + del ckpt, csd + + # DP mode + if cuda and RANK == -1 and torch.cuda.device_count() > 1: + LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.') + model = torch.nn.DataParallel(model) + + # SyncBatchNorm + if opt.sync_bn and cuda and RANK != -1: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) + LOGGER.info('Using SyncBatchNorm()') + + # Trainloader + train_loader, dataset = create_dataloader( + train_path, + imgsz, + batch_size // WORLD_SIZE, + gs, + single_cls, + hyp=hyp, + 
augment=True, + cache=None if opt.cache == 'val' else opt.cache, + rect=opt.rect, + rank=LOCAL_RANK, + workers=workers, + image_weights=opt.image_weights, + close_mosaic=opt.close_mosaic != 0, + quad=opt.quad, + prefix=colorstr('train: '), + shuffle=True, + mask_downsample_ratio=mask_ratio, + overlap_mask=overlap, + ) + labels = np.concatenate(dataset.labels, 0) + mlc = int(labels[:, 0].max()) # max label class + assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}' + + # Process 0 + if RANK in {-1, 0}: + val_loader = create_dataloader(val_path, + imgsz, + batch_size // WORLD_SIZE * 2, + gs, + single_cls, + hyp=hyp, + cache=None if noval else opt.cache, + rect=True, + rank=-1, + workers=workers * 2, + pad=0.5, + mask_downsample_ratio=mask_ratio, + overlap_mask=overlap, + prefix=colorstr('val: '))[0] + + if not resume: + #if not opt.noautoanchor: + # check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # run AutoAnchor + model.half().float() # pre-reduce anchor precision + + if plots: + plot_labels(labels, names, save_dir) + # callbacks.run('on_pretrain_routine_end', labels, names) + + # DDP mode + if cuda and RANK != -1: + model = smart_DDP(model) + + # Model attributes + nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps) + #hyp['box'] *= 3 / nl # scale to layers + #hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers + #hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers + hyp['label_smoothing'] = opt.label_smoothing + model.nc = nc # attach number of classes to model + model.hyp = hyp # attach hyperparameters to model + model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights + model.names = names + + # Start training + t0 = time.time() + nb = len(train_loader) # number of batches + nw = max(round(hyp['warmup_epochs'] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations) + # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training + last_opt_step = -1 + maps = np.zeros(nc) # mAP per class + results = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) + scheduler.last_epoch = start_epoch - 1 # do not move + scaler = torch.cuda.amp.GradScaler(enabled=amp) + stopper, stop = EarlyStopping(patience=opt.patience), False + compute_loss = ComputeLoss(model, overlap=overlap) # init loss class + # callbacks.run('on_train_start') + LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' + f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' + f"Logging results to {colorstr('bold', save_dir)}\n" + f'Starting training for {epochs} epochs...') + for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ + # callbacks.run('on_train_epoch_start') + model.train() + + # Update image weights (optional, single-GPU only) + if opt.image_weights: + cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights + iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights + dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx + if epoch == (epochs - opt.close_mosaic): + LOGGER.info("Closing dataloader mosaic") + dataset.mosaic = False + + # Update mosaic border (optional) + # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) + # dataset.mosaic_border = [b - imgsz, -b] # height, width 
borders + + mloss = torch.zeros(4, device=device) # mean losses + if RANK != -1: + train_loader.sampler.set_epoch(epoch) + pbar = enumerate(train_loader) + LOGGER.info(('\n' + '%11s' * 8) % + ('Epoch', 'GPU_mem', 'box_loss', 'seg_loss', 'cls_loss', 'dfl_loss', 'Instances', 'Size')) + if RANK in {-1, 0}: + pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar + optimizer.zero_grad() + for i, (imgs, targets, paths, _, masks) in pbar: # batch ------------------------------------------------------ + # callbacks.run('on_train_batch_start') + ni = i + nb * epoch # number integrated batches (since train start) + imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0 + + # Warmup + if ni <= nw: + xi = [0, nw] # x interp + # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) + accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round()) + for j, x in enumerate(optimizer.param_groups): + # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 + x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)]) + if 'momentum' in x: + x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) + + # Multi-scale + if opt.multi_scale: + sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size + sf = sz / max(imgs.shape[2:]) # scale factor + if sf != 1: + ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) + imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) + + # Forward + with torch.cuda.amp.autocast(amp): + pred = model(imgs) # forward + loss, loss_items = compute_loss(pred, targets.to(device), masks=masks.to(device).float()) + if RANK != -1: + loss *= WORLD_SIZE # gradient averaged between devices in DDP mode + if opt.quad: + loss *= 4. 
+ + # Backward + scaler.scale(loss).backward() + + # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html + if ni - last_opt_step >= accumulate: + scaler.unscale_(optimizer) # unscale gradients + torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients + scaler.step(optimizer) # optimizer.step + scaler.update() + optimizer.zero_grad() + if ema: + ema.update(model) + last_opt_step = ni + + # Log + if RANK in {-1, 0}: + mloss = (mloss * i + loss_items) / (i + 1) # update mean losses + mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) + pbar.set_description(('%11s' * 2 + '%11.4g' * 6) % + (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) + # callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths) + # if callbacks.stop_training: + # return + + # Mosaic plots + if plots: + if ni < 3: + plot_images_and_masks(imgs, targets, masks, paths, save_dir / f"train_batch{ni}.jpg") + if ni == 10: + files = sorted(save_dir.glob('train*.jpg')) + logger.log_images(files, "Mosaics", epoch) + # end batch ------------------------------------------------------------------------------------------------ + + # Scheduler + lr = [x['lr'] for x in optimizer.param_groups] # for loggers + scheduler.step() + + if RANK in {-1, 0}: + # mAP + # callbacks.run('on_train_epoch_end', epoch=epoch) + ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) + final_epoch = (epoch + 1 == epochs) or stopper.possible_stop + if not noval or final_epoch: # Calculate mAP + results, maps, _ = validate.run(data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + half=amp, + model=ema.ema, + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + plots=False, + callbacks=callbacks, + compute_loss=compute_loss, + mask_downsample_ratio=mask_ratio, + overlap=overlap) + + # Update best mAP + fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] + stop = stopper(epoch=epoch, fitness=fi) # early stop check + if fi > best_fitness: + best_fitness = fi + log_vals = list(mloss) + list(results) + lr + # callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi) + # Log val metrics and media + metrics_dict = dict(zip(KEYS, log_vals)) + logger.log_metrics(metrics_dict, epoch) + + # Save model + if (not nosave) or (final_epoch and not evolve): # if save + ckpt = { + 'epoch': epoch, + 'best_fitness': best_fitness, + 'model': deepcopy(de_parallel(model)).half(), + 'ema': deepcopy(ema.ema).half(), + 'updates': ema.updates, + 'optimizer': optimizer.state_dict(), + 'opt': vars(opt), + 'git': GIT_INFO, # {remote, branch, commit} if a git repo + 'date': datetime.now().isoformat()} + + # Save last, best and delete + torch.save(ckpt, last) + if best_fitness == fi: + torch.save(ckpt, best) + if opt.save_period > 0 and epoch % opt.save_period == 0: + torch.save(ckpt, w / f'epoch{epoch}.pt') + logger.log_model(w / f'epoch{epoch}.pt') + del ckpt + # callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) + + # EarlyStopping + if RANK != -1: # if DDP training + broadcast_list = [stop if RANK == 0 else None] + dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks + if RANK != 0: + stop = broadcast_list[0] + if stop: + break # must break all DDP ranks + + # end epoch ---------------------------------------------------------------------------------------------------- + # end training 
----------------------------------------------------------------------------------------------------- + if RANK in {-1, 0}: + LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') + for f in last, best: + if f.exists(): + strip_optimizer(f) # strip optimizers + if f is best: + LOGGER.info(f'\nValidating {f}...') + results, _, _ = validate.run( + data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + model=attempt_load(f, device).half(), + iou_thres=0.65 if is_coco else 0.60, # best pycocotools at iou 0.65 + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + save_json=is_coco, + verbose=True, + plots=plots, + callbacks=callbacks, + compute_loss=compute_loss, + mask_downsample_ratio=mask_ratio, + overlap=overlap) # val best model with plots + if is_coco: + # callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi) + metrics_dict = dict(zip(KEYS, list(mloss) + list(results) + lr)) + logger.log_metrics(metrics_dict, epoch) + + # callbacks.run('on_train_end', last, best, epoch, results) + # on train end callback using genericLogger + logger.log_metrics(dict(zip(KEYS[4:16], results)), epochs) + if not opt.evolve: + logger.log_model(best, epoch) + if plots: + plot_results_with_masks(file=save_dir / 'results.csv') # save results.png + files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] + files = [(save_dir / f) for f in files if (save_dir / f).exists()] # filter + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") + logger.log_images(files, "Results", epoch + 1) + logger.log_images(sorted(save_dir.glob('val*.jpg')), "Validation", epoch + 1) + torch.cuda.empty_cache() + return results + + +def parse_opt(known=False): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default=ROOT / 'yolo-seg.pt', help='initial weights path') + parser.add_argument('--cfg', type=str, default='', help='model.yaml path') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path') + parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') + parser.add_argument('--epochs', type=int, default=100, help='total training epochs') + parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') + parser.add_argument('--rect', action='store_true', help='rectangular training') + parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') + parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') + parser.add_argument('--noval', action='store_true', help='only validate final epoch') + parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') + parser.add_argument('--noplots', action='store_true', help='save no plot files') + parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') + parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') + parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk') + parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') + 
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') + parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') + parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW', 'LION'], default='SGD', help='optimizer') + parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--project', default=ROOT / 'runs/train-seg', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--quad', action='store_true', help='quad dataloader') + parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') + parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') + parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') + parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') + parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') + parser.add_argument('--seed', type=int, default=0, help='Global training seed') + parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') + parser.add_argument('--close-mosaic', type=int, default=0, help='Experimental') + + # Instance Segmentation Args + parser.add_argument('--mask-ratio', type=int, default=4, help='Downsample the truth masks to saving memory') + parser.add_argument('--no-overlap', action='store_true', help='Overlap masks train faster at slightly less mAP') + + return parser.parse_known_args()[0] if known else parser.parse_args() + + +def main(opt, callbacks=Callbacks()): + # Checks + if RANK in {-1, 0}: + print_args(vars(opt)) + #check_git_status() + #check_requirements() + + # Resume + if opt.resume and not opt.evolve: # resume from specified or most recent last.pt + last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run()) + opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml + opt_data = opt.data # original dataset + if opt_yaml.is_file(): + with open(opt_yaml, errors='ignore') as f: + d = yaml.safe_load(f) + else: + d = torch.load(last, map_location='cpu')['opt'] + opt = argparse.Namespace(**d) # replace + opt.cfg, opt.weights, opt.resume = '', str(last), True # reinstate + if is_url(opt_data): + opt.data = check_file(opt_data) # avoid HUB resume auth timeout + else: + opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \ + check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks + assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' + if opt.evolve: + if opt.project == str(ROOT / 'runs/train'): # if default project name, rename to runs/evolve + opt.project = str(ROOT / 'runs/evolve') + opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume + if opt.name == 'cfg': + opt.name = Path(opt.cfg).stem # use model.yaml as name + opt.save_dir = 
str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) + + # DDP mode + device = select_device(opt.device, batch_size=opt.batch_size) + if LOCAL_RANK != -1: + msg = 'is not compatible with YOLO Multi-GPU DDP training' + assert not opt.image_weights, f'--image-weights {msg}' + assert not opt.evolve, f'--evolve {msg}' + assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size' + assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE' + assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + torch.cuda.set_device(LOCAL_RANK) + device = torch.device('cuda', LOCAL_RANK) + dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + + # Train + if not opt.evolve: + train(opt.hyp, opt, device, callbacks) + + # Evolve hyperparameters (optional) + else: + # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit) + meta = { + 'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) + 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) + 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1 + 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay + 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok) + 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum + 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr + 'box': (1, 0.02, 0.2), # box loss gain + 'cls': (1, 0.2, 4.0), # cls loss gain + 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight + 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels) + 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight + 'iou_t': (0, 0.1, 0.7), # IoU training threshold + 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold + 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore) + 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5) + 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction) + 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction) + 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction) + 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg) + 'translate': (1, 0.0, 0.9), # image translation (+/- fraction) + 'scale': (1, 0.0, 0.9), # image scale (+/- gain) + 'shear': (1, 0.0, 10.0), # image shear (+/- deg) + 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 + 'flipud': (1, 0.0, 1.0), # image flip up-down (probability) + 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability) + 'mosaic': (1, 0.0, 1.0), # image mixup (probability) + 'mixup': (1, 0.0, 1.0), # image mixup (probability) + 'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability) + + with open(opt.hyp, errors='ignore') as f: + hyp = yaml.safe_load(f) # load hyps dict + if 'anchors' not in hyp: # anchors commented in hyp.yaml + hyp['anchors'] = 3 + if opt.noautoanchor: + del hyp['anchors'], meta['anchors'] + opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch + # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices + evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' + if opt.bucket: + os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}') # download evolve.csv if exists + + for _ in range(opt.evolve): # generations to evolve + if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate + # Select 
parent(s) + parent = 'single' # parent selection method: 'single' or 'weighted' + x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1) + n = min(5, len(x)) # number of previous results to consider + x = x[np.argsort(-fitness(x))][:n] # top n mutations + w = fitness(x) - fitness(x).min() + 1E-6 # weights (sum > 0) + if parent == 'single' or len(x) == 1: + # x = x[random.randint(0, n - 1)] # random selection + x = x[random.choices(range(n), weights=w)[0]] # weighted selection + elif parent == 'weighted': + x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination + + # Mutate + mp, s = 0.8, 0.2 # mutation probability, sigma + npr = np.random + npr.seed(int(time.time())) + g = np.array([meta[k][0] for k in hyp.keys()]) # gains 0-1 + ng = len(meta) + v = np.ones(ng) + while all(v == 1): # mutate until a change occurs (prevent duplicates) + v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0) + for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300) + hyp[k] = float(x[i + 7] * v[i]) # mutate + + # Constrain to limits + for k, v in meta.items(): + hyp[k] = max(hyp[k], v[1]) # lower limit + hyp[k] = min(hyp[k], v[2]) # upper limit + hyp[k] = round(hyp[k], 5) # significant digits + + # Train mutation + results = train(hyp.copy(), opt, device, callbacks) + callbacks = Callbacks() + # Write mutation results + print_mutation(KEYS, results, hyp.copy(), save_dir, opt.bucket) + + # Plot results + plot_evolve(evolve_csv) + LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n' + f"Results saved to {colorstr('bold', save_dir)}\n" + f'Usage example: $ python train.py --hyp {evolve_yaml}') + + +def run(**kwargs): + # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolo.pt') + opt = parse_opt(True) + for k, v in kwargs.items(): + setattr(opt, k, v) + main(opt) + return opt + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/segment/val.py b/segment/val.py new file mode 100644 index 0000000000000000000000000000000000000000..479a09a086d7f394a8ce544e86d59759e112bc59 --- /dev/null +++ b/segment/val.py @@ -0,0 +1,457 @@ +import argparse +import json +import os +import sys +from multiprocessing.pool import ThreadPool +from pathlib import Path + +import numpy as np +import torch +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLO root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +import torch.nn.functional as F + +from models.common import DetectMultiBackend +from models.yolo import SegmentationModel +from utils.callbacks import Callbacks +from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, + check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, + non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh) +from utils.metrics import ConfusionMatrix, box_iou +from utils.plots import output_to_target, plot_val_study +from utils.segment.dataloaders import create_dataloader +from utils.segment.general import mask_iou, process_mask, process_mask_upsample, scale_image +from utils.segment.metrics import Metrics, ap_per_class_box_and_mask +from utils.segment.plots import plot_images_and_masks +from utils.torch_utils import de_parallel, select_device, smart_inference_mode + + +def save_one_txt(predn, save_conf, shape, file): + # Save one txt result + gn = 
torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh + for *xyxy, conf, cls in predn.tolist(): + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format + with open(file, 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + +def save_one_json(predn, jdict, path, class_map, pred_masks): + # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} + from pycocotools.mask import encode + + def single_encode(x): + rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0] + rle["counts"] = rle["counts"].decode("utf-8") + return rle + + image_id = int(path.stem) if path.stem.isnumeric() else path.stem + box = xyxy2xywh(predn[:, :4]) # xywh + box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner + pred_masks = np.transpose(pred_masks, (2, 0, 1)) + with ThreadPool(NUM_THREADS) as pool: + rles = pool.map(single_encode, pred_masks) + for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())): + jdict.append({ + 'image_id': image_id, + 'category_id': class_map[int(p[5])], + 'bbox': [round(x, 3) for x in b], + 'score': round(p[4], 5), + 'segmentation': rles[i]}) + + +def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False): + """ + Return correct prediction matrix + Arguments: + detections (array[N, 6]), x1, y1, x2, y2, conf, class + labels (array[M, 5]), class, x1, y1, x2, y2 + Returns: + correct (array[N, 10]), for 10 IoU levels + """ + if masks: + if overlap: + nl = len(labels) + index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1 + gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) + gt_masks = torch.where(gt_masks == index, 1.0, 0.0) + if gt_masks.shape[1:] != pred_masks.shape[1:]: + gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0] + gt_masks = gt_masks.gt_(0.5) + iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) + else: # boxes + iou = box_iou(labels[:, 1:], detections[:, :4]) + + correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) + correct_class = labels[:, 0:1] == detections[:, 5] + for i in range(len(iouv)): + x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + # matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + correct[matches[:, 1].astype(int), i] = True + return torch.tensor(correct, dtype=torch.bool, device=iouv.device) + + +@smart_inference_mode() +def run( + data, + weights=None, # model.pt path(s) + batch_size=32, # batch size + imgsz=640, # inference size (pixels) + conf_thres=0.001, # confidence threshold + iou_thres=0.6, # NMS IoU threshold + max_det=300, # maximum detections per image + task='val', # train, val, test, speed or study + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + workers=8, # max dataloader workers (per RANK in DDP mode) + single_cls=False, # treat as single-class dataset + augment=False, # augmented inference + verbose=False, # verbose output + save_txt=False, # save results to *.txt + save_hybrid=False, # save label+prediction hybrid results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_json=False, # save a COCO-JSON results file + project=ROOT / 'runs/val-seg', # save to project/name + name='exp', # save to project/name + exist_ok=False, # existing project/name ok, do not increment + half=True, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + model=None, + dataloader=None, + save_dir=Path(''), + plots=True, + overlap=False, + mask_downsample_ratio=1, + compute_loss=None, + callbacks=Callbacks(), +): + if save_json: + check_requirements(['pycocotools']) + process = process_mask_upsample # more accurate + else: + process = process_mask # faster + + # Initialize/load model and set device + training = model is not None + if training: # called by train.py + device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model + half &= device.type != 'cpu' # half precision only supported on CUDA + model.half() if half else model.float() + nm = de_parallel(model).model[-1].nm # number of masks + else: # called directly + device = select_device(device, batch_size=batch_size) + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Load model + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine + imgsz = check_img_size(imgsz, s=stride) # check image size + half = model.fp16 # FP16 supported on limited backends with CUDA + nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32 # number of masks + if engine: + batch_size = model.batch_size + else: + device = model.device + if not (pt or jit): + batch_size = 1 # export.py models default to batch-size 1 + LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') + + # Data + data = check_dataset(data) # check + + # Configure + model.eval() + cuda = device.type != 'cpu' + #is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset + is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'val2017.txt') # COCO dataset + nc = 1 if single_cls else int(data['nc']) # number of classes + iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95 + niou = iouv.numel() + + # Dataloader + if not training: + if pt and not single_cls: # check --weights are trained on --data + ncm = model.model.nc + assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ + f'classes). Pass correct combination of --weights and --data that are trained together.' 
+ model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup + pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks + task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images + dataloader = create_dataloader(data[task], + imgsz, + batch_size, + stride, + single_cls, + pad=pad, + rect=rect, + workers=workers, + prefix=colorstr(f'{task}: '), + overlap_mask=overlap, + mask_downsample_ratio=mask_downsample_ratio)[0] + + seen = 0 + confusion_matrix = ConfusionMatrix(nc=nc) + names = model.names if hasattr(model, 'names') else model.module.names # get class names + if isinstance(names, (list, tuple)): # old format + names = dict(enumerate(names)) + class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) + s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', "R", "mAP50", "mAP50-95)", "Mask(P", "R", + "mAP50", "mAP50-95)") + dt = Profile(), Profile(), Profile() + metrics = Metrics() + loss = torch.zeros(4, device=device) + jdict, stats = [], [] + # callbacks.run('on_val_start') + pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar + for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar): + # callbacks.run('on_val_batch_start') + with dt[0]: + if cuda: + im = im.to(device, non_blocking=True) + targets = targets.to(device) + masks = masks.to(device) + masks = masks.float() + im = im.half() if half else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + nb, _, height, width = im.shape # batch size, channels, height, width + + # Inference + with dt[1]: + preds, train_out = model(im)# if compute_loss else (*model(im, augment=augment)[:2], None) + #train_out, preds, protos = p if len(p) == 3 else p[1] + #preds = p + #train_out = p[1][0] if len(p[1]) == 3 else p[0] + protos = train_out[-1] + #print(preds.shape) + #print(train_out[0].shape) + #print(train_out[1].shape) + #print(train_out[2].shape) + + # Loss + if compute_loss: + loss += compute_loss(train_out, targets, masks)[1] # box, obj, cls + + # NMS + targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels + lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling + with dt[2]: + preds = non_max_suppression(preds, + conf_thres, + iou_thres, + labels=lb, + multi_label=True, + agnostic=single_cls, + max_det=max_det, + nm=nm) + + # Metrics + plot_masks = [] # masks for plotting + for si, (pred, proto) in enumerate(zip(preds, protos)): + labels = targets[targets[:, 0] == si, 1:] + nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions + path, shape = Path(paths[si]), shapes[si][0] + correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init + correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init + seen += 1 + + if npr == 0: + if nl: + stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0])) + if plots: + confusion_matrix.process_batch(detections=None, labels=labels[:, 0]) + continue + + # Masks + midx = [si] if overlap else targets[:, 0] == si + gt_masks = masks[midx] + pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:]) + + # Predictions + if single_cls: + pred[:, 5] = 0 + predn = pred.clone() + scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred + + # Evaluate + if nl: + tbox = xywh2xyxy(labels[:, 1:5]) # target boxes + 
scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels + labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels + correct_bboxes = process_batch(predn, labelsn, iouv) + correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True) + if plots: + confusion_matrix.process_batch(predn, labelsn) + stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0])) # (conf, pcls, tcls) + + pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8) + if plots and batch_i < 3: + plot_masks.append(pred_masks[:15].cpu()) # filter top 15 to plot + + # Save/log + if save_txt: + save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') + if save_json: + pred_masks = scale_image(im[si].shape[1:], + pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1]) + save_one_json(predn, jdict, path, class_map, pred_masks) # append to COCO-JSON dictionary + # callbacks.run('on_val_image_end', pred, predn, path, names, im[si]) + + # Plot images + if plots and batch_i < 3: + if len(plot_masks): + plot_masks = torch.cat(plot_masks, dim=0) + plot_images_and_masks(im, targets, masks, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names) + plot_images_and_masks(im, output_to_target(preds, max_det=15), plot_masks, paths, + save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred + + # callbacks.run('on_val_batch_end') + + # Compute metrics + stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy + if len(stats) and stats[0].any(): + results = ap_per_class_box_and_mask(*stats, plot=plots, save_dir=save_dir, names=names) + metrics.update(results) + nt = np.bincount(stats[4].astype(int), minlength=nc) # number of targets per class + + # Print results + pf = '%22s' + '%11i' * 2 + '%11.3g' * 8 # print format + LOGGER.info(pf % ("all", seen, nt.sum(), *metrics.mean_results())) + if nt.sum() == 0: + LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels') + + # Print results per class + if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): + for i, c in enumerate(metrics.ap_class_index): + LOGGER.info(pf % (names[c], seen, nt[c], *metrics.class_result(i))) + + # Print speeds + t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image + if not training: + shape = (batch_size, 3, imgsz, imgsz) + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) + + # Plots + if plots: + confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) + # callbacks.run('on_val_end') + + mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask = metrics.mean_results() + + # Save JSON + if save_json and len(jdict): + w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights + anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json + pred_json = str(save_dir / f"{w}_predictions.json") # predictions json + LOGGER.info(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') + with open(pred_json, 'w') as f: + json.dump(jdict, f) + + try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb + from pycocotools.coco import COCO + from pycocotools.cocoeval import COCOeval + + anno = COCO(anno_json) # init annotations api + pred = anno.loadRes(pred_json) # init predictions api + results = [] + for eval in COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm'): + if is_coco: + eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # img ID to evaluate + eval.evaluate() + eval.accumulate() + eval.summarize() + results.extend(eval.stats[:2]) # update results (mAP@0.5:0.95, mAP@0.5) + map_bbox, map50_bbox, map_mask, map50_mask = results + except Exception as e: + LOGGER.info(f'pycocotools unable to run: {e}') + + # Return results + model.float() # for training + if not training: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") + final_metric = mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask + return (*final_metric, *(loss.cpu() / len(dataloader)).tolist()), metrics.get_maps(nc), t + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolo-seg.pt', help='model path(s)') + parser.add_argument('--batch-size', type=int, default=32, help='batch size') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image') + parser.add_argument('--task', default='val', help='train, val, test, speed or study') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--verbose', action='store_true', help='report mAP by class') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') + parser.add_argument('--project', default=ROOT / 'runs/val-seg', help='save results to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + opt = parser.parse_args() + opt.data = check_yaml(opt.data) # check YAML + # opt.save_json |= opt.data.endswith('coco.yaml') + opt.save_txt |= opt.save_hybrid + print_args(vars(opt)) + return opt + + +def main(opt): + #check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) + + if opt.task in ('train', 'val', 'test'): # run normally + if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 + LOGGER.warning(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results') + if opt.save_hybrid: + LOGGER.warning('WARNING ⚠️ --save-hybrid returns high mAP from hybrid labels, not from predictions alone') + run(**vars(opt)) + + else: + weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] + opt.half = torch.cuda.is_available() and opt.device != 'cpu' # FP16 for fastest results + if opt.task == 'speed': # speed benchmarks + # python val.py --task speed --data coco.yaml --batch 1 --weights yolo.pt... + opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False + for opt.weights in weights: + run(**vars(opt), plots=False) + + elif opt.task == 'study': # speed vs mAP benchmarks + # python val.py --task study --data coco.yaml --iou 0.7 --weights yolo.pt... 
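+            # Sweep --imgsz from 256 to 1536 in steps of 128, appending metrics and
+            # per-image speeds for each size to a study_*.txt file for later plotting.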
+ for opt.weights in weights: + f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt' # filename to save to + x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis + for opt.imgsz in x: # img-size + LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...') + r, _, t = run(**vars(opt), plots=False) + y.append(r + t) # results and times + np.savetxt(f, y, fmt='%10.4g') # save + os.system('zip -r study.zip study_*.txt') + plot_val_study(x=x) # plot + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/segment/val_dual.py b/segment/val_dual.py new file mode 100644 index 0000000000000000000000000000000000000000..c30f12fa26853158b0ecced26b7a4eb69888db3c --- /dev/null +++ b/segment/val_dual.py @@ -0,0 +1,458 @@ +import argparse +import json +import os +import sys +from multiprocessing.pool import ThreadPool +from pathlib import Path + +import numpy as np +import torch +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLO root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +import torch.nn.functional as F + +from models.common import DetectMultiBackend +from models.yolo import SegmentationModel +from utils.callbacks import Callbacks +from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, + check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, + non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh) +from utils.metrics import ConfusionMatrix, box_iou +from utils.plots import output_to_target, plot_val_study +from utils.segment.dataloaders import create_dataloader +from utils.segment.general import mask_iou, process_mask, process_mask_upsample, scale_image +from utils.segment.metrics import Metrics, ap_per_class_box_and_mask +from utils.segment.plots import plot_images_and_masks +from utils.torch_utils import de_parallel, select_device, smart_inference_mode + + +def save_one_txt(predn, save_conf, shape, file): + # Save one txt result + gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh + for *xyxy, conf, cls in predn.tolist(): + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format + with open(file, 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + +def save_one_json(predn, jdict, path, class_map, pred_masks): + # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} + from pycocotools.mask import encode + + def single_encode(x): + rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0] + rle["counts"] = rle["counts"].decode("utf-8") + return rle + + image_id = int(path.stem) if path.stem.isnumeric() else path.stem + box = xyxy2xywh(predn[:, :4]) # xywh + box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner + pred_masks = np.transpose(pred_masks, (2, 0, 1)) + with ThreadPool(NUM_THREADS) as pool: + rles = pool.map(single_encode, pred_masks) + for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())): + jdict.append({ + 'image_id': image_id, + 'category_id': class_map[int(p[5])], + 'bbox': [round(x, 3) for x in b], + 'score': round(p[4], 5), + 'segmentation': rles[i]}) + + +def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False): + """ + Return correct 
prediction matrix + Arguments: + detections (array[N, 6]), x1, y1, x2, y2, conf, class + labels (array[M, 5]), class, x1, y1, x2, y2 + Returns: + correct (array[N, 10]), for 10 IoU levels + """ + if masks: + if overlap: + nl = len(labels) + index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1 + gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) + gt_masks = torch.where(gt_masks == index, 1.0, 0.0) + if gt_masks.shape[1:] != pred_masks.shape[1:]: + gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0] + gt_masks = gt_masks.gt_(0.5) + iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) + else: # boxes + iou = box_iou(labels[:, 1:], detections[:, :4]) + + correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) + correct_class = labels[:, 0:1] == detections[:, 5] + for i in range(len(iouv)): + x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + # matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + correct[matches[:, 1].astype(int), i] = True + return torch.tensor(correct, dtype=torch.bool, device=iouv.device) + + +@smart_inference_mode() +def run( + data, + weights=None, # model.pt path(s) + batch_size=32, # batch size + imgsz=640, # inference size (pixels) + conf_thres=0.001, # confidence threshold + iou_thres=0.6, # NMS IoU threshold + max_det=300, # maximum detections per image + task='val', # train, val, test, speed or study + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + workers=8, # max dataloader workers (per RANK in DDP mode) + single_cls=False, # treat as single-class dataset + augment=False, # augmented inference + verbose=False, # verbose output + save_txt=False, # save results to *.txt + save_hybrid=False, # save label+prediction hybrid results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_json=False, # save a COCO-JSON results file + project=ROOT / 'runs/val-seg', # save to project/name + name='exp', # save to project/name + exist_ok=False, # existing project/name ok, do not increment + half=True, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + model=None, + dataloader=None, + save_dir=Path(''), + plots=True, + overlap=False, + mask_downsample_ratio=1, + compute_loss=None, + callbacks=Callbacks(), +): + if save_json: + check_requirements(['pycocotools']) + process = process_mask_upsample # more accurate + else: + process = process_mask # faster + + # Initialize/load model and set device + training = model is not None + if training: # called by train.py + device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model + half &= device.type != 'cpu' # half precision only supported on CUDA + model.half() if half else model.float() + nm = de_parallel(model).model[-1].nm # number of masks + else: # called directly + device = select_device(device, batch_size=batch_size) + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Load model + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine + imgsz = check_img_size(imgsz, s=stride) # check image size + half = model.fp16 # FP16 supported on limited backends with CUDA + nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32 # number of masks + if engine: + batch_size = model.batch_size + else: + device = model.device + if not (pt or jit): + batch_size = 1 # export.py models default to batch-size 1 + LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') + + # Data + data = check_dataset(data) # check + + # Configure + model.eval() + cuda = device.type != 'cpu' + #is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset + is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'val2017.txt') # COCO dataset + nc = 1 if single_cls else int(data['nc']) # number of classes + iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95 + niou = iouv.numel() + + # Dataloader + if not training: + if pt and not single_cls: # check --weights are trained on --data + ncm = model.model.nc + assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ + f'classes). Pass correct combination of --weights and --data that are trained together.' 
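+        # NOTE: this run() mirrors the single-branch segment validation script above; the
+        # main difference is how the dual-branch forward output is unpacked below, where
+        # (preds, train_out) is returned and mask prototypes are taken from train_out[-1].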
+ model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup + pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks + task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images + dataloader = create_dataloader(data[task], + imgsz, + batch_size, + stride, + single_cls, + pad=pad, + rect=rect, + workers=workers, + prefix=colorstr(f'{task}: '), + overlap_mask=overlap, + mask_downsample_ratio=mask_downsample_ratio)[0] + + seen = 0 + confusion_matrix = ConfusionMatrix(nc=nc) + names = model.names if hasattr(model, 'names') else model.module.names # get class names + if isinstance(names, (list, tuple)): # old format + names = dict(enumerate(names)) + class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) + s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', "R", "mAP50", "mAP50-95)", "Mask(P", "R", + "mAP50", "mAP50-95)") + dt = Profile(), Profile(), Profile() + metrics = Metrics() + loss = torch.zeros(4, device=device) + jdict, stats = [], [] + # callbacks.run('on_val_start') + pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar + for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar): + # callbacks.run('on_val_batch_start') + with dt[0]: + if cuda: + im = im.to(device, non_blocking=True) + targets = targets.to(device) + masks = masks.to(device) + masks = masks.float() + im = im.half() if half else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + nb, _, height, width = im.shape # batch size, channels, height, width + + # Inference + with dt[1]: + preds, train_out = model(im)# if compute_loss else (*model(im, augment=augment)[:2], None) + #preds = preds[1] + #train_out, preds, protos = p if len(p) == 3 else p[1] + #preds = p + #train_out = p[1][0] if len(p[1]) == 3 else p[0] + protos = train_out[-1] + #print(preds.shape) + #print(train_out[0].shape) + #print(train_out[1].shape) + #print(train_out[2].shape) + + # Loss + #if compute_loss: + # loss += compute_loss(train_out, targets, masks)[1] # box, obj, cls + + # NMS + targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels + lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling + with dt[2]: + preds = non_max_suppression(preds, + conf_thres, + iou_thres, + labels=lb, + multi_label=True, + agnostic=single_cls, + max_det=max_det, + nm=nm) + + # Metrics + plot_masks = [] # masks for plotting + for si, (pred, proto) in enumerate(zip(preds, protos)): + labels = targets[targets[:, 0] == si, 1:] + nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions + path, shape = Path(paths[si]), shapes[si][0] + correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init + correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init + seen += 1 + + if npr == 0: + if nl: + stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0])) + if plots: + confusion_matrix.process_batch(detections=None, labels=labels[:, 0]) + continue + + # Masks + midx = [si] if overlap else targets[:, 0] == si + gt_masks = masks[midx] + pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:]) + + # Predictions + if single_cls: + pred[:, 5] = 0 + predn = pred.clone() + scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred + + # Evaluate + if nl: + tbox = xywh2xyxy(labels[:, 1:5]) # target boxes + 
scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels + labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels + correct_bboxes = process_batch(predn, labelsn, iouv) + correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True) + if plots: + confusion_matrix.process_batch(predn, labelsn) + stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0])) # (conf, pcls, tcls) + + pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8) + if plots and batch_i < 3: + plot_masks.append(pred_masks[:15].cpu()) # filter top 15 to plot + + # Save/log + if save_txt: + save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') + if save_json: + pred_masks = scale_image(im[si].shape[1:], + pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1]) + save_one_json(predn, jdict, path, class_map, pred_masks) # append to COCO-JSON dictionary + # callbacks.run('on_val_image_end', pred, predn, path, names, im[si]) + + # Plot images + if plots and batch_i < 3: + if len(plot_masks): + plot_masks = torch.cat(plot_masks, dim=0) + plot_images_and_masks(im, targets, masks, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names) + plot_images_and_masks(im, output_to_target(preds, max_det=15), plot_masks, paths, + save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred + + # callbacks.run('on_val_batch_end') + + # Compute metrics + stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy + if len(stats) and stats[0].any(): + results = ap_per_class_box_and_mask(*stats, plot=plots, save_dir=save_dir, names=names) + metrics.update(results) + nt = np.bincount(stats[4].astype(int), minlength=nc) # number of targets per class + + # Print results + pf = '%22s' + '%11i' * 2 + '%11.3g' * 8 # print format + LOGGER.info(pf % ("all", seen, nt.sum(), *metrics.mean_results())) + if nt.sum() == 0: + LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels') + + # Print results per class + if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): + for i, c in enumerate(metrics.ap_class_index): + LOGGER.info(pf % (names[c], seen, nt[c], *metrics.class_result(i))) + + # Print speeds + t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image + if not training: + shape = (batch_size, 3, imgsz, imgsz) + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) + + # Plots + if plots: + confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) + # callbacks.run('on_val_end') + + mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask = metrics.mean_results() + + # Save JSON + if save_json and len(jdict): + w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights + anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json + pred_json = str(save_dir / f"{w}_predictions.json") # predictions json + LOGGER.info(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') + with open(pred_json, 'w') as f: + json.dump(jdict, f) + + try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb + from pycocotools.coco import COCO + from pycocotools.cocoeval import COCOeval + + anno = COCO(anno_json) # init annotations api + pred = anno.loadRes(pred_json) # init predictions api + results = [] + for eval in COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm'): + if is_coco: + eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # img ID to evaluate + eval.evaluate() + eval.accumulate() + eval.summarize() + results.extend(eval.stats[:2]) # update results (mAP@0.5:0.95, mAP@0.5) + map_bbox, map50_bbox, map_mask, map50_mask = results + except Exception as e: + LOGGER.info(f'pycocotools unable to run: {e}') + + # Return results + model.float() # for training + if not training: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") + final_metric = mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask + return (*final_metric, *(loss.cpu() / len(dataloader)).tolist()), metrics.get_maps(nc), t + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolo-seg.pt', help='model path(s)') + parser.add_argument('--batch-size', type=int, default=32, help='batch size') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image') + parser.add_argument('--task', default='val', help='train, val, test, speed or study') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--verbose', action='store_true', help='report mAP by class') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') + parser.add_argument('--project', default=ROOT / 'runs/val-seg', help='save results to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + opt = parser.parse_args() + opt.data = check_yaml(opt.data) # check YAML + # opt.save_json |= opt.data.endswith('coco.yaml') + opt.save_txt |= opt.save_hybrid + print_args(vars(opt)) + return opt + + +def main(opt): + #check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) + + if opt.task in ('train', 'val', 'test'): # run normally + if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 + LOGGER.warning(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results') + if opt.save_hybrid: + LOGGER.warning('WARNING ⚠️ --save-hybrid returns high mAP from hybrid labels, not from predictions alone') + run(**vars(opt)) + + else: + weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] + opt.half = torch.cuda.is_available() and opt.device != 'cpu' # FP16 for fastest results + if opt.task == 'speed': # speed benchmarks + # python val.py --task speed --data coco.yaml --batch 1 --weights yolo.pt... + opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False + for opt.weights in weights: + run(**vars(opt), plots=False) + + elif opt.task == 'study': # speed vs mAP benchmarks + # python val.py --task study --data coco.yaml --iou 0.7 --weights yolo.pt... 
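+            # Same speed-vs-mAP study as in the single-branch script: sweep the image sizes,
+            # save one study_*.txt per weights file, then zip the results into study.zip.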
+ for opt.weights in weights: + f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt' # filename to save to + x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis + for opt.imgsz in x: # img-size + LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...') + r, _, t = run(**vars(opt), plots=False) + y.append(r + t) # results and times + np.savetxt(f, y, fmt='%10.4g') # save + os.system('zip -r study.zip study_*.txt') + plot_val_study(x=x) # plot + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/streamlit.ipynb b/streamlit.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..3ea78fb289622c92cb0fff2f471027f74cb02652 --- /dev/null +++ b/streamlit.ipynb @@ -0,0 +1,231 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/Users/sompoteyouwai/env/YOLO/YOLO9tr/yolov9\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/sompoteyouwai/Library/Python/3.11/lib/python/site-packages/IPython/core/magics/osm.py:417: UserWarning: using dhist requires you to install the `pickleshare` library.\n", + " self.shell.db['dhist'] = compress_dhist(dhist)[-100:]\n" + ] + } + ], + "source": [ + "%cd yolov9" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mdetect_dual: \u001b[0mweights=['models/detect/yolov9tr.pt'], source=/Users/sompoteyouwai/env/YOLO/YOLO9tr/United_States_000062.jpg, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=cpu, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=yolov9_c_640_detect, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1\n", + "YOLO 🚀 2024-7-27 Python-3.11.4 torch-2.2.2 CPU\n", + "\n", + "Fusing layers... 
\n", + "yolov9-sKMUTT summary: 721 layers, 10053866 parameters, 0 gradients, 40.2 GFLOPs\n", + "image 1/1 /Users/sompoteyouwai/env/YOLO/YOLO9tr/United_States_000062.jpg: 640x640 1 D00, 3 D10s, 651.1ms\n", + "Speed: 3.1ms pre-process, 651.1ms inference, 2.8ms NMS per image at shape (1, 3, 640, 640)\n", + "Results saved to \u001b[1mruns/detect/yolov9_c_640_detect5\u001b[0m\n" + ] + } + ], + "source": [ + "!python detect_dual.py --source '/Users/sompoteyouwai/env/YOLO/YOLO9tr/United_States_000062.jpg' --img 640 --device cpu \\\n", + "--weights 'models/detect/yolov9tr.pt' --name yolov9_c_640_detect\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYUAAAGFCAYAAAASI+9IAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/TGe4hAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOz9R5MkSbagi32qxpwHJxmRrLKyWFd19e3u2zNz33szeBCZBUSA9wQrbLHB78GfwAoiWGCBBZ7MwxBgZu69c8k0L5o8OHXu5kZUsTDiZubmERaRkVU1V/JURUa4uZrqMTXVw89RobXWvIf38B7ew3t4D4D8sRF4D+/hPbyH9/DTgfdM4T28h/fwHt5DCu+Zwnt4D+/hPbyHFN4zhffwHt7De3gPKbxnCu/hPbyH9/AeUnjPFN7De3gP7+E9pPCeKbyH9/Ae3sN7SOE9U3gP7+E9vIf3kIJZteH/9d9PgWyem0YBGolGoBGgNAIAgUACgvnUOI0Qyd8SrUGFxH1HX1TJp1vURsw6L21Xdp9SES5SRveGoUIIke9Lk+I3u+/6vufwBkKtyUxCOU7o3HBaJx8EaEH53DJ3TaPRKLTWmT6I30+2lWL2fmXUXsXjIaJ+RNRbglfan5ZkLiJQBZyK80TcIj8HZfMgxPVtrhrrumtJ/z90DqfWGqXUwu+L6/gqCMMwXa9CiAXPIiiu33mc8js8wbPYd7osMm2qvLsyuLaNFoBxNd4oiNfvVfOWPEtxvOw1pSRSWGitEALCMMAwBUHg4/sepmlgWTZhGGIYBlJKlFLpO5Ay2jvT6RTLsjBNEyEEYRiitUbK4r7zIty1CZigDcBACAUiei4hBEpplPaBEISi1WoSBgrPU4BEChOt8jK+1vlnFELwf/u/OFfOJdyAKWgkZAkCOv4o0AKEFiBImcLc6roKhIpfPukD3HaT3ua+aLwZQb3JhvxvBWbE78Z3xv8K4rf+TwKS+XibtfYefgjQZAXGH2K8aGkItI4Iv1IBYRhimiamaaCUwjAiRpUwA9M0U0bveR62bacMA5gXMlMQJT8pKiBAKYWQAlMYgMBxLMbjCQKJlDZaRUxDFHenSOYOEJKqZK06U5jjcAnWsQ1KZF6cBmJCWz4ReeSjNrMJKXL05Nq1OFbUHha1KZMc/ilA8mzpc1WigSKziH4YhvCu5r2s3+x8LBr3XTOL66TaKu2qQpUn0aJiwxJ4Z3tGEEsyixET+X/eah8b0kCpEIikeo0iDEIsy4wJ+4zAK6XSdRSGIQCu61Kr1VImkfws0uIEEoQEjEjjTnHOCMkSlPIxDIFlmXiejyFNBCZay5nGXnzcDP0VouT7BVCZKSTIapHhR1ojdGw80hqdtEn/qdq1yN0jxG0k2tvDDz3ejwGJNhTNdYmdoPwu0pUkxNtRjZ8YlDGEuQ37T02LuI4o/FQfVSw2sxXhbd+XH3iAwLZtPM/D96exVpBoArEgHAvJUkqklIRhyHQ6pdFoLGQApbiJxDQWM4Qc8RSARBBSr9toNFN3imFYaC1QKvneiN9tQPYlRmtb39gCUp0pEBKpVAIVsx2RsSNn1byEdCwitmWaQMToZ/qHKKzgKi970Sa+/t4CLrdkEpWJyA+oiETvIn0jN0SgoP2VUg0x//GaKYjWanHO3y0Bvkrz/LEIf1VtuIxZFT9X2fBVPF5XTcVNNPGbzGlWsv8xfDtZn4llRc+iVIBSPvV6HRGbcKTMm4yS+6fTKb7vU6vVUvx9369kopQiYi4p5RPRO0h8E4ZhYFkCP5wSBCGGYaK1QCuJEEasLQi0ChCyXKOKtBsNVGOulZlC3QjwQo0QJgJJoCLnooiFTlG6yef7mU1Otn3e8VpFYrnOJDA/3uL7Ewn6pk7H25i4Zvfd7t58P/PXSoUR3kYIjBhDmfEob17KCgjFdsVnTe693hH5riEZ97aBC7eF6whpQkjKnKPFe/POyxKcC79vimdu/Fv08XZwI+fklcxxETPNzzv4vofvB1iWhVIhUsrUXJSMEzEJied5CCGo1+szjEveUe6JckwwMUklhDtiQL4/pV6vI6Vk6rkgFFJIlCK6R4uMsBdboObM8rczoVVmCm0rYILGDRW+NjGwYnORQGiJiOJlbgHVCek/KVX+PbyHAswTqLyJ6zpicxVcbZWPx7tVz7eHn4LvLpnzxCk8cSeEoY/j2NiOCVrG/oKESYNpWigVpBFGiVRfdawyDUJKYi0hRBpQs0ykoQlDPzbEJNGcRhyhJFObTBSpdLWWdxOozBT2n33N5s4DmnaLnusjpZnwqHgxyVJ5sigRJpN7G7iN1PZTWHjv4e7gx5LcfyhY9Hw/ROisFgL5rvzFP8F9mMxlEkkUBAGGlDhOLWUSSgUxQQbDMIm0hBDP85BSpvdWZdhlmglolA4RKIRUWJaFAFx3RBhGTCgK5zGQGCCMmAFohEzCVuGu2HplpiD9AdobgJQYwgZT44czE0IaJJBC+SRVXdNzzYp93WBzVImXvjE+bwNZ8/4CSBVDXfhcqfO3gQoLa84C+MMQzx+CcL8rpnPbsYvmm7cmrotuL7zTcgPUTTpc0PoO9uK7AM/zUEphmhJQhGFEbKU0kNIgDMOYUWgmExfbNrEsK70+79fJCsgi/Tfn2UsYglIIobEsE9OS+P40cmgLMIyYieisiTDREPTst5j3w94WKjOFg2//I5b4kq2nv8STBhMUoQS
c0F66YDz8LVJ0udg/5lV3n8cPN2mtrdId+bz4dp/J9IT7j5q0G3UaTp3xdMD2RgNHbjEeak5HQwIbNneWcYMBy7v32N5e5/jga5Y7j9l//R37B138wGZj+xFaCDy1wdLSBhs7Gwwmp9TqGssOqa+1CKYG/UGX+pJk0rtg2O1zfDhke3uJNycHPHq4wuX5IYb0WW47rP/FxxCa7L0+ZtifcG97jXrdoTs4o16zCUTAyfCCy2mf/jjg1flLzN5rllpLPNh+zOGrU0bdOz557YNPvmTvzWsm7piJH9DpLCOxGPqCB08/5dVBpGo9ePCQ7d0n1Oomjc4yf/7jH3C1w9rqDv1uH1lbY+ANEcLBabe5t7lNc3WDre2HqMDANCymXoAsPdIxb0vMkVJdZspJNnRBkqwEc6R1/vIcAS6KPzrF7creS+1KRYa2QGoumDd0+bmhpfddZWaZZwJFhnszM03+EZM5mmenedCFT4u4YN4pU8YArpW4Sx+leFOiipZofhUg/54q3bIIsevv0jpXjnomwRspo0jw9ryImFu2nYaCR5J4Ifc+NhsJosqplmWlkUyzaKas7T+P05w2cM07ieZWobWK8hriTiPUNEkpjehM5CBiEkojUQgmtOs+Dj28wRuWHI+9gz08f0CrWcN1XUxhonEYjTXSEOzv7VOv2/j+FBX66UFqWxub9M+7fPnlp9j2GG0EtFbXONh7xmjQ5fl3A1qNZZQ3YGf3Y7Z2t1DCRhBycnLE0vIa337znMvzc7Rn8OXnHyMaNpNxj5pTx+15vHr5huEwpNXRtCYNBmML07LZ2tzB9y54/OQJb14esrqxFCUGN2qYBti1EAyXvnvKyxfPWFpbwW47fPz5LpZtMxm47B0/Y2Njm07zjn0K/7v/0/+Z599/z1d/+C2Xp1EihGHXWdu8z89/81eYTosvPcXScotGs8lkMiIMPD76/Bcc7h1gWw4r24rdx0/ZvbfJ0dERm1vbbN+7T72zTCBNRr6KzuVQBsj5zZcnJEVbcrXiaQtzGebaQZFozQmpJcLuPJMo9jP/ucwfUvYsVUwQVa0Npc93C6gWb79Im8i+37L7KowPZElXRNDmGext/Q7F5xOzkLC0n9tGZVW77+bzmziE04ihTESRZVlppdMkEkkIESesGfihSoWd2eqdMYoIZ4VTq8fOaT/uz8jgE7crzt0N5mmWmBb9LWNNXSuFlApDRtnS2ojeiWkEBIFEaJuabdK0J+jgiHqzh8GUcDxgOnI5ODml3TIQGyt8+vETumfP+fjpZxyeHPLB0yeEoUcYBnyw9ZC9vUNOvSF7F28wbU1/cEyzOaXVsfEnB9zbsLm/cZ/11XscHFwynixzenKIp0xq9Q5Tf0yzpbi/u4ZjtlFqws6jbUzTAa3Z2Npg/+CcP/zxBcOhwebWBu12m43NVSbjEULB5WmXk5MuZ5f/FafWpD+acG97A8uSSOkzDfvYpqSxpPnZLzdZXt2hN5xyct7FNk3a7SaBN+T08iXLzc1Kc189o3nnEY/rK2w9/pilZoPvvv0KIRze7J9weDngl3/5EUEoEaZAKU2zXiMMfNrrm2zdfwxaMJ0GeN6AVrPNvY9+ThS1IJkoRRjHMROCKXS6EPMrhYJZICOlidnhKHcCBXNViam+9JYURT2TlnIlLkrvu10ETbLHsn6UWQb3jHmW2fOTcVNieqcmjyKe7zbTuiAalE/1rYefJ7jXnd9w04CCq6HMZFUNEmYghEirnPp+pBWYppljCFprfN+LAkVSn0+ybpNKVVEJCzM+l9n3owzZJL8hIeRzvojMd9c+rcivz+g/BWGkCViGRsoAKTyQYXTwjTCiUvuiFmc2g1JjBqPXmMF3rNiXGITc29rh9d4RB28usaVBvzdha3uX49PnGNJm0OtxcXHKcDhlNHQ5Oryk5ixhyYCL8zEqHNNs2gg9Rk2ntJbXuDw/5VIrmvU202mP/mDEJAgIlOTjj58i0Xz/7SvWl7eZjLr4ns9kMsI0Q4ZnQ07PhkzGJr5ncXJ6ibQ09UZIGIwJ3IDTo0tCbeAGA2q1kA8/+JB6vcbR4SsUE4JgyO7uGsvrTVwPjk5fomWDldU6Upq8fPaaqT+g3uoQMqn0Diozhb///fd89PQD1jpLGGi++Mv/AT8I+eBzDVKipImOzpgj8ENCANPE8xXSdkALDNPGrtkE0kBJI2IKQXQeqhBgSIHnh1EGZMWNkC6i2zgXM5Df2AlBz9g1E29Bbm2XS8BRDZaE2IoMYyjgnBm7iqZQBtFt0eZdSIxKtPbFzubFcNU9i0KCFzOEhU6EhU3mEvOITUrXTlYFgjTj5tc3vUZTyAc8XKWJFsxjC4aukvOQjZRKEuUSBuA4TkZbMAiCMDq/N5b2Z4wjOkAnywSEiM5NBh05p0WUzay1ig/h0YRBiJASKaPkNEMacdWhcvwWQdkeEQBKI7TCFArHVGg9IPB7hHpKo9HCDyVK1zDNJiqAQPmEesLSis3g7ATtHnNx8D07Dz5geanN5fkBINl7c8DZ2YBet0+oFXt7lzx6+ABT9vnzH17h+/DB400aTQF6SLvT4skHO4xHF0yGE55/+xyloNlYotc/xQv6bG13WF5b5eJyyO5OBx0G7O/32bh3j3HdZmnJotc95OzskpPjS/b3x2jabGwuEWrN/sEJnaUdPvrwMZenl7x6fk57eYn+ZIDSijd75wS+h9ZT/LDPyoqDNOv0Bn1q9Q5n56coQtbXTQypefTwEZvLAZfHYy5ORwvnPguVmYLv+bx5s8+D3R1sE2zTQhhmdBqRHxD4LmbNQGkdaQs6KrMr7SgfQWuiY7CVjVKaMCG8poEKo2xEpRWGmZyFHDkQEzNHEg6XbPC7lDvLs6DzIAAh85tdFc6LTBlZ0ZeQs9Ik1Od2Joc5vNJuEsaQ1RCyDTNY6rmLdwYJUcoyiXnn4QJpvqSvbMNyAqwqLAYFxSKLJYy53JpWgctm+qua9FeZUVY0H2UPvDEMI01Qi2oVRfMWJaQFaK3jMxBm9wJRzHus7ZJYf7SKqpvGEkcYhrF5Sab+uoRZRJ0k9ZBul8E9//QSQ1rYhsaWPoboE4YnnB5/Q6NlY9Y3cSchhr2FadggbcJwjOn4BNojEHA5HIPlMBgNcScjfvmLz1leavDHP3zHxto9tjY2OO9esr6+Qq87RGKhAsl4MOX1qze02zU++9k225t1bLuO8ltcjgfc277P3v4+/cGAaaB48nSHZmeN49NL6g2X87MXCG2ytb6K703Z3l7DtFwMo4NlGbx5c0YYCizbpNcbIUwD1wXDbDCZBJycXdBZahGokFDB8HLIxekU27IxonQMao7F4f6QtfU13JEC5eC5mv6FS7NRxw0miNDm+OgCd1jtnVRmCs///Dssy6Jj/4aNzTUMy2Y8GWLXatQsA2VIPBVE1QiR8aKRBCokOZlGo5EapJZoQqIDcyRJxQmZmDru1NJwvfo9L80u2ohXmwkik5dKP2WZQ0oXBDfKwViMYwIlJ8YluzppniYLxe0W8I2b4rQIZgXWyrPMBYs0wXlfwLU4VT
Sv6JsyYnETM9DN4e3MTnkwDCMuMidTExEQJ59F5WBM08B1o4S0xFxUJrSLVKKfIdWoOXG9o/j0w0q+npun+RXzjZKIQxUGeMEYpzamZl+gxAF431ETS0x7Z1ycjLn/4AtqtQnDkUe7XUerCzrtBl6vTWgtoUKPw6MjWi2L7XurSBGwu7uK7/k0Gks4jXt0e5d4U5+PnnzAy+c9ljoSX0/ZvrfG/fvLrCzZ+FOPer1NKE5w3Ql2o0mIBqlpLzVotm2+f36JNAyaDcnB3gmBN2F1tY1hmITBBNOUCGEShiYbW0ucnPbZ2n7MeDwlUJrJOOTg8ITd+/c4PjxEY+H6mv5ll0HPRQcSw5Ts7qzz6dO/5OTsDUK1qdsGTScgnIzRnkG906I/6vHs22e0aktsLK1Ueg/Vo482l3j56hXj82OenR2xfW+H1Y1NwnDKaDxhaWkZoQx8BZEGKjGEkdJ4TeQ0kkohQ4VCooQgPnMJHZPTKPjz7mDeoVrprgV0Zl6jKNeGU2qc+Xzzp8pLmGV9FP0Fcz0U2sz6uM28FM0i0bV5wpa0jT6Xj1H0GN3a5xBHoVwFVbKCy8w58d2FT1X7uq5dopnM+p8n1PPrsGw+o0J2JkppPM9LTUZJ6QkhBMPhKK1fJASEoZrrS8S4Zw/FifBS+L4iCMJYKyjLIcoIHcyvk0WwOJN5pmULKbCEwrEnBN4bgul31OQebnePdnsTv3vCyLpk4+lTtHQJxib7+4e4Ky2aVhulWmxtNUANkQLaHRvP9ZHSYzQa02q1saWN63rc391gNLrkk8+2UAruP9jg8Pg7nHqAYQkIoT+c4GufgTuk3mzSHw2o1eq8fvOSqRdgGBatVhtDCpSa0llepb1kcHjwHGkojo9PefWiS7OxhOk0uWc5aOD07ILxxMc0fHZ2lrCtcza3l+j1hzx5soXyFb7bR5kWrfoKhnY4OejRaq1zenjKaDQk8KF3PkaKIePLLhN3woOdDXa2HnF23Kv0TiozhYNnfyJwXf70j39NqAVH65vcf/SYUCkmU4/1zS2W1nYwnCamYaG0xA9m5pTk1UsV/QDoWCLRsVg2O3jlqq13E+Ixs7Vf2apglsiHTsKiDV7uB5gV7qvSxyIoEqliJde8GahsjCLRSTZtTHi0LKB0c6ZVRsizl6QUc9dizClGA5X3d/3cVT0xuZrUOl8td45oVVp+V9vOk3cX8Y4rVnoFHwOQFq4LggAgNRklEUa+72OaJrZtE4YBUpoYhpxFCaUDzgSy7PXoxDUdFbNLL+fNevl3lKyzq9/NfDh1/p6khIVpSkypsS0Pz7sk9PYhPOSDx4/o986wOeHxRgev+18Y9gc8e3NGq3Uf39qltbVKrbFOp+HTrztsbrbwgjEIn8GwR7c3Jgj3GI0FhhVycDjg0cNHHJ70sKwaH368wcrGkG7/JeORjUGdWq2JG3o4DYsAH2mZ+CpAKc1kMsW2JOeTLv2zkKWlJvWa4Pxij2bT5vvvXnN82KfV7LCzs8P+8SFC2nQvu2gktuNwcTlgd3cV13WRqzVWVpv4voHtGKAVNdtibXWZvdevOT09ZqnTpNVy6Cw1cccj6o6NY0lsG1ZXWggt6F4csbpyx5rC6d4fQQpMvUKr3WZyes6xe0CzvURvOuHbr/+aX/3V/54Hjz9HxbXWJcSJMKSLXwuBMqITyBJTixQyNr8LoiomGp3LrordVgIWn7lcIII6S1DyC+86qTRiVCGRuTqOgxbMOc+yh38k9tm5KS1IzNdFIs1u00SHlMwY5eze4gbM4z5jGItMYqV2g3kcCnYuXZTvi6jMbix8UW5CKnR+jZ29bLBEgNCF1lVMU2VtihJwuWkqPwfxu8nekr8y/1knT1PQBEpQUgufexbAoJQiDIIoMsg0Y4lfEPgeoVKYhoFhmpEpVwpCFZe6jiOpEiat0egwOppSEp3XnDCEaK1LlI6Mw1k/0Tzxj4W6rJ9GJ3spmjNB5HdUJPsoui7jpTOrtKoIcVH4+IFP6HuYwseWfY72/oEPHn3M7uoWw8vnHB3vczGZEnrQuxxxfvgduxv/PffvtfDcQ+qOwdHREVtbq1xeDOj1PXp9he9PaXVaPHj0kPF4yGTq01ly+PDpBu7khKnbpVl3ODo4J/QMNra26Cx18H3F69dnBAFsbq0xHp8RBgJfa8Yjn9UOeP6UV3svqDclYSgxRcDqkoUfar757mssx8GpmdSaBv6ly8TXWEaT3/1hn1ZNM+j5fPj0A169PuH5i0tGY0V91ebFyz3CQNFoWFhOAykcpiODwBMsr3QYTnrUaw2kKVDTMbbjs77eml9gJVCZKZhqguPYhO4pl+NjPnz0iKYxpi4spnqCNzzjxbNv2HnwGZZt4wUhQprxuogJIjplElpo0LFuIOI6KJlIooWEM1FtKZgFogszCTvdzyLbYOHzlUZ4xDgiok0UjTc7tnNGrhLCUEohF4yYNeWUqeJ65vy8kpHMM4WsmWeekZRT8pQnJxgsJNAl9+akzcUmgXmMM3Neoj2kry/3/CUEMsd/SnBMNdW8qaYcsyIzK0jlYl4uyWeHL3j20mVwvZ6THsuaGR9mtnelNaEfYppG6suJ8gemGIaBHSeYKV2YXw1KRxVMNdG5B4Y0kKaVCk45AUYkuQrJBMweqJwp6Px6yjxFutYKimByPWHNyWeNwDCaqLANepW6s01z5wl//sO/Zakp2N7YQKkTGo0JVrvOhrnM198eYZgO/f4b6tJB0gflM52MkWIdy6yhlMQ0HXxPs9TpMB4N2d3d4be//ROj0YCNDcnK8jY6UIwnEzqtZWyjw9HxMdQkrdYyluEg0YwHLoYwMKXkwe4DXr86wjAFyIDt3Q18f0TNNKjf28UQDuddl+evT/Gn8MknT/n62++xDE17vYPQNt7YpnfZ43u3z6B/xGg8xDZbhGqAaZs02xpD1lBhyPlFj1a9g0QSag+zJmm02xydHNNwHL787BGdpmBtpXHNaovXWKVWwM7aFiIIGfW6GPhMx31Gg0v237xEByGPdh/ge0F8XF9ciyQ5Dk8Uzu7N0v4sXCHAzy+8ebPQfMZkdpPfzmaddXpJIdLFmkh66YjxgePVrBRZk86iH8huSMT88y4iQEUVvHwsCn9Xwft6Dee2UMZIss7OSLJ9Z8Mzo1LFOZpvll0DM6n/6p/5tbOYzWcxEoXxImIdZflOp1PCIMB2rLSiqZQyDTGdq3JagpOUEkMamHH2c5KdnPwUHb+JBC+uex9Ff4UQ+Z90TpJ5ia9l/haA0BLtmRA0CMMVDOsxWj7g+EyztrHL/v4L+oOXhPKMzXsOrY7A9S6xbY3jCLq9U0aTSxQT9vZeMOh1mYz6oAKWO21sE3q9MZOxjyFgqVVnY7WD0D7uyKPVaNGoNVlZWqd/6SK0wfraBrZhMOj1UYHClAbueELNafPkg4cEvsvqSo2lVUFn1WRlvYVhmSgd8viDR2xsbDMaerhjn+HI5+uvniGVQU0YbHWahKMB3nCI8hWSVVqNdRyzxbA/5qOnu0ipQQRsbKzy2WefoZXBweE5R2ddl
DCZTEN2dx6ysbbFg51ddncesLmxhW3fcenswzenuMrFaliMxx5Hp2csNZcJlM3p6JRf/It/xc8e/xq75iAMgfY10XF4BjlprmDRyJK/rApeLVO2mr9gXpq+vu+suScJjUxiqGYYk1FKdMwIM98t7DvfpFRqnZO4i7J0PGoFu3NpeKAgI/KWd1LKiIto3pCvXAXXl0FIVklW5rxd6GP5GtCFNiUtCmqCWNhX8b5Fp+tdjZPQ5BzpMy020hpM08C2DHw/yB3kk/gVsuUnZKH/MA4dTqJ+ZqYi4iS2eEZi1TMh5umqiTXz0scvuTgXWQTIeH+mTCJjWhMaDKExZVSuO9ASpE2jvs792qd888c/IYyQlZUaUz/g5ZuX9MYGytjAdcfowOOr8wOm4zZffvGAleUmjt1iOh4zHk/Z2lhj0FWwOuCjD7exHcV4dMnZ6REPdtb58uefYJsyxsegUa9zcLDP5vYm3fMJn3z6IZOVgO++f0P3vEfQ8jF0gGM7tJs1Gss29Y6NEj7TYITwA46PjqhZy4SBotNZwvVCxqMR3bMpTUdw/vqIjtNmdW2V569PkDToX4acHPUwMLi3dQ8hQw4Pjvjo4yfsv4mS7TxfM3Bd7j3cBunx9dfPmAyHnB6c8rOPnzJ1pxzsvy55UfNQmSl4YR1Xw9j1qTUddGhxb+0BrfYmk9BkEtSpNZfwgwBpWiDAskx8P7OwkvWSXTtUpynlzuCbQfWY8Xz7efKYZQAZc1Ml5avIzBbFdVfzPywcpcBU5jizgEX9lxK6olOlDHSGbV5DLK+rJKpTiT2e46TGkc5cy/STJXBz77TKNOr0nwShuduMktdb5VC1sryIKks4b7+P/okYQIjtOAgEnhcgBCkDsG2LMFSpOSlhFlIUDqgRhXIYWs8xDpEOPCPW2WeaaXNXP3MZQ0geSiZ9kWgJOq50CmiNFAGaMdq4QJhnhPKcgCH1Zp2WuUW/5zHxPC4vXc57grF3zP7eiIcPHjCc9JFGi97wFK09Ls76LK90sAyJRLO2Usc2QQqXVtNhPB6yvASPP1il2YQwHOO6A1ZW1hEyxLIlw1Gf3Z01GrU6IlSsLi0z7LnowMQybCbjCRN3jDJbDKdjAuFxenzMSsuhIWxW2ytYlk8YDlle7bC+usuLrw4wfUnDahH6khd7Z6gpnJ306F/2GQ8HODWT+/d2EYbPwf4R3337jMPDc0bjKZ4v6KwuEyqDyTRgojxGvQHKV/z7//A3fPThPS7O+xVW3A2Ywme/+pc4nRqH3SOmoUut5nA4mNLUmuXNLR5+9muEVccyTQIdxTOHishZRVxfnQzJzG64orPtlgT/XZZSSBFLx5CxDyNSerMqcE4ZKlGjy/tedC0igJFEnifsVR5Xa42QCfEs9vH2sOhpss8p5Sy2vfiOrsz0zplyinOQXJufhFutg5gh5DAoke5VONfq3YLQKBViyKi6aBhGh9GblhnnIEScWimdmouEkLOzC5gnzkmZi+S8hCSoQ2a0g3QO4+kuvpnCRC1CvhSH9HOilUCqIYCOmUT8llWA1iMM84Jm64TT079DNC8xgmNqNYtW7R77+3v0ej4XPU1/pAl0yNrKKpZpsLGxwtpGh/OLQ+oSNja38P0po9EEgWY6nbKxsYzrDtjc2uL4eECjFYLs0x8Imq0WjVYNw5IoqRm4Q1bqKwyHHgf731CrOYxGmu3tXY4OT3j96py/+OVHvH7zGh3a+K7HVPkYUmIbEkKX4eiUzQ2L4cRHiC4rS9s0P/0Af1DDFk32Xu/z4QePaZ0PeL53wsT1MIXJg50HPLr/iO+efcXnn33Kf/gPf81kEr1Ly66xsb2FXbMJwwntVoeTg0t+/Refs7uzzv7RK6bTsNKSq57RLG2WljfQrstf/vpXfPjhhwx6I5rNZYa+Qto2hmWBkBBqAhWgfB/DsuJ4XZ2qiFplZeyiNHt7WJzgdfv+EpiT68TMmJOc5iZi9Te/YRZpJjfDM9pDWUl/cVmLvE8BsqXIZ/kDCW4ZhnGd2asioklV5RkjmMctiY4q93/E32nigIRknlNsQS/WyKTMf6cp0Rzm8M4SpvzdxYZ3xRIqrQClU7zCUKHCEMu0YgIe7aswDDEMI81MDsNwbl6lENH5yhosM8o1iM5TjlfTTL0je+jsTB+e6QkVDi5c4CNacFfGbBTbTdOvpCGQQmMYIwLvDdp7weujr6lLj3A05sz1ePLoQ1ZWFEv9S169OaA/9hDS5PT4GN8f02oJHj1cRvgu7aU1zs/PMG3B1NVI02F5bY3J9JzesEu9bfPwySbtTgvfD3m5t0+zUac3HDPxfEy7TojJ2J3gBVPODqaYhsl0arO3P2F7u8H5xYhmaxmJyWQ4RFgGnUaTR/fv4Q8GrC81EVox2WhyORziuV2E2OD1m9f4E8Gvfv0rtNSc//XfYTLl4YOHjEdDXn5/yP/j//7/YjDq0l5ugrYIQz8SBmzJ0dE+h6HHxmqb7tk5zWaDN3vHvN47QOkR61uda95aBJWZwtSfcHZxToiBMmpoq0VzqYk0TGqOYqoUfqiRItISpBFlMwfeFMt2kEZ0mppK2ECGvqXELRWOqxHMMpW83NlMod3N+k3wNHJbRMSHf8zczXHOXu7ZyscqMAUBcwlYGvJhLgJkQiBnhPVaE03SkDgyS2aHzmZDV4P5eqOF8eLurjYN6Uq4R/OSiIzxhVR0zUiyOpmPvBkpM1wlJ7Ws4J+Y9fOWrKEiTkJGTM734+J1lkk0fyFKRU5nQ8qcQFQWJpr4C5Jy2r4f5M05RUFmbhtl21bx48zjMBNEii1JmVHCeGRmnqVhsdTpcPz6DNPzsaYK2zQ4703xg5A//PlbPv/sI3r9AesbK3jHp+ztHWFIEzA5PhxydHDKckvQ705otZo02xsMBuf4YcjLV/usb9UYTVzGkzMsC0bjMdOpYnllmYuLMb/97RsMU2FZku7ApV5r0B967Nz/kLOTS3qDEaYFrfYqw4lHq2lxuP8GH49f/7OfMR6dQRig/ZDV9gqt+go6NNDBHr3TLpNRwFmvT61eZ//iWyzLRBouOxvL/PKLz/nPf/23GEJwuHfGZX+AdTTAciTStDAMhVMzMB2DwFcEwYQwdHFqLbSQTAMfDJPzoVfhvd0kee3wJVzU+ORXf8nu7g5aRfLE1A8RtkDhY5l1VBi9ZNMw0Aj8ICQIQ0zDREiiOOgCrROx6JHG+pdKbIWFVHVzFhbiTaT0MkdrbvyC1C2KGz2RmOf2UIFgFvrOD5LgneCTJeLzpqCyBLDEXyGEjJKWVPkhO1dC/E6uJ2Q61TrK2pZpNyKezPnrkBVN84wkVRvKjW9FpnTtitJzDG/O1MVbs4IsQtfjFEcahUGAQER7SmukkATx+QimaaCFTE0+swifIuLR0ZhSCMIgqRc1e6KkfZp6WeJPmfnXKqgKJUwhO046CZpIM4kP5cnv6yhHKFQG42GIEdTZWX7C5WTCwZvvONg7w2pI/upf/Ya1lRbfPhuz83AX0wk4Pj5CKYFjdeh1p5hScG9jFSEdQmXz4sUhjfoS40kP
0wan3ooOK5UWp+cnCCGxnTbTYMrJ6ZBABTh2i+F4jG06PP3kM37/uz8z9aYMRlO0cpi4gu++f83ugzX6gyk12+XDJ1sY0qfTrDEZDLCV5OX3b7CsFgevzmi1VlHTkMuxhzYDlrZsrI5H0zEQYooILZ59+y2jwRChJecnI5Q0kNLA832EGbC+uYoioOaYmPU6w0Gfx48eIIWDOw1YbW9wOTxHieC6lwbcgCnYWnFvd5ufffwhnVaTUIGW0QJTQiDMqE67Tja3jhxINdvA8zIJMJmtkFuX8W8dL5K32X5ZiWlml14Mi8wwJS3jRSuTT6Rm7mRMnd/suqgtiKhceBanmTRPfjPmcJoRwez1ebSLDDAaK6KvkvmIo8QgU5DqEnQKA82UuUT7yNwnZs87o90lhFUsYupFbSl55kQjS4SGPGOIrudvnzf9JWn0JcMSCSY6HnMmFcex/Nkb0qV59YLJpdCUPFbZ7Ym9PyHsSqm4ammUlBYbAKNKAFJGTmERmZFm+yuRsnVcmBCkIZFItE5CxTWGacyilTLTnExjukJLcRXXPX7yRGl7UVx2cdfIJFcpWjiJrqDj96oQSCxQHVZXP0Z5f+bl6y5q2uL0WPGzX97n5PSIYV+ytFzj6HAPy7JxbFjuLHNxMSGcBqxtrvPo4S6djsFo4OJOLri4OMX3pnRWHGoNh0bLZjTpIq0m/f4IK1CcvNzn4nxKzWlzejqg02mCNnn14oRgKvnm9SsMw2Yw6OLUTZyaxcraMpeXp3zwdJMHDzYwpMQbC6aez9raFmfHfb7+8yuseovuKRjmEu7kHK1rbO9s0WwpTg9PECgMTJbbG9TqXRwBWg7QUjANPGzLJkQxno6oNw38YAKGxerqEucXl3z04ae83nvFB/c2WQ0VnZV6lZdWnSkwnTDtd9l/+R2rW+tYjQ6hMgjCAMOuEwYaLZg5uLSOIicUmEaUCalj+2h+VWSrjQqkIaL9ezdFFiklmwWCUG7KmP+cZQiQ8V/o2d9zlm5deJSUSWSkrQzjSDM55wX+UrzK92ZRbU8ONRfzTCrppWBuSXrJvq/EUhMrc0R1hwpMsCgB6yhDNvozw/jmTH9lT5HVihLCn2UU5dK2yF3X+bGTNgl3yj2dUULvCv6JmOBeZfqae3UlTcsYQ3RYTTQ3SY6ANCLibVgGYaDw/BCtg9RRLKVAB9FeS5iVVgqtw6gvCeiQUGt0kgVgyLx1Uszk8uTazLBYoseW7pfyiUiazbee7Rchkw2v0FqSZE8LHdETRYjrSSx7lb0jF9/c5PTwCNP+EG/a5JOPn3Jy9B2h72FZJnt7B3z68YcM+lPGI49Bb4TvT5i6fY5HA2yzw9bGJvv7xyhDsrm5wspqi1bLwKk95He/+4peX9HrnzEaBYzHBo8ftTk77YPyWFmq8eLbE3q9MabpoJXgX//rf8mbvT3Ozs9RGn75658TeK8Y9AastdcYn4+4v/GYk8NLvvnmkIsLFz+Y0O9OULqGrLWYonjx/JiPPt7k4nxIs7WMPzD47tlrkA6r6y0Gbp/JdMrK+jL1pkRaFqYt8fxLvvzFp5yenPHm5SnDQcDzl3/m8Qc7dFo+pmOwvmlf/864AVM4O3zNef+Ub159y1nvnM9/8Zesb+xiGw4q8HEMA60FWpJqDCKNQRbxUXqg4nhoEBBvgJwZJhF1VDm5uynMOZ8XreWiVE7xc+JPyKg2IvtNfG2OvhZkcJEE3WUvLdKMSjjonBa1uI1Iucv1GziLU2JiTtCaOSHjZ00l/RK7MVSb70qwSKzPSO0ZDpZXPgpMoNBzWU5ENF1JSGv5nTLDNBY7+q8XpFPtJtu3lHGfKtUYpIxKUwdBSBCE1Ot2Gs2lNbO9lDCEjAYnpQFCE4ZBdPa5zGuouXkQeRac+zvDwER64XpImUpJ8+g7hSSqnCxSDTD6kUIjhCJUAYapsawaY9fEbD2hve7gTU8RxhmWPeX77wfABBX6rK52WPfW6PdHGJaJZWt2H6zQ6/YJ/CXqto3Wiovzc8ajPrWGwycff0CraYKecH52hm01mYwHONYyb85OcGoOg/4YMKjXOhzsdZFaMxkHrK50+NkXTzk5uST0TQ7eDDk9GfLm9T6b65rHu5s82drGWG0xuBgzuJiiPJNOa4U3+6cEvoXG5mhvwNaDbfoXiqODKbazwXikeXO0D7TprKwgTcn//H/8n/jt73/HykYdL5gCJheXJ3z+s49oNOHezhIPHmzwpz9+h20Z7Ow6rKxa9AddvMkdl85u2SHTcEzNbtI0FH/zH/4N4PDL3/wVH/38S3rTKdpwCHXE5QG01KDyKi1SE2qiMDidZGFGYyidmAeYi5deHD1yPdErc0jnv4+7yIh0OiU4sx2hspnIWuY2v05somWSa0EIr7anFqkKRUKfv16ehzHfe6UEvsLvLDFMvpl3HuqCcF2JC5fipNPaT7PnLTNXpYx4Ea8s6XuhvTs5cH7BeiuuttJprMgIi/caRpSDEIZhrAVEe2M69RFCYJpRFNl0GqQmJikkSFBhfO6BFLEzORbO4pPJhCHm5KxcpnL6dxyemjCBzPLK/n3tsyURY0TEvty/pCPGoGW0nxSp01tojZAhQiqEDPH8KZ3lTexmjaXlj7l3z2OlaTDo/YlG8xWX3UueP39Fb9RlqdNiZ3eb09NzfvGrz/jD7/+IMH0MaSBUiGVInj55xPpan937G7SbJpYZcnZxwvHREWdnASgDP4CnT56AgMB3+fCDR4z6Ex7c36J3MWQ6GTMcjPnP//EfWVtb4eSsR6vZQpoBvufx7Gufmgr5rfeShrRZbna4v72CDtocnfdZXbGoba3Q7YYMRucML0yCwZSLiyOefvQAZBTV6bo+570Lpsriz1+HPHq8yfp2hxcvn/OH339Fs2nTvRD0hiO++PxDpBHy3/339+i0m5wcnxAEYzptE8esVsCiMlPYXGuhnRqX/oS9Z3+is7xFt9fnd3/7/2Njc5XG6ipuKNCYsRpqRMXvlJ695NTcIAlQM9lbhWgkQs6yJovx9IujeGZEe5ETs3KYak4knt2TU7UzV3XZSi9jOIX7UikxQ21T4Tcr/ZZKykljnf5kTU/RmFfPW/mcZBiKLuz7EmK7KJJkxigyt5QioXOEe/FUzuYiEjZK1oVINKPitSz611GyOGxaJGUcxALc4+vo3OspTtHc8+QfN99j/D58P6lyaqbjBH6IISJCr1RkJrIMM9IQ4qWgVIgQ0X0z81OQ4q5jhBYJAllBZmYczTxb+s4zGto105nj3YK8bpx2E1nNBTMfRoqLiPaXlIDysC3N0B1hmjWkXadua0x7yFZrg/29P3LZ67G6voHnTzk8Omc02sMQFu32Ej/7/GO6l+e0mg6TXhdhGQg7YH2tTX9wimkPabZXOTk+4PT0nDBssrtzn8OjMSsrq/zpT1/jTsaE2/dYardZX13CGx4gdYAhwTZbnJ+OWO6sg/Aiuudp/GnI4ZsRTZb5/Dd/wctnL7FNg0HXR+omyofdx5+hVI/trTVeH5wwGYQsra3y5Rf/I//+3/2vTAOFsB1
CrTjvXVJvG/j7I8ZuE0KfZt3EFCHbW2usbK4BA2o1je+dEoQOqysGk1GXrc0POT25uPqlxVC9SurJPrJRZyoN3OkYph6/+MWvOTz7//P2Xz+WJHmeL/Yxc3m0inNCR6SurCxdLaZ79MzO7B3uYi+IfSBBgG984D/Af4MPfCL4RoAEL0CCuMt7L1dyOLMzPaJFdemq1JmhxdHStRkf/KgQWRU920sDMk8cP+5m5u5mP/H9qS5PPvtH1m7fZm33IUGSIA2LUCkQqXFM6NSFTmuF0gIpTUxTEoQRcZJgmGaqSYiFkHaduv9PiUG42TUzAnuBMnN1qy8X0Jn1/T0i6rVzum4Gl7nZMhV502a+NOYbCfU/pV1kijPCutCOfgvjfB9huQK+zSCGZc3huoywl9tN10D6+d1LZnH3NyjTcOnSxToTKTVP+zLkPMupmEJIaQxCAlpN1/7M3pDOYVHzIF2XhmFMNQmI4ySFi4RcYt5X3XTn7cIjviTUzGAsIaZLUrMwKi2f+AYBYan7uRF9+j1BEmmFtFIbimUIICGOVerCLhS20NgiZjA6IVd2kYZFEPgIEeBHTTBOcNwRuXyGcqnCV199jRQOUie0Wz2ap5/z4MEt6vUsg36XcDxBqQmZbI56o0aU2GzfatBs7WEYBmurDRBZbLtAtxfy+tVrVCSROsP+q1PyuR4mBlqZGBLCScIoGCGEoN8d4Ice5XKOJJH4nqQnYoaliP/w7/+aaDzh/v23qZTXMX0wHQMvdCiUN4mUR6awya8+/RXexOC/+7/9vxmPWwghyGUcDNPAdrKMhkMmE4VtTbO75nKsr6+Ty7pIMaJccpFyhM5pQr+HxMYSNr1mi0n/t1yjWUsIogC3mCeMQ1on+/xqOKBaX+Pls28pv9zgz/5llmpjmwRBogU6dbImLa0mmAfbqBgpDUwzDbaJkllq7KnqyFQtXh5fX6/yXz4Hvv+86+Gk2T9IfWcXYvwFG8B0U6Y/LbSUubj4nUNP8d/LouSlbpgNMf9bzSG55R+vo4XiSkdvut/Lben+Lxxb7lumni//RIZwMaDqYt9vIuziokEjhZQuBltc6es6R4Kbze/GJ85mdP3P1zzKlImIpW9gmWmaeM8LplK+nGtDURSBENiWlb6ZaZzBLD2GgDnsY5ppbYw0FgEuwogzu514MxebP3t9bQXZ2bJemKhu8v4X2vtFrWHJBiJm7zRGSA9paLSKyWRtgkhjmg5J4GPZI8r5EabZxLZMpD/E0AO80QsyJQ+lDlBqTKc9olws0Trr4o8VQpm8fX+XB/c3efXqMWu1EhuNBs+fv+D1wT7VRhHD0viBx8QLEMIil7c4OxvR7fQ4PvZAF8i4ZQ7PTnFtm1E/5qsvXqKSgMC3iOOQYqmA49h0uj5Cm9hWlljEmDYMxyF7+8fc21pla3ubarXOyJe4wgHbZugZnLX6hJFmY3OHO3fu8+TZVxRKFklkksnaFPJFhoM+w8kYTYzjSM7P+6gpyTw8PmNj5xa2ZTEaDtjeLuLaCUZRo2OLrL3CZCAoZ6wbvLffgCkYmQxCCoIwpL6yQjAO0P6IoxePsfJ52ocv+Yf/z//ET/7wz3GKNZz8CoGSiCmcNF8UM5UAnXoaaYFQCWpmIENeMD4vFtOlJfeG/Xt1vd5kAV+UPtND12gBYibRT4/Ji/MQ0wlclOCvimRz7Hp2ln4TQZqONcfFrnKdC5t3IZJd8+vFdtXQuswU069i7l2VHpyPfmHuV3GSm9lMrsFXrj22OFfP5q2Xr78BlnGj9hv0c4PTrtyJXqS9nn1XKi2Mk8m4c20gjmPiOMYwjBRGUtP3ohd1I8Rco0jXhppGJ6cOHjONYJkBiasQzpuewLLQw2LFpf/0nJ5fvfb6vsXy+bO/pwcMAZIYQ4+xjAGW8IlVQL8dkM3XkaqEaYZYRg8dv0RGTZQfUjZNkrBFYcXj6OhbfH9MrZJlNPQIJoq3Htzlaz9lAivlPINOi9VamXKlzGg0IlPMsr/fZ+yNeXj7NkfH+/SGHmCTy5TxJxPOT4dEgUUURKhkRKHgUquWuX9/jVZzH0M6qCTmxctTfvTjHcqVKv/5r3/NcCBIVEKlVsLMOLTPTsmWXN7/6BH1YpXxWOEpyauXJ2TLG/QnitgwabbOOO828T0PlcQcH7TI5k1u7dxm2O/hj30MU2BaDquNVfqjDqYFmYwLMmE8CcnlFLdu7TAenBMGMcVsDkvmCEbgiDzC/C17HwWJQmmwXZtWs8laZYVSNc9wOMYq5Djt9Xj95EukhrEyWdl+wEc//ROEZcxQdCDNg6NmLpwidUc1TAOh9JJr6ncp/Jdhgzef8/3nzc5ZJu7TTSSnBG9Z+1he3bOP5XNYkqa+Y6zLPO96W8ilY8s7cWlOF8wLy+fq34xYLs9pTlL04rdldnhDeXqp78t1fa/TCL6vZrFYnLNs+7gBrHjd873mrGvndW1fNzjnSmyCnpaTmc5XJSnEY1smaLXwJNIK17EXtZbVAnK5aBBOP1WSpA4QU0FDsKwRLDOIG2RsWrp09v5nzOCire/aS699DsufaH3hmNAJkghTjHGMPjrpMBy0yDlllK9xCxaGCIiCY4LgKcI64nT/JeHYo5iXONtF8raHjWQ4aXF6eo6KTBzD5fbOJmsrdb744ktyuYRGI8/p2Rmbm2tEKuDHP11DWppXe/vEKsRxs7iuQ/t8xKATEXlFksDg7KzL2lqdf/bnH6FUj92dMisrWzx/9hSlBA/eK1Ct5hkMB/zwJ9s8f9pmOIgQhkKZMdXVErfubjAYd6hXypw1zxj6WTK5ItLMcHC2T75YoFwvsf/6ObVKBdfd5tWLEH8yon3eY2OtThz4BNGYXLGE72l8T6G8mPFEM5r0GI1b/Okf3WLv1Qm5LAx7HhMzoZixyFo5Xr4+JA4E/OvvWwS/AVNwTAs/ClBhgCkkKg7xx2PQCXv7e9Q2tzhtj/jy668w81V84XKr02JlfXdKtBZ53ZVKmIfaiKWEaSJVj5VOQ1aWszourSxAXFcojOu2682gg6XNo6dFTfSiStQMrJmHgS1Jqgu7wixHzawvNf39sorzxhlcOm2Wf+ba7TYf+7IBb+a9xdI9XJbi3uShtOzbvyyVX2YvYv6/vqb369ol7eGaC6Zv9dJBMQ8TXMxLzM9GTAXpG3X+29AmbqhPXNYeIY3RmcVrqFQjNo30+SqVoDUYhsS0UpfJOIowpZF68M0Hnb5NnTIZrTSJUmgppk4aMq0/sAwdzd/W9R5AV+9stg8WWuFsHS6W1aWAPnHNu0PN11D6mdpBtEowDImUCqFGGKqHIwa4RhctO8j4jJcvn1As7lJ0DQpFjYpbqKiNpVtU8h7aihFE6CT1Jjo4PGEYBZRKFc6POsSRx4Nbd4lCj4cPNigWTLJ5i0hrTNskX9ogDANOTk8ZDCfECja36jQ7Q4IxxLFN4IepbUIoHrxVJeEMaQ7AhO6wRb6SOsp4YQjWGG0EjIMeP/m9t/nys1dMJhE726uU8xbbKyXe2t6lczKi2Zmwd9xh9/7HnLR7lMpF3FyO/d
dPqa+uYBmCKPTJ5zPUV0qsNxrkMg5SSyxhEow8SqUqnl2iNx5iChhPFO9v7LK+vovUHv64i2Os4IoMWbPBi28P6LSHOM7NiuzcHD6KwdICFSbYGROJoD8YIdwcwi3S8TWRXcALoegWSOQ0W6oEqafucLPyllMsVWvAYG5Ek9JIk34pleZIEmk1NjWl7LOFN9/jyZs26GXCcBOCsGRH0NPvl66W+rKdYzbOZbhGLF1/cS6amzEqoWeM701z1+kzmklf09NmaMNsk+v57p72dUEjuu744rfl4Rdml5mHjr7m7tL3q/SC8VxI1bx01oVpveEuxdQzZXkS171ZPcclrjc6X1akZldesLveVP25oXV5hvcy1QyFkcKoiUptIqYl5/EIM7kiDSqb1uUQkKDmGurF9B5TxiCZGoFN0mj1+Z1d+0C/R59K/7+caFAsKcPT/+b7bzaMuAxMpVHT6Tueum4LncJFUqB1gJQRBifE3iuyWQuVnKFVh6wTsL6S5eT4BX5dkc+E5DNnROMxthTIrIUXj9EIztp9TMcmEgnHzSHdgcbSkCQhphlRLrigFKNxh7EvkI5FJueglMd4MiBJAmzLxB9FdDoeg/EIUziMJxGdTkil5FAt53EyE5yspFA0UHLCcDgmDAMGwzFjX7G+vsU4iIm15uuvH3P71l0OD9oIYkJvTJxIDk73OTuK6E9ga/dtmp0OfuwRRSYZnaFeb3B6coBtCRIVsLpWQyAIw4heFFKr1jk8eEoha2MKyGartIYKS1oIPcS1C3gTg0qxRqQFppQQWLR6kpdPJwhhIozfcu6jYWiSGAYJMUEgSAxJJlulPfLYfvtd7r//Ees7t5kECX6sKa+sUajUSPTStpZLarVeggOm5H4mWUhpoLQmTpKlylFyoTrPsNVrZ3p56d+svsHljwXBv0CWvrctQ2XwBuhC8L3E5U06wmKc6f8L69+i6wv9iBkduXT190jX101oyhneFNg1H1EsC+bXnCPEwl7xPUNettBc7i1VZm7yvK9ed6WfGzGGm5wk5rE3s3FmMQjp36lQlKaxSOY1DeZR8dM1f22Cx/l8F8ev0ftuciNvnPt3qLMLiWzOo968SoVYClzVMv1UGtMwQUU4VsjYP2A4BNcdcnbyhKxTxJ8kBMEIQ+YYDts4RkC/e0Y0OWd7rYKnDV6+2Ke2ukLRdmis1Dlr+oxHEyKdMDAFRycHGDJC6YjxcESiYffubUxHI6VJNldE6wEnJ2PaLZ+JB7brMPEiosTi4x/tEnoBO7fusrEDhtMmDPsM+wGTUYjt2KAMQj/AmyS8enGKITOYdoYwUZy1hmRzOYToUcy5vHp+younI8rFOmt37vD48DWV+jphbBJ4MQd7hxQKDuiAH/7kh3z9+WeMhyMqK3WSWKCSmO1bd3Hs1FW31xtQKdcZjwdsb9zm6TdHOFJSK+c4PzvFkS45q8ygPcHNrmMZml6neaMVcPMazbUd4iRg4o2YjIf0wwjb9lnfuk1p8x6PfvR7uNkMYQxIGy/SaJkGpaCZpgDWF2XouYikp/h9Kk3MlqRhGsRRMjWsXVx8lytSTXu8PjAt/eumt8oigOnKD9934ZUj1xmRxfSev6u/aVnoK31d7kfMYYWLjO3CFzEjejMJ7uq4N0JX5k4/Cw3pSlCYTiGz2bwupl6e38n1MR6X2xVCfp34e9mwfc29XRn/uv7eVGP7YrtZnaFFFtg0k+mC2BvGzG00RuuZK6lBHCfzcy56aYkrfc8YyGX7wm+jyWtu7uLjXXqn3zG+XhIG04I5CSiFgcbUMRDgSJ+MPaTdPGBry8UUbUaDUxynwPqqSdbtYNkeGTvh1m6ds4MBRwcHSCW4c+sWh6dn7N7eoT8eUcy4lHN5uu0mK40shZJBsVjk9d4+XqRIlMHPf/U1larN2nqGRqOENAVxHLO1tcXTpycMRj3KpQKlUo3N7Rqt1gnlmiBfgEw2Sxhqvv7iiFKhhoVFEkwoF+o8eXxIr+fhT3yEHnF8PGQ49ggZc//ODkcnA7xRyMZukfc+eIdYBxRXDE5ahxDVSUIftMIgYTjq8e//3b+l12lSyOUYDPs8ePgh9foazfMX7O99S4LBxu4jtJHHn4S0ztuUcjmO9gI++2SfQqmAIODBnQqxkeGt27sY0Zive8MbrYEbM4WdRz9k5+4242DC+dkprmnQb3f5ye/9AY3dbUQmT4AgFppEaaRl4ofxvFJVitRPk3ldXD5ThjALxkmD2PT0b9NKozUXbplLV1/q602ulot0D9/VliTSayIwr8W8r4x0M6lNLDHDN85GXJKS3yi8LUMF4lKu+6mn/wVj7OzXmWvn8pjfo71M55Ri2svS+bI0PwtSXH7mV6tmfJ8nzMWzL8xi2c680I9mx66RrtNj17wHsXzOdULGde3mDrmz/meJ50zTnI6lpjCKTLOcziE5MZ8vMK+PsHxs0fdlxpDO7b+o6ev6uch0L++D6xnD7GWkWoIUCZIQ04iJvCFYEegBJj0M3UUkbVyjRNYNSfwuK40cZ6ctLKNIEo4J/QSVDNHKp9trQyzIZUPefusermUwFopqIUOiYb2+S5yMyZcz9AY9LNfl9HkLy7SJlKLfD9je2aBUWsU2PcZDiTfRuK6JZafprONE0Ru0cTIxlZqJlEO8yZBup48pHCyRoXXeIZMpkCvX2T8YIoQg0Qn5XBHHcokSzUqjSqcXMBgIck4FMyNJTJ+x32fgt5gEkkf33mfQHVMolnFtsMwJw0Eb07SwrBwbm7fwY4FwXVY2Ntg7esrm5jbjwCcIFRsbOzz+osk4iQnGA2pr22hDMA5CmsOYop3n8PiMP/jhO7TP9m60DG6eJTVXpFCt87sfPUoXaJwQe2GaV8U2sEyJHyuQkjhKkAhMy0KrJbhHpFIDTG0KTDcjoGfG5tmGEOnmMSQILUniBORio2g1M1hfXItXpevrUjJfJhpwOQPfPIHkMsG7cSDcTTbnTc9bPv+qZrTMEGb/X4aPUruCnnvF6Gl8xcIoPevtBvc3w62XvLouPpeZlnWVDbx57te1i3mxLszy8ku/SbtWXvjNYRYh3vyU0ueZ/m4YMzfRBGlIDJnaEJIkrTdiTNNgK6WnifAks8yowDzFRZK8uUDQYm3qS5+/5bbUbcozpmxh+dGr2fucBjhqgUQgUUgiLOlhyAmGO6DfPaC+YmHKDoWMIGPmiP0BIvYp5U388SmNlTLtswNWV+v4kwE6GeO4JpVKkdZ5G9OW2LbA94ZIYgo5E2maZPMuCAc/DOj0+uzs3OXgsEcSS072W6yt1ggmWY5ej1CJRzbjcHx4hG0KwkiSKZSIo5hnT55Qb7iMRhZh4JHEAYNugjfWjHvnFCtZTCPD0eEZpVKFar3AZBITR5LXr07wPR/v4JTEs1BRHi9j0xn12Ljd4OD4kEqtxJ27D1ivr9JttwjGksP91wyHLXa3N/n8s2dY1Qql8i36/oRcuczBqwP2j85Y275DlAgcN8OzZ8+JYkUS+WRch72DA2rrNYIk4tX+U+5sbCINjeMoKiXnRq/7xkzhg/ffZ
uANOT46Yq1RI2MZNM9bjCcjfBVT21jDdAvEWmJKAyEhThLm/iNTe4LQArlEfuZEbCqlTFHrBcSipwW9pxKVShKkkBjTeIdFKP+U8VyCmbTSv/FeuWqs/E0lsOVddM348x31XRObA7hv/nWmTXyPpgDqGglwWbq/NOc3tWtw+SsEa07rZ7DRGy6+4WDfxZQvg3KLOXHNL989/m/Exq+FS1Kmm2Y1TVAqTTMhjdQon6gpPGSIedlLpcTUwWJhQ1iGj76LIVw/u+9r33+Xl7X4izEP03Mua9GXHEHS4NPpfifBkBGWGRB450yGR9hywPnJKe3knPu7eQYD6Hc6GBLiMCCby5LL2Qhtc7TfoloxyBeKWKZJ/e06leoZURjT6XWoN8o8e7XP6uoGo/GE09Mut+/e5cmzVwyGI6Jon15vzGjgI5EcH/SZdF+yspJntWGztVPhxz+o8dXXz2me+xy8fIGbyRGGgmKuyObaOp3OMZlslf2Xr7GtClYGKpU8puMQKIuDkxbPvzmgVlshiSWGhCQW6ETTbY8I/YTx6IT6msPhUZtcvszxicd772xyuP+MybiDTBT1FZettXf51S++ZWfnDt0etNohgdScnrd5/uoQLbOMxoJEOTz99gUSRS6XBvn1x02sDPT6E7SIWVup0us940/+7M84Pn7K+nr5BmvkN2AK/+P/4//M6tY6x3sF3n54n7xjYwNBt0Wr38YfHpBf2cHOVilW6mgjJch6xgCWoWy9RBcvrsElWqjnQqcQaWERrQVJMq3ZwMXN9KYmhbgKoV+penNx4aeG0hnD0XPJ5/vaZaPnrOcLhlBmEMz3bOJLjGPm5XqZVf1mpHaGDyy9jCUm8r3055oBLxPhmTbynd2Im2VGT4mPvvD9AmzFMjB1+byLHYnfQi52AahpQZvltgz/zKOOdQJITBPCMEFrMS+ZKcTFJThj2PO4E/5JSswNZv/dTV55d9dDZZePzeJQxPx3PV1XCkOkmkIcDrGNBC+eIM0xBy+/IWNPMBIb1/UJgoD1RoXT4z46q5hMxjx7csRq/S6TYYJlGPiBz4nXYTLxyOcy9Hp9eqMBtm1xfn6KaWfp9T2+/uYlxVKZXk9hWTZ3b68hEZwcj3j67RmnvTaT/gQZVdnd3EQlPiIGR0ps4fLw7i3crOTWnQqd1pDBUPD4m28ZDEIyGahUilRXHYJY0+p0iVWM7bj0+wPGgxAdS2wzR7VSZtJvYgjJyso6+ZJJrzfhxctj1td2efzNl0y8Lhk3LaEqY3j85VMqlTonJyFK2zz+5ik//pPfIwwTJhOoVW5zdDRhc/cWd+7WOT19we3bFV48+RUZU+EFE6oZix/98CFZ0+LW6g5v3VvB1iuEo/GNVsqNmUL74Fu8/j7VRo2zl5+Tdyyq+SybGw3yBrz68hvI71Ndu8e7H/4AJ1ciETJ1S0WQGqhSb4SZ8LzY1HquJcxcHi8cn/rECWbVp6Z2BjHDsL9jwV8jqV/rt/+9RP97xvkN2k0SRVx7juQCtfiNGcIF7eNiKoSb9HhVZpxh8RfP+T5/Ly0uJ6p4w3jy6ru7Xj+4PCdxaU56nrXzv6iJdL1eXgYz+AfSzQ16Ch+JKQypMQwTw0jdM9VUe50ZolOPu8X3xTExhzH//9Hm7+4SGnj5mcsr2sOiOE7ajwaRoIkRIgEd408m9JoHrNayDHtNbOEy6p9zpn207jIZtTHYJQhCosiiVKqBMmk3A7Y3N7AMFyNb5vTUw83k6fX7vN7zWF1NKJQsytUCg2GAkBaDfsRkMkAlEd4k4MP376KTAPQxtrnL15826TZDLDXhvbcFwjDYWb3Ni8khH7+/TWO1gpOVTIZDDo6OCGMDTR7bUUjTpTscMwoiev0+Q89nOPHJ5QyiwCKxBJvbO1RKNV6+fEGjUcDzEnq9NtliBa0cHMvh9OQUKdrkClmS2CQYjzk/7NCo1igWCmzv7PJyf0Bj/QFPXr4ijH3Oj8asr23y0x//Ces7b/Hq4BX9UY/Xh/sUVwp8+N4jWq0X5DMTthuCaNRH+8cUMw85P27TafdutA5u7n2kJhAktI56CBIKjoVRrzFqvaJSr2ChkTpAhCOM2MMkh2OY6FiSIEBqtFqWTi9Ly2IJr12oozO1fF5BTad5XqQUREkEiGmisItxAfO1LK5u4ssE4iJctJw9aAq/zAXs6+CcZYDkEoVkKtleY7X+Pv4irh1rsfE0C3Boud83tjcQxYuZUW+mDS3GWcL85vP7/nubTfUmZPqiMfgiY549g2tUwavj3cCIfB3Tu9xrGsR4CU6RcsoMwDIkitQpQoiUwNu2jVILfi6mHjxapa6qs85n8TjG1AVPw/WCyBU48qbiwfc8cT0TspauuOIYMuvmqnCiZ4FuAqRM8zclJFiWRT5XYdIrEAch1WKDo1ffIJTg9etX3LtX4f333ybyxty5vYudMTk/H4MwePX6GDfTwHKySGmwufYhY6+Ja+fY3ErY3DQplEziOCJKRuwdHtNs+hSKOQp5C9MQHB7upfUSZIBpKn74o9uYyqFzPuTsqI9pQaFYpF7dZjBo0203scYSy9Xs7TUJI1hd2+QP/+h3iSLFrz/7Jbadp1A0mPgxxyd9klgjtIlpOgyHI2zDQUi4fWeTfn9Iu9sh40rGwzGO49BrtzFMh26vh+PYRJ7HZDjCsQwMM0O2GODmJEoGrNSrvHz+CoXJxuZd3nv/pwx9iev2+Wd/+t/w/PlfUsgPuHV3hXrdJ++MsZmwvl7Ewub0bI/z5gDf/y3XUxCmJIlChE4wDdAixU3L1TJjf4ISgt01m3DS4fzl19S2bhMbWXLVDSaRxrAMPD/GNCVJotMgGSWm5Q4ViGkwz1SEFMJgXnpxmYLo6e4SYEmZRnRO+5gt4JnGkS5YgwubQXClEtg8cni2EaefM0OsnqnVM61nSVG+8IyA5NJzu67ClkB97z6+Kg9fN+bVzi8fSb8bF85Zlv5mQWnGZZagr4PCFllSF/El/zSc42aZ3S+2yyktZk/laobZi18VaX6g5XZdkJvUVy692rTCmHvBpQQ0NR7raZZTAUqQKDAMcwpFXmZu6dWGmf49c1u9IIGLa5Vcrs76+qMpc15e5zdgwVPha7YHgCtecKCJ51jRUubV6T3O+HOSxBgijQmIMDFEibWtd1DeIbE/4uHb92m3FKViyGrd4PZOldb5hPGgx/NnI9zcOr1Rj4cfvkehuMrXT4/ZWGmQtW0cyyWMT/nTP3yH0/MXuK7N0UkfFcUQx7iWwe52jUIhy86tNSaTHjEhkzhE2Ca99h5Fp8LtOxsIbfHk6WsKpZjySp1y1aXff8VKPoM2YxrreYbDkGI+y/NvntE8G1EoO4y7IVoKiKDo5kiUYDiMmHgTAt9h7J2D0hwenxAGExqNLFHUJ5vNs/dyj3ZbkctVMWxwXZvNnXV63Ta2adIaNDk4bdEbGRiZI+7sPkDFbWJanPSf883e5/helsmgz+sXn1GvT8jaXfZevuLh/XX6vSG7d7YoZLIc7J3x5It/pFbdZhL+lpmCZStQCSYSnSSMhyGH
8Qn7RydYjkUml8M2DogSGI37nHdbhCLL+q232b79AD8KyLomQRhjmw4qUZiWJAiSOQFSQiMkJGohfc60h4ViOoOUUmotZkCEnq5MKRYxDOK7/covZJKc9T3tZtbfwntdXyg1Cpfz+SwBM+Kag8vjcvXYlXaFICx3NGN+lzqfzvta98A39rV85sVEF9f69l+Cnv4p2PeNgZybaBw3IOTzt7hMmC89A3GjfsCUqVeRYaQEMQzj9LudZjmN41kZzUWFAKX0FAqaCiFCII1ZUZnZfVwzurjyx1xznt7E97bvXwvLbaHCXqjRPVtXMH2OS6ruhe4XxyzTRGiBYpqKQ5kksaScb6Asj+ZZE6wCtbUtFMd0Bi2KpQLdVpvT4z6mIymXs3S6r5EyRyYT4funSB1y524Z293EcSI0Caenp9RqFSx7wv23NlHKxHFs4iTB9wO+/uYp1VqZSrVKIZPnxDpj1A74/Ksv2FzboL5aZf+og5GxyOZKPHrnPfqTY0Z+m+3ba0iRo3U+odPqUK2X2NxaoVwrooTi3r1d2t0OR8fnvHx5QC6bI1EG47HHxvoqvjehXK6wu7NKEsW4bol8psrPfvYthmHg+yFhPKHXD3hw7wH7L1/Rbw/RWqKNPNrw+PSTn2GaIb//xx/QHkz45Sf/iY21d2keH7JSGVGrgGPD5tptHAdCO8tnnz7j7Qe7DAYTTs9GGGZAoi4Jh29oN0+drX2SKEKaDqZpkcQmYShASqIYgjAkiV5QXalQqBZpnx+SKa8jlU+/dYLhuOBrMrkyYRhhmyYqjMiaqdocRjGGaeCHMUJacwJ1UQqcQUizOaUpthc53sUVaEjOZZ+ltfuGe7zsjw3Lkv7sD83l1NqLXq+S0Wu9YW6Cr1wpc3WV08yJ+PJPUl8ZUOpLJ12e00wTWnpOb57iTbOg/rbaZYJ43eDfPSHB1BX6yvu9vC4uA0NXWxpwlkoHcaymAWlTO1esQE0ZQlodZq7ZKDWDlOZcIF3K16zZy7NcXmMXkguKi79dvucr9RNuAjPNmMI1/cyEIzHX3pfWu9DTc5enldpQ5DSFvpMt0xkck3cqlGtv8Wr/FZ3OIVl3wGjoslqz6fUEuWydINSIWFItZDl4/XMcwyZTK3Hn3i5BfIiKJozaHnGi0CI1NNdWaoSRTxBEuK7L/uERE29IrxcSxW1qKzGmMJj4Hnfu77BS9ahV6nS6I96tb+Jm82QzLn4Q0R94lBs13EIGz4sp1rLcuX+HyA/JZG26w1N2tnfo9QdpXYN8ho2NVZ49b2KYEfVGhjAeMvEHFIoVEjUhThSeZ9Dtt6it5MnninhxnjAGKbJ89eUxzaPUE8m2LMZ+m+pahlwmg+d5HB8eYmXzGDJmY6PE/svPODw6YGenhtaKrJsn9AbEfsJ7Dx+RzbgkVZf1n77N06eHjIbB979/fgOmoCKFVoI4StJIZWEQJRrXcjBsm+FwhCkSDr0BXhRiZMvo9hjXKVN8kOXk8BA3a5MtjsnmV9CxwDYknbMO3UEHYdlUG5vk8wX8MFks/AvQ0dJKvZQJUk3zyHApvuBaGnIpGvSy98x8KDHfu0sDL89hAb+k16ZJ864qaVe4wveL2Bd4wOJeLtLyy0k1rp4DV+zT3zvcTdriuf7Tih/dcJRLY74hk+r3iMwpU7jmOV1471Mo7PuekwBQc6nfMAySJGEWwezYJlpCrBY5gi7XkVjEh8g5U58R3MvDCy6/u2smKK4ev+CocNOXKxbnXlAEZtrJVEGYP8slG8LsmjlTUOmBVCsCgYkf+eRKa0g9JkgE7330rxn0Psaf7NGoKlaKNq2jLxj1D1hbX6FccWhs5Hh4v0IQTIhDH8vpUShZtDsew9GIbnfEy5dH3LvfoFQqYhgSzw+RhmRjo8pg4PF7v/cOrVYHpODkvIkf+Zy1T3l0/23AwEsmOFkLRcBp85yN1Q0mfogcS16fHOHmMpSKVT7/9lfoBHIZh4xr0e402d3ZxTAFYegRRROKRUGlWmVra4XhaMyL511WGg5RPOLw8IwgMPEDjW0ravUi7X7Ivc2H/PCHf8z/4X//f2LYNcm7Jr3eiPJKmXA0ITEchMhyctDngx/eZ+wJWmfnfPT+e/S7Fo2VIkl0SK/ToVrOcGtnBwNF6Edk3QIvnh3T7Uw4Oe7daBncXFOIHEATKwWWgRQSaSjC2EdHHhnHwjJNkAatsxYf/87b1FZvc3re5ednv2R//zWb23WcXJEHb3/AaqPG8f4RreYZr/YOufPwEdvbO8RJlOY+miXPu27lag1iCnAsL+KpO+AMCpJyBhxMJeupKLNc3JxlmGQZYZhRWzH7c+4Iu/xUpicsbXoWHhwzafSKnfmKeH/tE+cKC9DLQy1cfa/g1Ze+p89nqa/r0CN9/bO+bOidHV/u7L+u5vDdmsIFOGU6n8vfZokFL876kkF11s+lh7fQKMRcao7iOIWSTDOtjoaeBlUuJIoFfZ3m/5FpUkitVOr6ObNxXH53lyY/YyyLWV+zcq55/mLOKBY+Qd+35vQShrY05BWkaE74lz6XpyIAIQ0Ear7WlRII6TLxI0zh4ma3UWGE41ZYqd3DHz7nm8fPGAwKbKy/Sz4fsLHpoEUT1wlIoj5uwaLVPmZv3+PopEOtWsZ1K5SKPtlshvFoRKmUJ4paeMEAw9Dcu79NNpOjVCxwdHrOxkaecX7Ere0dTEPS7/UxMzGToEWsEjK5DJm8S6szYBj7jIMxW6UskfYIE59Oc4hrGxSyLpblYNsO0jDp9VpUq3k2t0rYjoE0AiIdUShJwnjI7tY2JyfnrNdKvFPd5JNfPaNWd3nvox9zfOQzGUfsbj/g9FUXnSSYMiHyQrzBEMPME0eC1Z0VjvZOGYx9PnznI85P9hAq5sXT17h2B9fus954jyj0aHU7oAxsq0ysYOLF+MFli+f17eZMIXbTQBxDE8UhmgB0jGUbFPKZqdoVo7WkUqnx+uU+T56ecefO+wx6XfaePad98grDtTBFTOsoS7t5imUYeP0zZHKLOBwTCxfDyTHbAMvYdrpBFoaw1GUxjVuQU2qc5veZ5q5Hzm0L8w0ilgiLWBDRK26GS0FxekkSkxewdHFh82m9JJV/hzKgLnAf3ihpi6VxLz6FKbZ7CfK5to9r/rqO2QpxHcm4KpVfPv5fkyHcRAG5wnAvK2XM386V45cPpER/cd6MSM8IuFKKRCVYloFhyDSjr05zcxnGNEZhRsl1muV3VkJztjZSWGlZBb1WD2X2y83Rsitv9II2930yyEwEWUayxNK/GXPQpE4JV7jX9FNMNffLSQqFnDJG0iwHQRwR+grTqYO0KddTcUsAGXuIFx5y3h5RrcFg4BHFCd3+gOEw5vioR6s1IvIzrK7muX/3IfXVGMOKMS2JYWgCf4LSksloTOgrJl6IZWUBweb2CtlcnjgKcfMZ2oMOoDEtA60SOoMmjfUGQ9+j0VihXC4RJwEbazUKTobAT5iMPbSG46MTLMvEcS2iaEDJqmAaMblCjt5oQqnsUKsWEDLm/oNVvAkMhk208Hn+/FuePDnk4GC
M7/0l+ewG+VIer9cDAbZjkytmOWv2MUyHJBnROe/w8L23MK1ztncN9l8PQI6JkzGWo8gXHaIgYX+/R/MsIJ/1qNXuslLP4weZ714E03ZjppBECVGoMF2DRMVYjgEi9aAoFLKYhoFhZxCGxVmrSZgYvPXWD3jn0UP+4e9+TjQeEALhOOQff/bv2FpvMB70cB2HwSSk395Fxw9x83lCpRDSXEBBLEjhsjglpn/rOZeY7rVp0RJ0MsVxF1juTLJczjXDnMBeXOcXjNoClBCkEWTTKO15qoepxrHEZOautNdQtquQ1XWwyHItiWneqPk8Z1IrXJ74lYI2Ai67bL4JhrmSleq/Giz0T2s3Qt2uubelBfTm67hKgJeZoJqWw5TTGsqzjKeOY5EKBHpRc3mmMMzW4Rul9O9h6ILrDdA36OcqM/nuhzCT+GeuuxdMWkvqyZt6EdNB52tSpMKUEGq+rpIYbEsitUWSKKQJSkc0uy3ymQkJgvXdDbzRcyrZMm42Yjg4JWvn6faGPHtxzGhYwJsoDFHF8yyePnnFnbs/wbSGJCqc1g0wqVUrOE4ez0s4Pe1gWRkmYYQfxgwHAXFd0O91iCKf0SjAsgRFx6K+WmE49MmVHLbvbNDqnpBzM0SxZtjqowKFSCxy2RxCalxHcPfeNmE0QasArWPKxTJHJ0dksy6lUh7HdIhDhVIxR0cnVGsbvPvefQ4Puzz9tgkaMo5Nq/UaiYmb1YjEYjDoYzkWOzt1CsUMk7iLW5KUa0OG489onR8TBRPW76yws3mbzc0CTsYiCKCy0iCblQwHBkHi0B8H5MvbN1hLv0mcghkhTYmbM7GzNUaTIZsb20xGI8qlGs3zc7QFvjci1gF+mBBGI9bWKoBPMW8SRh0cVyDVkOZxn4xtYdh5co7J3VsbGFIhhMayDJIkXaCLhG4zHrGo2qalukJ8ZzDSjHoorSHRF6JJk2SB+i9rDRcX+RSv17NPMISYwlppEaDrEufNNIXLCcuW2/WFYa4S5IXUuoyIX5T9ryPk1xG3q1LoJZhFX4Nny5sQpP+KTd10/Gs4+lK77jld165kfJ0yoThOGcC8pngUI4TANI0pfARoPdUKBAqFKZg6QYg00HIuBy/m9/2xE9cw+DcS9jdpdRf7+66ml+a0JC4t4KOZjHLFoTjVMxYylkALke4ToRCAFNM9KDRh4DGZ9CkVS2ilKRQaRMExETmy+RKWhPFoAsYApV2UzpLL2+SLGq0lr18fEAcDBn2bP/rjd5AioHV+RqI8svkcEoOMaxFFCq3SAmETz6fZmnBy5rG6mqfdnqCSmFzOwLRtAn/McKgxrCbjyYRWK+S8dcat21sMej10HGDEAq8fopQkMRKanQ53blcIAh+lQ1QcoBI4Oz7D9yIcw0YlEa1+h1K+RBTFZHMZHjy4D7gYRgZi2Np4i7/9+28pVgp0myO213cJBhMM0yDRgk63zcHRS27dq/PWo3UOTr9lNFYYMiGOA3r9kPfe2aTb61PIbzKeRLhumSjQFMtVfK/Ik6evCKPfsveRbUfEJNNgnAyblQ3QBrl8lbPzASq28MIxQTJhNBkhDJsnz77k3/37/4GHD+9wfvqETqeF0gGh7+PmMimXHvoYTomnj7/k/dI6WbeMkA4GBsl8RU4l8bnkvAyizOwIswR7C2lNSonQgjhJpqmJLUzTTDevnOZUuixZM2UIOiX+M7WaacEfoWdOrsuE/JK6zEx7ecMmFBfhjIUd4BLRuLCJL2zT+Xniwm/XNL08jYW2dHluv71o7d9imxPB2bwvM+GrBPI6zH123WWg5hI7JSVsc0/9afyAwjTFFDZMMwCbljnVGGYQY4zUUxuDTpA6QZOQRAkIiWFYqYvhjDHo6Tv8nmd+MbxgyV36uptmWSEUF+5whut/F2O8DFLO4TQWr+GCE8blwWcjCnHpoumnTgU+tMQyBaVCLt1nhg3kkNQpl6pIPSGXsxgP2xy9Pmc0jNnYgJPTFo5bI44ku7cyZB2XlZpke6dKNqcZjkEpg8k4olC0EdJAqZBEJ0SJT6VcwQ8dun14+uyctx5usb6+jmlonj75ivv3bpPN2hTKFr1xG0REHDloBflsnltbD/iP/9P/F1OZaGFzcNxl53aDSiVPv99FiJB2q8fmep3QT2if91mxXBCKbjtEhUMOD9psbGzQ7/fodn0ef7tPpZBnMj7DtgLOTgcI08Tz2zgZF98PWFlZ4+nTZ6zU6zTq60xGEz7+4H3a3T0yrknrrM+gE/DZp48pVWwmk5jJMGSlssbh0QlaGZyfd/F8QS5XfeP7X243ZgrVWpXxZEgcRyjfp1BZQ+ESY3D/nbfIlwu4do6f/+PfYBgHhMkYLRXP955QXVnBsFxydhEtR2CERHFMFCpc18VIAs4P92htPuVOMcf56RGmVaK4tkZoGMQajFhiaEArYhJiqRHKwBDT+lyJQidTdV0olNRoYiBMXQaVgVapT7llSUwjQWgDlYi05KEhiSONJQ1EEmPKGJUEaMMgFJpA2UjLJvYDDKmQSHRsobWJBpRQaV3WBIRKc0MukNbpp5jrOgu3Pq6BfKZ7bGGQnm5uJRab7ALktaAec4a01E8ytbHMqcybeNWMuNx0UVzXB9fn5P+ndnahq2vqbc8S/l0+NstYOjMYzbRLgeSCS7FYSs6QJGgRIO2EMNAIYeFYYDAhmowxhI12XGIjT5q018cyugxbe5jaxLZyFCsV/LDFwcuvqFYaZDIl+p6gtLJDorOoxEYqE+N6I84F6EvIVNNNgUM111qFEBeq8k3jo1PpXE5ZggIShTQl0VSzTQtbLb/kZTYprmiUy+LHxR+udwHRly4WwpgfTJe7TosKSRMpzNR9VwiiMELpDP1hH1vERN6Ek+OQ9rmNNykx7GvK5YcMmgE6hp989EPGkzOyBY+YgNP2gNFQ8vnnL3nr7VUy2RzlWpaRPyZTyJIJEyKlsWyDMPAp5m1Cz6BaXOPg4BUbG7cwLQcv9CmaNtlCgSTWDLsBvU5AMWdxfjKkUKghsjZjT7GxWcV1XLxJwmjUoZC3+cPf/wnnp8c8P2rjj2DYDtja3iWzVuObr57RPfeIhmdkHlUJxxHVcplSOU+vN+b2nQb9/muO9sbcWb+LFDY7u1U63RaFsj2FhQJKFYte+5Ri0aCYs7F1gV5TkQRFBn2Dg71zHMOGUCEpE4aSLz77jGq9zJ07a1cX3DXtxkzh/tsPOTzcw5DgewHr66skZGkPRmQrRd77+AOyTgUnY/GrT/4jrfYBIz/EdgXCSpEAP4rR2idfyRDEEiefRSeaUrFIOWvh94755V8fs3X3A876bRLbwK6UsB0HC4EJJHFCohMMU6IjlS7OeQGfBC2Sabj91ABtCJI4xjJTI1IceBzt7/HwwT06zXNWqg0QECY+haxJ4HfIOiGGmqBljBdLksTFdKr4EVhWhE4m2IaVqrYJJMJEAVESYRuprSVNUS3TFN8sNvZM4hMz6/bMdrEE3s78pRYEXHOxbN3s3Isax+zbBSmRJUO5Zun6S5t6nvr4v7AtlLnfRjcXpO
mrgbkzmPDy3NNjCw+j2Tli6W+Y+dGDQOipVmBERHGfrOPS7zTJFSya50/54pNfcP/OexTW75Gp3kFIgW3GGKqJP/iK5uEZhfwKhXffoXn0NYcvfsXrQPDo0U85bse8XSgibAvTdDESSRwmSGumzi80uOV02Jo0SZ2e3/sStLMkMIgp89MCtEizEBvTIE6kSAUYBIssfJehppmUv9AxLjzzy+/ljdxiSW+YPVdhLMyAUiOFgWFKhIYk8ImjkIk3wbFgNPZR4QiimNX1R1SKqwz7+2TzERJw5AAV+kxGTW7dXWESH6GNiL2DA3J2iXfefUixYuPkcrS7XbL5HL2+RyaX4+nTI0YDhWVKDGHSWKmQy+bZ3t5ByIgoHhJEMa3WhDBwaZ33mAxDBt1j/uKf/y7t8yPq9QoqEbieQvUnjMcjQh/WVlfZWC8SeAGjQchkEBJ6CafHE0zpEfsx/lCi4wK2LNJrefTHPrGOCcOYajVLxq1Qytf56+ALvv7mK1Ricev2NqfnhwxGY4IwxvO7dPsJv/OT29zaLqJVSCnrsFrO8uz5KSd75wgU7z/axDQtdrar7L8+5V/9yx8TqZByJcdN2o2ZgqdCpGPiZh20JXl1+BItMmTyJX7+87+hsVHk3Q+qVNezvPf+PfZehRwen2NoD9MIuXV/k0/7e5SKK2ihKBXzjEZprdTbmw0ePbjDF988xtcmEVk23/optmkQjwfkZQERRljCRKKIpMIwrDRdxqwosdAg1eLvmRYrzKnaqhA6wjFCXj/5jL1vfkneydLvjPnwBx+zulPHMBUFp4vQ53Rbrzg+OsVxtzjv2Tz68M+RIo8pQ1xHMRq0SGIL26oitIs0HURiInXqopgaGQ00Ajl1oRVTqU8JsTAgTjfiBcMzqQe7uhzAJuCKe8gVqVxcOD5LRJFu5sui99JV1/b1m7e5tPlf2NeC8C1Js+IqkfqOW2JG4sRURJh3OZ/nAi6RAizbJAr7lHI+3vCE55/9nA/ev0P//CskBxwd+TxaXcPVCVobWNrDNsaYyQknr/+B+vs/4uBZE89vMWw9wxRlzvZfoESDQbtHub6CaWkMoRDG1Wj7i/eXvjkxyxYsplmXLuRGn7m1GpdEA4UQCdJgWuvcSKFQMYXILiKfLNbMTD29tMaufS83aFPhaLYmpJRzy0ocRyRJxHjUw7Y13d6AtfoG4TiHNzTRBEgLbt/LUyid024dI2SIKRN+8PHbdAYHiDhFDOyMJEl87ty9Q71RpN09ZX1rh4OjY4qlGs3miLW1Dc4ZI/CZTMbUGyV6g3MSFWLbBufnPVrtFoVCBdssYOoYnXSp1rM0W/tIwycIJthmDkWCYSgyjqRSLiJFwvNn+/zg4/exZB5DFIlDj+bA5+zkOasrNQyzxPZOgfF4RKs9BgmnzS77e03u3W2Q3y7y/PkTOr1z/MBIazUcHxHHEY6VZ3NjEz/oUqsWyGZXyWZW+PyzTxh0Q7yBRbcX4mQL3Lq1wtbWGllHkURjNjdtQl+Tz69Qb9Rv9NpuzBRu3XtAoVLi/PyEYq1G4GuGo4jXT74lUiH/5r//v3PcOqCxkqfTOaSSsyjc3uLofES7uc/61hYHhxXqtRLNsxNMI5Xs793eoZBzECTYto3jFHn3/Q8xyus4GYs4GOM3T/jik1/iWg479+5RWF1FxQrHsNCkboEYqaSVzO0O05z1SYwUAkMHmCJm1D3iX/2zH/PZL/+e1sket+rrfP6z/8Af/LPfxYwVo+Ql49FTDl5+xlv33+GzL56RLbzN2atfs7n7IRkbuq19vvj8E9bWd7h950OCuECkQGsTNSfw0w3INK0yU+1haiNZ2FCnf8jL1O2yveAq9CP0zCOKJShqWaJOKb3US/rE9cDwb4eSsyRE/ha0jusyK10X3nXZQ0fP/10k+hqm8ImezzOFTdL3pKI+tuwz6bykdfQMNzrg5Rff4uQSZHxCb9Rm1P2A1fr91KMNn9bJE4ad59zdzRBN9ijVGjx58QmD7iGbqznOjvYJhWIQPOadjxtU6zmsaUT0Av3TLJ7csnQg5iCkItU8l21sF+ppzIUAjSTBEBGGFEQJSOlONcZkOtxFgWSuRWmNTi7M7I22pu99vXPtY6r3aua2LCkElmWSRAmum8Z6FIolgkijMTGsLK5rYwqLQmlAHD/FckeUayGOHeOr11iZEd5kSKJ8CsUsjfoawkjo9DsI0+K81aV53sMwQkwjz/l5k1Z7gutkuXVnl+7gnHK5QJSETPqK4+Mm/Z6HP8rROd/DkAbbuzVu36ohjQHZrIEQFqPBhCCCTmtItVyiedKi3iizUqnT74wYdCdoZWBIl2oly+FRh8E44Oigw+7uJvfu3UYIwZNnT8i4JUrZkO31bcrFEuVSlkzWYjwKUVoRRRrLzLC5s83e6z08b0SrdU6jscLL51/w/Nkh9eo21co2lunx0Xvvk3GHdNvHdFWTJJow7gcU82vYxSKds99yjeYHDz/kb//ur2j3JyTJCEtmGI0iNAZaCbyxz3/+6//IzkaFe6tVIs+nWqzRZsj22grtTovt7S2SSZ+CBd1BGxlrco7BcDTk1cERx80+ImfxnpnHsTIkwZjPf/5znn/9GZNhF4Xmm2ffcP+9D9jYvctaYxtT2sQ6WhiYtZpupukNGhqtQhypMeIxRy8+YcXd5t07JT7vvuDoxWtcM8//8//yf+R/9t/+AefdrymXJ6hxH0dNeP/+Kj//9Zc8f7JH+6TJBx88pJbThKNDgqEiHJexM9uoWCNliYBZyUXSPH+IOYSU1oSYEbaUEIgl4r0M+bD0DZbPYQmnnQFNU3gJzXJ95MXlM08muTh2eVdfiZy6pl2yVVz3e6qdfU8/N2xXvLeumffc2+qC99SU8E+1gRnDSxXIWWjjjFmnBMuQimw+5PmTX6O9Z4yar4lHHbrDNvmySyVfIAwmvH71OSur96iUNhCMiKIuphmQLbucHnfSlAVSsL7eYKVSZ+LXCVnnzjsfUamvoqRBJDSI1CNpGa65LhBPLmmLM1Y2s48gkikDTmNypBAInWDIANsI0+h6JZGGg1Z6qnnOvOKMuW1CTkX5FOL5TRbBdS1dp2KmsU8vWaB4CQkps5HSIFssEQY+oHFskyTUBEYf5BgnEyCsHqP+Kb7XZDw+4/buCoPRgKfPX1GqOdy5ex/TttFC4+QcBoMBQkqOj5qYZg6Bw97eCa9eNqmUq6gk4ez0jN1bDeLY4/TklPEoJuuWCWwbU2fJWoJ2t4mkhG1BokLQBpNxwHhs440ihp0Av99CK8X25ibVcoF8ziGzmyPj9Pnsi2eMxgl37m5xetpBWpLBaMzewTGFQpF2ewRaYySa/nrA8dELPC9mdXUF3xsQhy62mSOJNePhAMdSSGHRWC3QPp/gTQTtM4dipoHvlNlcu89a7T5B8JpMNmQ87GMYmt2123gjiUgsJv3fcpqL//Sf/hY34/L2ox/w1Zdfsbp+hw9WN2mdt/nyi0/RwkfpASevDxm+3ufO+jqNcoZitoQ/9Fmp1CmrKv2TkN6oR9AfIK0M49GAb
m/MeTfAqe5S332fF0dtht8e8LO//I+Y/oiNlSKrKw2KK1V6XsjR4TGJshl1Y+7eu49huIQqRAiNaZqpe6VSaJVgmiEqGeBaAtMYslZV/Pof/gdqJReRdHnv4Tq//uQJajLgF3/zd6ysunz41rvcX9shDEa0jk/ImB6NO7cYDE/5y3//LT/5nff4459+wLNXT/mHv/03vP3uH7N/GPHgnd8htlcwHZc4SrCd1Hid6FQ6E0Za/3W2vWGRNz9J0p0zS4GwsCwsqwZiCiVw0UTAgpEs2MlCs1iUOroERy21pfyyN2rLEuqcHP+WoKP5GNd2dgkPh4U77SxS/bJ2pTWGhCiOsG1nyWCbXqO1xjQU7dYzBv0XeN1v0aMuOcdmZ+0OISEHZ11yToVWZ4/Pvvhr/uSP/yWGMaFQthj3bY72T+n3JHYQY9plGo0ctrGC4Tewi3fojnyi7oBiqUA+6xKGPrZpshw/k0rTy/c2DcQUU0BxysTCMMRxDKLYx7IESoJSJrZpEkceeTdG6iHjYZ+EHEIYmKZLmEQ4doYomtZ/0Gl6mFn6bqXSuJ7vkw4EF+e5eFfMBZILgo2YQkhCIJScH3ecDIIEx80ipELHHrYTE4VdHKfDyHsNYkIUD/CCLp3uKWtrFtmsYH0jR6FUo9sdMBp7aJkgpKDV6pNx8/Q6HnHkk8uWOT1qI3VCuZBnNOzz7vtvYZiKbq9F5IfEvkIYMdVinW++fIprO5RKNu32Kd0OKPpoVaCxsoOtFZ0g5ON373J2eobrSvyhz0k44NbtDcIooDc4xbJCGo0865t1gjDA8yO6gy5+GJHtjhj0QzKWjUDy/Nk5k8Bn4E0YTwTlco3xAMZ9n1KhyMHLl6xvFPlf/i/+Bb1+n//817/AtCqYskrzLCDrOhwddBDR12iOKRZ7bKxnCEPNsA86znJ80GEy+S1HNP/NX/8Nf/bnf8rm7hYog93d+wx7PuWS5KMPfsSzZ19x1u5QzORxTAtpVonJkS1ZfP3kFbVGwGp9lYxTolioMAk0hfIK1WqD0fgUO1PCya2Qr26CmeFg/2tK2QzZjMP6xhr1zVU+/t2fMvAjxuMA30solhoIaaWSh7SAJCWmKsIQYJqaOG5j6gGDVou1qs3dbRc7ydA8O6BUdnnw9hrDwZA40tzausXB8R77L/tkLI+NzSqm7ILqYxoRH75/j6ODQ2rFHEHUpZI3+fmrzzk7OSOTu8Puzi3y6zXCxMfUitBXIAwsOwNCEEUwq0sqgVmkrNYXvWfeLGkviNxU5pt9TV0pL7tsziGlGXPRMwzlSs8XtZI3jz6b32yzXxsd/Z29/KbtEr59aYozd2Tm8SRT5jd7RmL6bFSCVhFoE60UhjRRSRp/IA2DJPYRKuDli8dYuklGarxhgmHm2Nha4+BowONvv8Ve2aBa3+Ls7DGFfEyvd4oWmomX4EU2O/cfUGuY9Dr7dNsTut1zasYuvhzQmrzizr176DimWsqjYjmts7Bg1hdfjVhKyLiQBjKORIgQYQRIkWAaAo2JwMBxQ4g7DLoviUOfXHkX260S6GjqdOFjGw5aJ6l3kCFAKYSxJNl/z/u4LhYGFlDTwotppjWkzZx6R0GqnRiGxPdT4y9aY5hAMsF1POAE2Gc47CKEz2DQYXt7nWqlQhgOse0M47FPpbpKtz8h0THeOMb3FM8ev0QiyGdL6NhkY3WLJEpYq9e59aMPGI56tFrnxL7CNTKs7ayRRA46sbFtm5V6mULRZn2zyErNxbLLJFGMDk2kluQcl9CP2d7YxDQVo/E59ZUqvU6TfD7LzlYdNATa5mj/Na3mmEI+i+vkaJ73CMaKRmWVk6MWxWyOQS+mNehPSw47dDt9kkDiGAYymVApGHzw7hYbjRyjwTmlQpb3P/gdXjxv4QeCfMHCNg207vMX//yPieITfK/JoDfgF7/4hu3Nhyidpz/u3+D9/gZMIecYfP35J/R6p2zt3CLjOuTXy4yHYxordYq5j/n7X45IkpDN2w+wXRuzsoUZBNCPeXnYwrRqlLJFattvczz4hu27j+h1utiWiz8JqWfzrK808BNYKboUdINoOGZr9y7ZehW7skLc7NBYa+APfWJpEKkE0zJTCECAigIsEWHoCJMIdIsXz3/BoHVC8b07xP45rjOi1rAJAs3h+WsiFSGEQbc3QAFPnj/HtTy6gxyJSFjb3OKtd97n22/3yedynJ+cMxqdUChJHtxZpTuI6fZf8eybv+Unaw1s6eDHEYYWaGETByGxMjGMDNIwmWVQmuH/WjOv3DUnz1rD1GidHljoF7OMlDAjyjOGMFMfprjzFEdPbQozzWFJip7tUEDM3Fa/oy0AmoUb57VX/La4gr7oW38t6iXSnELLwYKLOhmkzEKkErdpSoRQxHGItAWWJaf8I81KaWTLaOUyDiwirSEyKcRZXh00aba7ZF2b0BuQMyJePv41tjNmZWXEwd5zEmVRrW6wvbNFrHtEkcb3PJLYoN87o7BapT/oMOy0CISkmr2DNGblZKdQ0iWirNGoadyiBITSSDRSxJgyRJgxQgTE2k81TWVgWxqTFo7ZQQUjRj1FXmR4fdxj89YtTMNJbQ2BQhrWVGNI3beFMFAYfJ9w8F01NMRUBRNLZ8PCTXmqkwDT2hMyQZo2UksMBJPJmNPD59za9rEYEqs+0pR8/IP3ENOI4cHAAwGW69LrDzg+OqHX6/Po7Ue0TgcUs2W67SHDIOCD3/0Rj5885sXBS0yhqBRtcrkshpYU3SLjbkjzuE0UCgb9EKEVhmVSrdZZX2tgmh6FHPQ7bVrnTU4Px2SdItlclozr0ut12N5ZRTGmUV+n2WzhuAWymTzlbIXjwzHeyGc4CLGtAlK7eMMEPR6jQ5PMSoFW9xwtTCZ+NHWgSTCFxpQ+qytZKuVbvP/2Xcb9c9rn+9y/t07GVjQaOQ6OjgmShHzR5Qfv3sF2x7x+sseTb59TLtYwjDrCrNDrDpnE3/la5+3GTKGctxn0m7TO4eT0mF/98lMePnif44MTNtbqeJMhlZV1LNtmdfch9fU1Nm7vcHB+ynko2LqTJxyEmAWXbjtgLHJ8+s0rJv0uGcMkViHhaID0J5weHlLMSILIZOfe++w+eIhZLdOPFLlqHREJLEdguwZKCBQxSRKASDCNCKkmOGaCayaMhm0mg9fEfodw4kLS5+DgMXcfvEW20OC8FVAoV1C6S6wTIu2jDMU46qIMm15/iCssPD+gWi3ROuvhacXZ8TG262Hbmvt31vj1Z3vkHB8zPKM/8qnUGpy1+pSq6yhhECtBpNJavbFWxFPYIoWHZYodixRjvhBstmxuuPwJUwFyhpTDZbK57P0xZxYzbrGc5+cGlHz5DHERL7h5ezOCde2IlyD2a2wKlxjCBVxtKRsuCkOK1PFAgmMbxGGIEALbsjGEII5c3nn7p3z22V+SSMH2rVs8efwFljFGJIqVagHMHOPWGYPJPh//+A5EI2wRkynkOT454uc//2vWN8rkHJtnzT22tz/mxeEeheomyThk1DrjnbceIWNFLGbwoVwyll9iDFKk
AoFOAy7TGJcExxY0z08wrZAw6SMMg3KxTjjpE8en6KRFHA0YeR5e6FCrbpDEA0wnTxKF5LMF4jBE6zSDQBj6aGmjdZq2YxHHcWk+12iaF9fF7PMiWxCkSnIaUpIGjUZRgjQEiAQVx4T+mEI2T2Z7h9bp35J1fQb9FpiC4fCceqWMIRJUAp7vk4wDegOPbmdMpVJjPIp45+2PeP7kNf3OOZaE9nmHyAuJg4TtzVUsUzAejmietsjnykyGUVr2s9UjkynzB3/w+yRC0GmfMx5FFIsmrfMWGddG6CG97hnF7Qy3bq+AhlZrSMZdxQvgl7/8NQ8evIUpM7h2EYGLYzqYWmLZBTbWbzPsh+y/PKF1PqaUzyOEiek4KC1YXVmh0xnw4MEWRqy5v1vng7fvYEqBFppm+4hS3sawTf72b/8Du7cf0R+95Ed/8LsINaQ7+JZ+X+NYGVZXNynm64TxkC++esbtB/fJ29Z37rZZuzFTGAVjMG3CyGB7+zanhyccPP2GKJgwtD2UVDx46x59LyQx4PjsnM+/fUKE5vmLl/zpH/8ZTtlhb+8Jh4enJIkgiBJ+8KPf5/jomLOzLr/+5S9pnffwwiFK9KiWywjVRLpjdnPvUypuMvEDhJHFzGTRDsRRBHGCbZpkTIGtPY5fPibyTnENn1xuQi0vyFZW6bZOKRctNtc2eP38JVa2S6m8Ta87xrZdOt0uMqsxbIM4hDiJWG3UODxp8c2Xv0ZaDU6OmuQzGRI0QrrUamX8MGRrvYqpe5jhCVkxYdLfhyCkdXBIvf4ehtXAsGw8FSK0haGMOSETiDTfPhqmUbSJUiiRYr9ymrspzckvkMZyTn2RXjODIcQl+4BIFY6ZK+N8r07x+nmM11Lmghk+f5HALvDi9MvUhqGXf1v6faldGHL6Rek3S5uL8TTLLripZqWXv1zoVOvpMxTJFE5LUyyYQhHHEw6PmpSKVfJ5lxff/JIP3n0LwzKJE9CYjIUgn69y//YDDl49ZdA9xzQNTk+75HOSbN4kmy/iRyMaDQtDdem3Wox7E1ZXq5iM2FmrE4y7jNSY1UaN4WRMfXWb0cjDNDJsb2xRzBUwTIMQhZBTbWiW+l0v7kcAxrxIcxqAJoTAlBY68fGGpzTqBlGwDwlMui2SIGKtkSX0HaSRpTdqEQwjlDFi7BXZ2XkXaTpo1ccQIywrwTQdfC9C6SJKWpjSRiUKoUXqtaWj1FtOmMwqIs7lk0RPYyHS9SXFVDO7sG5mGYo1WoIkLalrSAMVJfj+BEsEZPOKcNKlWHCx5RavXrzg+KRLJudiGIqt1c1UEzJNjo4P2djZwTDzuK7LyWHA3tMjNjcjvMmIKIw4bQ344KMhw0mLu/dXKRSLHB4c0u8PkNJmOPKpVlfptIfcvvOQQqEMwuXXn3xKHEywDYdfn7wmlxO8++4DDo6HGG6BQAl+8asvaNTr9AYTDCNLEvnk3AbffP6Kem2DRmOL/lghYoNGrU6lusFgEOKPPfKZPJ1oRG8QsLrjMJnEJAiUjtBKcu/uDpsrFczYQ0chtpPBMCVEAbd310l0htu7mzx78Zh8ReKFB+SyCa3ehI3GJpVSjo3VFYY9hWm4NBpVYq2or9/93j0Hv0lCvESwvnmLYmmN9bVdinaRaNRiPAzIOCF+EvH06WdE2ubnf/8zkhBCP8YPIpyMy5e/+Af+6I/+mLcf3ObV08+wtSafy3P/3kOEVeT5/s9JtEnzeJ+VmovhCkK/y5MnRxyf7/PN06/4V//6f02hvEEQK0IjTRsgDROhFVL7eMMWZ2dPScYHfPXJX7JSNtm91SBrmjQqdQ5eDxn3YmorqxirBZqdERkjhwp9ojDBi3w21lcolW1O97okgYPhuHz46ENavYBREDEcdpmM+oTBhEIuy8lpm3t3d1FRyPnRU/aykmzForKW55PHn+AYO2zWtnCdFZp+H2wLS9ok8Qy6SKmyFAIDiTBSAp6YEiU1SaKI4wQ1hbjSl5EapxGLal6pOjCFI5YpuU433/JYy5RezOjrNPhpdo0WM6nw4jq4kDh82WNpLiIufp/ynekYYqm62Uyqv9DZFVBCXNJmmBrLZynP50KrFovSlVpjCEESR5hGel4cjrFkwsn+K+KVIe0kIRjvc/qyxy9//Sm/83t/QbG2Ta5o0zqPOHr9BEcMCEdNJAn1xgqBP+HktEVtNcNw0uStR29jGR6VfJW4EvHs8VNi5TLotIkmQ6J4yMC3qazvMvBjCkWbTL7A5lYd24IgDjEMe/6QZula9FyzSYPSTC1TSVqolJAiQMWgPHY287SanyNopdCYytI+7+KYq2RzksGoTSYTYNuCiH0mfgWSbSxLkEQdMnYPQ44YTxKGHY1TvMMkmlAtb2AZJpYBYRgirTTbV5TEaCTaBK1SEFQAxtzfV5HGSMyYubwghCg9TWs/Ux2UIElAJRrD1kzGLfqdPcyVhFw2S6W2SxgmtDsnvPvoLTbWtjk/2yfwItZXN1GRolzIkcvkiIchv/rZEY6ZpVi2qFTr/M5P7lIqS+pBiWqlhjcJyWWqGDIHGrK5EmgL35ecN3uUK1s8fvwUoWF9dYvTwy7Dvmb/5YDDva8ZeyNilbDt5Rh2RxwceORzLr/61SGh30cnmihw6RMz7p9yeNqj259gOxm80QhT2OxsrHOoOqw06nR7Y1683EdLhZPNM+yN8MMx//mv/oa376zzRz/+IaPeiIP2PvmChWkn1MoVhh5sb6/y6VcvaOyscv+tbZKkhwjKNKpr2GQ5OzgmGGvi2OD0+IyR56HiITdpN2YKeDFnh6cMh4rzZofbuxuMQx8/CpG+IFMo4EQ+7dMWjtL4gY+rBdmMgSaid/yaf/yr/0BvOMDQGtuwIE5on7fYXN/h4cMxT755gu3ahInF9to75AtZ/vFXP0d4GapmmSdPX/PeR6sYJkgdEycSQyRIGSESn17vmP/w7/97/uSnb/ODH3/EJ//4V5yc7FMtV3ipD1ip1jBcky8/e0W13mDQBn/cYdj10LEiY2c42T9FxyVaJyPUSLC+5tDrDEmkzcvDPZSGeqOGZa7w4tljCgWXk8NDwsAjNW10aTXHRLpKxjIJvC6t5jf09/dJMg1KG7ex3MzMAjqtvSAWnicqzbg0k8akIbGlgQAStajnO4OAZh4j8zxOzEj+QtqUU8VBTxnDLEPsIp4C4mlGz9lv18UIiMXp6fclaT0V2K/REsS8lEx6y1NJeAFppW1G0JfbbN7LM0jvfMZapoxkChst+Mc0oleAStLU1loI7t67QxJEeL0jhN9h//ErRs09vv7s77DzDQzLx9RnVCsGOgowTUlwPoHEYRKHOLZNKW9y7+5tpPLZf3WOjkwsy+aDj37I6VkHIRSmU8QLBfl8jtgf4w9HPHhwn0KlRBh3cLJ1TGmiVWrvmd36Ms+evz3DTKOURYwgQeoI2/Sw6OOYHlQFIy8hCD0c16JatTDlEENCxh6B8jCFjykUQRiivQNQA1zh8e1nf8WDB6tIbeIaJiJ2qBZvYxsDpHbRicSQSaqFYmAKk0RDosOp1jl1a52
9wSnj0uhpPYVlvDOtfjcz/KcdxHM4KA4E4ThGxxGt8zMGsk8+U6NS8qhVMmytrXN21EZol+PXPbZ2NpGWZLWxxv7eMdlsFtuxEQK2NrdwHZd+twvFMqvVDbKZAjqUtI72KRSKoCFj5JDSppAJCTOC4/1DkjDAlJo4ChkNxrTOAoaDgG4zwgsSdm9vcLwfUqms8ezZc+7cKfL1F59y9842jZUak8mIXKbK8UmLoZfg+1Aql/jm29d89PEHZNwijlum2f6cTNZhMGpj2CbRYITlmKxUy9jSJ459/uEf/oGSm+X3f/IjKuU8/ck5/cmA8QTq9RUePNji1r11er0x0ghZq9RIlMLJZSiUFJVylsODJpZj46BpdztX9ud17eaV10Yhlino9voUGzavTo5plFy6Zx5+FLBqZ0lCTaNcZ8gAPQlBJNhmStjCYYueDhj7EUppXNsgl8vy7JtvcUptarVVSuUmMokp1TbA3mB15y5bbUW722R18y1iZZFogVAxlikRiY8h0+yn0sjSWNnk937/jzk++pzddZO7Dzb5+d/8I5//8gvymQIZN0vWdRlPhkjDIUlshMiQdRr4viYyInAVL5685v7OHSaDCc++bYIYYeayGFkTnYQc7h1QLGTYWKtz/94uz55+TSFjcvfuNsguh8dH1Gp1bGlTbZTYe/0JXzw+oznO8r/63/zvsJwayNR1Vik9x1nRiyR9KTGdwSXpJpJikakzSTRKK1zbntNSrdOEbagFJdXMimClhr3l4vBiCh+paeGX2RULheEikV6Gf7h0jliCpi6bDeaY8owWkE7xCtO5jFVfa1leTgyXStNKK6b4GQI1TWsiSaKQOA7TSOU4plIrkhMKe1Vz/OwQHRucmz5v7ZYR2SyFcoHVapXDlx2GfTg5PWVlJcvJ6Zhy0cUQmq31DDs7JUbjHt+enVMq7nB61uF+IUe2UMCbjFFJkSB2WK9XGY1HrFYcCPc5PWriBfeIhSKfryMSMX82WjEvyzlzxdUIornNSCNEgGSEFD1ymR4ibiKSFnHUp93p4VhDivk8vV4X28rS7zxnc32NJPJ4+mwfaa/RP7fw3TwGE8b9b2idHOPmSpwdjXCLPXYrRVQyIeOugHYJI02CiY51CjslCaZMYU1BgsCYZoGdzluk7zFNrCHQSrOsX4rpuhNCoVWEbcQ4WYmpoejk6GmT8WDIMOgSTUwCL2BrM8ezx0+o5KvYZoZ68Q7RyECamtPXPf7xZ59w1ppQqxa5tbtLIVfg2eNnqESx9+IVa41V4uicUqaMCG0SmS7Cw7Mj9g+OKRZX6HRGZPM5itkMGA7ddp/xYMRb9+7y8nmTTnvAvd0NMpkCr/b20CrBdhr8+tdHaBUyGh/x6GGOjFPgk8/2CcOIoTfGzVgEcZOd3W1OT5tksgFhALVahf3xEX4Q4whBLp8FofAnYxobJd599y2yUnN7cwMZQ7fTwYtGlCpFjs+PGAUBfuDzd3/3K956f41iCQadPhkzjzd4Sd4p4xh5hOFQWVmhZtoMRyNu0m7ufWTZeL5PYa3G2PexsgUOmy1iJRGxgeNW+J//+Z/xt3/z91hGi8nQg9hHxz5JAsVcDqEiZBKgo4g40kyiCVJKzGyBjGuQK2axpcGHP/oBZmmNSq3GD12XTr+Nm3EpVysgTCzTJoo9pBiDttFJBp1kcM1VPnj3DzivSv7hZ/9X8u6If/Ev/5DT/Q7t1oh33/mQw/0D2u0W0nRBuYyHilJhg5cvDzFyWUImWLbDwet9Ik9h22VqjTrt0YBJv00YjikWbLzxOSu1LK69yWo9S6NexTACWt1TMo6NaxXY3ajSarc4P32OIRV/8ed/zkZ9BQ9BTFreVEGKyQqZEvpEpVK9muZJmgb5pDjt1IVVpQFSUhgsB8KBwDCYFnJZNJUkc+ay7AKZpnqWGIacs4KFe+Syv/9Sm2P6swMzQrZM0JZ+0UtMYQmeMi4zl+vgo2sYAmLZHVJPC9dMf5qVgESmye20wjSN1EMJcC1JOO5y9OIzCkxA+1QyirWq5PX5S/r9M2SUIZ9LaJ+O6bV7jHyLfC5LqZBHJWN2NjMI1WMy6gCSx4/3qTbWmPiaUqVIt3uGEDm0aVIsZ0mSNru7Dc47exQrt3GshNPTE7Z2qtjGgkGn2tMshxdpShQBSYonIoXCEAGWGJOzxxwffIqhTjDEkGF/QBIr7jzY5fHXX5LPaCLPY9A54t52jeFozJ2dKpPIJgxPUIlFu/kaKboMhyOUnpDPZ9FGn0H7OYnOoAtj0Dk8X5DJr2CaWbQOcUyTWGriOCCMYhzbmacIn5XJETANIU9tJrNYkHQN6CnD0CADLCsgmHTIZDSJf8zmukvHzHC4P8Au5PDGXXJug+rdO0Rjg5fPTlCxJPDHWBmJsBKkynDv7jbVWgPXNjg+OMK1XUb9McVMhfOjJjo2GIoAx3KgapKomNFwgqkMTATt8yGVYonRcAhmqnXm3AwkCbYhePede9RXN/n622dMJh7SMgmjhHwhT7ejsZwCUWJzvNfEG3tEccxoMsZ2THKeS6lcp9sfM/E1hUKRaq1EHCviJKI3GCONANMA2xYU8jnWGjVE5PHixVPOj5qs1deoNAoc7J+gEs2nv/414xAKpRK1yjpaDJFSEsVpQpdEKZ7vvSKfLaO0ZHV984qQ96Z2Y6YgTUGiIiIV0djeYnt3i9uba3SODyk4WbSS9EeaSuMWleoaBwfHxJGPZVhoFaXZEQ0LUwgcS5LLWAxHA2wZsbFaplTLU67k6Hd7FCo2u482sGyHct3ivtxl//CUbDaP1ikOCSZSmqAkhrBRiQStcByXjJ3l9q3bNE8/4/nLz9leu4Xl5Hj68jMKhRIf/867uG4BkTg8eXxA+2yMYUqq1SLZUhWtPSY5C5TBaCzYvbPNxxvrPHn+Of3eOcWSydZmiUrJ5vj4KVp5eJMI2waBol7dYNQPse0s4ThEqIRcxsIb9em2mmRqualWYCDkdItoPSX05oIAzuMXpvKVUguIgTnqND2QMoKZwW9hJJ7BRQtGwBID0DpNh54k6QY2ZpXB5udOx4K5BiKWKbxYPmEK7sxhohnmnxIEOZPsp31rredMaBbctNxm3lnzJhZzmjM2UkYqhE6rn00ZQ6wTTCMFrsI4LfGKkWDagmzOgVGMUBHBuEf79AUSn2Kxj2l4DFotdGJgygpbm1s0u22OTl/xu7/zNp32Pq4j6fd7lCsNPN8gX6gjDQuNJJPNoJDsHx8wHr7m3u0Gvc4BrlWk0zylfxiTqz2iWB5Tq+ZRzGwgIWLqMitlgBSSOI6xTQel0qy9pvQJJ2ecdZ4ThU1sO6TX6VIurRMmYw73TrCkRb97ikg0K+UShlJkbZu+F3J09JxybQ3bdBDmhG63RbmSOm94sWR1JUc+bzLojxn2jwgiF8uu4qo8o+6QTK6AYVmQCAwgTsIUMRIGhmETRTGmaafxNzJ1ljCkhdBi6l2tMKZasNIxtq2JwyamecLB4VeYaki14KKiAYWC4PjkM6QeMewLEquCoUvsv2hSrzWYeJp7G9tMwg
EfvPsjzrotxoMBZByiICaJYvZfH1IplpBaMBp4SK3Z3ihTKa3w7PkTwtDHsR1yuQzVskm7dYqdz7DaWGf/9QGlYgGlAipVi0wWnjz7NcIQFIom42CEYZh0+z3KtSLV+gqxhrEXMPYi0JAr1BiOenz0w3e5fXuL83/4e4LYJ6MyZN0c1VqRZruAUoLhcEShkEEqyfb6FpZhE0c+hWIJEpP19S2Gkx4nZy2kk+Gdd94iETavDk/48rPn/MW/+F3GozNWVurYIoM/iBkVY8qlGoVihWK5zOD54LuJ/LTdmClEMsFwstgZmz/50z9ldW2Vs4N9Em3x8N0f0GkN8JXg6etD/GGXsR8ikoR8PgOJiRSSMIwI/RCZBGQMjak15ydHZCqvGEUJzbMDwjDi3/yb/453D9/lD/7gT6jWN0m0SaVWRAobKY05M0imKncYhWRsAxEN6HRf8/nnP2ejnkXWNjg/fczIOyNSFqYLp+19lPBx3RwGGbIFh5fPz9FCIoyEwbDNo0e30HGRl88PCYchgfY5Pj9hpbHKo3fv8+WXf0+v30YpweZWDZ1MKBYyBL6HF2ZQyuL0pI3jJOTzFdYaWzzfP+Nwf5+Xh/+Wj3/vv+HOg0f4Xoww7NQ1T5hzx5OFEL4EwGgQxjRNxhyHViilUFqhlUIpEFLOIaIZzi9nkrSYVZtLK4nNGIZhGKk9RM8qjM1ShqSaSKpk6DkENTNYzyqRzaZ4oZKduKQhTO0lAKaUaYI6sSDw1+XYkdOuLra0M03KeRLS+QuZStmJ1qgkxjQkUqQFclLf/jRAzTQkK2sbJN0EGdpYlkXGURhmgpPXhH6HVvOU5lnAxDcwAk2hWCFOeuTyLv5giMagUKrT7CpqjVt0+2OaTw8x5IhSIY8wNIHfJRiPaRU0g76JFkV6kyyZUo6KhLzrpKnZNZi2iesqVBJiypjzs31qtTxJHKJiA9u0sSSIuE/kn2MbPlbWQCcCRAatbGJ/RKgVYRDhOg4b62WEmjAejWiedhkGYFo27d4RmVwGRIQf+HS7Y0aex0c/+XOGE5P910dYdpYgnFCqbpPPuwT+kFKxQCaj6fZPkUYaVCriiNif4PsRuXyFbKYAJCQq1UyFTD2RpJYopdPsqCpV6Uyp8ccDCoWY0+NndDqfQ9BnJf+QfveYbu81sW7y4fsPkJ6Na9lMBiFvv32bk8MeuWyB46Mmt+9t0R60aJ91qK/WaJ01aTRqoBIqpRJxqKlVKuxsNXj94gjTtjk5PWM0HhMnEXfv30Jpzdvv3ufJ06dooThvtnDdLFJKRkOPe/fvcHJ2Rm01x+07d8hkq/z9z78ijhSnJ01GozEqOcfzImzLwfNjwigk6A6orRT46Ic/ot0+wcnarK+vsffqgHv3HvL1V99iO5Kz0yGGNAgmAR/88B0+eOdDLNNjZW2V8bBPWI04b3YYeyGOm8dPNFHos7m7weFRk3tvPaTglPnm889ouX1u79zFkVkq5Qpaa3xvDCh6ndaNaP2NmcLE0EQkJJ7Hz//h5xRyOT775S/54N13+eLrp+wdnPL85R6mISlmBCuNCqOORxxOEEph2i46TojDBAuD0E/zqlvC4OnXj8m3xmzcuoPvBxwcPufLX/6Mw5dP+ZN//q948OiHNGoVosRIJTKtp3l/HJAKU4YYxoTh8AAdn/HTHz9i2HlOwV7hYC/m4PiEDz78MV9/8xI3nyWWmtPmKbaRZaW8gZM16A5HZPNVumOf3uAMHcVoEfP2e+9QXt2l2R0x9kd8/fg5o0lIs3nEB+/fojfsUy1naHfamIZJvlAjjB3sjEGv12Yw0IRJQLlcp7KxzSgpsbO1CpGPUBrLtBCmMYV15ia7KflbxmLmGXtSHqGnf0uBJI1MXTZWzmwFGs0FByUhMYzUNz5lDGKqoZDWa5i6Hs1cJaf8YO79KbhYYSutPDazVbDAlMUSXj6FxOQUSlAqQUvJLMXHTGO4mjrhKoC1pJBMtS3mjENNU5sYcsowkgS0wpQGCIV0XEwBEJG3JGroIO0sg2GXw/Nn3H93k3brHKUTtAy59/Ahp+2Qo5MzHNvk1esmpvJQScQkdnDzd6itPETYbR4/PaaQtZmMEkzTw0DhZgvk8ys8e/aCkddGyxoPSg/I2QqZ9Hj6zVc8fOs+wTjBMSEM+jh5iS2OMZTNcNBByizZYo2skaPV3adesxmPYiw3x9FBj4lv8/LwFYYhqZY2KBfruHYOwwqxDYvPv/2WXmuMlc1h5vKUikXeevs2h3t7iHCNXsdnMDF5/fIM016j2xVk8zb5UoXReEKiepyf9dnaXCOOJIaImIzOCSdmqsnnixwNz3EtizgMsK0ciQYhXYQwiaMQ23RTl8o4rcSGViRhQOx7TPQAFYVkLQPHziASH9eCUimLsKt8+tk3bFV2KGUUJ/t9NlbvsbFdRyVpjNKvfvUZkQ75wUc/RMqYguOCSBAyoVYr8fLZEY2VVZrtDsPxgLyXZXtnk8PzPQxXY2YMvGBCoZTl/R88Yv+oydFpC6kltmlTqVVo97sUK2X6gxF///Nf0umMCQOJ54Ws1AoIIeh2+gSBYLWRRykPpQV2xqC+1uDpiyc0z485Pj3h+PQY23CQEvqDPq12C8e18SchEsGTb55RzTv8+EdvczJpkUQBo4lPuVbn+a9f8+EP3ufV3gt2bm9SKlc5Xsvz9t3byETy4PZD9vcO8UcRGzt1Dl9/iWO5FNcKlIp53nn44Ea0/sZMwZeSQrXCYBzy6199go5iXMPgqy++5uXrYzrDCeVSibVGhX/+xz/lyZe/4OB5TL91glQCqWOCOERH4MeCnO2SJCGJBgyTcr7KP/+Tv+Cbx4959ewxbs7G1ZLHn31JLtdgfec+Wis0CoVKQyQlRKGPJROkEWOYEX/3j3/Fo7sF1soxVtbin/3p79EfhQyGCU6mwGDkMwkGSC05PTnm9u4jpNXCcBTZkk11cxff62KbNiuNAs1On9J6hr2DPQqFDE+fHfKHf/g+zVOLly/32dkqs7lWZzwYYdsOewendPsRWuT48Y9/n09/9Rnd3oCtu4+or9bYqe6w9+opOzsPcd08hiFQsUJgzN1AF8V1ZhDQkkTNgkBr5n9MiejiXCkFwlxI8Urp6T9Fkixgm5QoQ5g668+DqWbXzYuvzOMhFt5LQohpaP6ibzWVBDViaoxMXRbnMdA6ZTWJVnOtJs1+frXIo1i4Ui2OzRjjko+VRJAkCpUkqY3K0ERRjFZJyvCYPkotiDAxnQLKNJFJDHYWN+OyulonnEiybhVtjwjiADPjcdrex81X2Vjd5LNf/CN5J6RYdMmUVzg+GeBFfbKZCpXKBsH4mOHIx7YSRGwgbIeDgy5jz8C082C4rKzk0XGL86Mv8AcD/vN//DTNhRT02dmsUHt3F0vvEQx8HGLiqMDZ4RHZW/fIuQnDXhMtAg4O98lnaxw+fU4un2F7Z5VC3iWJ+wgRY7uSF8+eoWTE1u110Abr2xscnBwz7J5gELHZWCMaNbl7+13s/CYHZxGZ7DqW47B3cMbW9h1K5QqmdHn54lsqF
Q+B7hcs7m3jZJzcaNfH7w4Uc0NJ3vfudXeP3mmNPj13zwlXdYzvts7uywuOzjBzPiIuNga4tv/9rfY6vThlLw9PPPCTyP6XzKrbt3SLKclZfwR3/6H7AbdbLFgoODbX77N38Vb+2RJin/9t/+OwqpYB7EyKpGQzMpoxSJiMBL2N52cJtdlMLi1t0jPv7wZ4g0R0gqpl1nsvQJn7/i5OKKr3/9qwRxTBhUdNJDReX4sk8hMhbzJZZlc3Z+wWA4otuosdX5Bu3WBlEYMx7PGAwmqJrNweEtBsMhmYAwjdBzi3XoMx7N8eOMJz/4axzbxa01kTUTpMpMQSnhrTxqboMszrBtiyRKyFNBllZLW2+15vE77/L6zUvCwMNUZfyFh3aQ8fbVc9qNFoYh8frVF9QaDzlw91mHAbVagzwXdHsdBuMx/f6AmlvjL37w12xtbPH48WOiJGcwmuA6Bo5r0GiZ6GaJpqq0NxqohoykVGLYHw1ptrtkImQ4uiLJcu7fvMdoPMbRXM4vrkizEtOuMRgN4fOCra1Nnj19jeVYTOdLSgpOL4fYtX0sZ5dnz5/S2+ghiur/jsgVClIWqxWWXSeLUyRKVGQaNRtFkSgliSTPyNKYKA5xbZdWp0kYyuRZwtKLONjbZh16LPw1pu5guQbtrkuepsxXE8LUIy1ikqQgL3KKomC99nFcl0xUAbQ4TYnTBN0w8DyPdruLLCmEQUAcR6iqgq4bWJaJ7wdomoFtOyyXK5rNBkVZ4PsBeZqj2A6lLKHIGuswrPDXpo6macRxjKpWfeuSpGCZGmEUIstVYloU4HspUi5jmDaSojGZzGg265xdXKFo2i9XFNAMFN2ikGTyokDT1WsbpMbmRgfDUFmvZtSbFuPJlFazciicn13QrFUzvkTJyNIMValK6Q3LIooT1muPMA5ZrReoioZhGyyna/KiJC0KLF0hLQRLf02SZFimhaIoaIbOcrlkNY9oORqqnKHrYJoanc0NvMCn2W1BqRHHCcPhmM3NTeSyYp5rukGj7iArp4gC3h4PqLe2uHfvV3Hr06psI4357ItPsZ06fiBoOAaWoZGGGUpZEixjVD1HM23yJGF4OcayNUzDJfTXjEdLbLNLlFRLc0UtCP0AXdNJs0owsyxH5IJcBCiaxMpboesqnWab23dvs/aXHNzYo+Y6BN6C+maX2XhMvdGhFCmUgm67xe3bd3DcGu2NLVTDZjJf8eb0AseAb37wAUmak6Y5qiLRn89QpJKyzGg062RhRhrGWJZGt9lAkgycowN6nQ6S+luUSo6qqEymE2RFQ5JUvvHBryDynJ3ePsZDk/XCY6/ewJktSPqXbLRbTPpXKFs9Wq6NIlXEydOrK4I4oZQgDjzu3j9iMDzhYGuDcdwla2zS6HSQVAtJMimKFNuu8fbVKxrNOhu9DQbjMYvFkka7jaIrOA2XZ08/58beDv/5P/iPWa8DLvtD/uonHyEVEqpuUigStqUR+xlNp0USZbi1OnIp09rocuvBQ549eYqmqMx9j3rNZuEHDCYTxguPv/PdX+Ps/ApFVvnT7/+QRqOGSBOSMKbZaJJmObKsgCTh+QHe2qfVNRFxQa+1yWC05OXzk4qSufIYTGZkSNiNNqWiMpzOCdOchTdFmS44ODggywqQisrWLVe37aIQrJYLFEWm7jgEQcBivkTXVd599BDlzl1ePn+KJgmcpoo/nzLtn2GYJmEYkeYJXzz9mCgb49R1TNuqMka6RpJluPUWq3XIYLTC8wT15hZp4uH7a7obN9nd20CIlCSJ0TWV/nBIEKzZ2dohimKCOOXixVMkpWRze4P+1ZDNjQ0+fv0pfX/EZOHjNlroRcrdezcZDK740Y8/Yr1KMHQXCZ0sy7BdlYv+kP1hm0Zrg/PzAUUx4M6d+1WC2w/JRYmtqNiGSioiKGR2d3Y4fntBXpQUCJxGjbxMabRr1Fs1UHKSqMS0dbb3dxlNxvxX/81/Q7fb5Zvf/g6z5YKffPgRdbfOdDlHSCmaplMza+QiB1kijGKyPEXVFAzDQJSCkpJGo05RCKIkQi4FpmkR+CGmUR2I260O6/WaQX+EbTmIIiZLY1RZotZqkMYxZSFRIFFzXBRFJU3CKg6QppSldu32lPC8gCSpvodSyDTqLRbLBcs0pNezOD29YDgc0tvcxLINwij+5YqCrGnkZYWzsB2HdBlSFIJmrY4qyyRRgN2qZtydTps0yVA1FcMwcBy34sMXEAYxFBVFkNKgyHVm04Ag9tEME1nRqdcNzi5XxHFFXJ3NfIajJY7tkuUCWQ5oNZtoUkksFYjEp9vZJ4kTQj8kGHvU6i6rdYhjWayWc+I4ZWd7lyjMybOUNMlwbPCDCn6VFYKLwYj1X/2U7/zqdzk9nTAYjiklUXGDshwhqgKcltsgXE+QKEmTFFlVqddbuK6GbUK/f0G722Vn9zbn509IkoRWo/reO12bQlQExTwp0HQdiRLLNojiFJHndDp1FAnqrsnWRoebt/bJRU6SRuzt7fHg3j22Nzb44ovnHJ9c0u328L014/EU261z78EDzq9GNFsKzU6X8zev+cM/+nP29w5wLBvfW2NaBpQZrZZLGpdsdDcwVIWyiJHKApHFaKpMs26h2QZ5maAbFqb5Lt/8xlfJ4ogwzFAlqXKnZCV/+O//gN/73f+YeDBEDXwWqxndjTaxrnI8GFK3TZBg9/CAVXpK6jYxZInVZMh2u84s8vkn/+gf4tldjN4+lmmT5hm6Akc3djl+9Sn7u10MrWBnp8P/8n/1v+D3//2/44//7E9Io5jYD/js88/5vd/7L6k1e+wc3GD9598nEwWWrVIWGXEcY6Ei4hS1LJHyFF0r0NUSz18iqzLiusq03ekxmxaIMqfRsnGbNeK3CYPBOYokc9bv41g2UlEST2fYls1Bo0mZRKiqASgspkscs0YYZZUDRlZpNFss1j6SonB+eUleqOSphO3WSTKJeDoHRcUPY1y3QRhWhwjT0FFkiKOIwF/TbNYpRIYqZzx48ICNzW3evHpFnsV0Gk3kMqHTsPnVb32Dn378U+I0wzBKLLfB3sE2s8WYo5v3mS2W9DY2GIxeYdo2p+dv6La3qDVa9Dq9yhqryZTIXPavSGKPIPC5ceOQtbciz1IWyxCkOZPxgppbZ7bwaHa6rNY+W1tbaKpGb6NL6hRcTV5AnDP35pSvRVXzKsuYtsVGd5fxaIGiQhzF6LrLaOhTlmuuroY0GnVu37Zxal3ef/9bXF4MGY/myEiUosDzV1xdXqFqCuQKilqxwlqdNpZlEoQBF5cD9nY3aTbreEHAFy9eomsm8/UaP/ZQ1BLT0YjzkHrbQQ0VvLVPw2lRxiVRFJOXBaqmkouCIksxTQtkmWazSRRVbYJZHCNLEple9bI7joPneRVBQDcqaqqtUa+7qMgkUYKX5siKgqIZrNZrgizE0BXyXPzioa1qCFGSZVVwNU0zXLdOHGWosoFl6tw4usPz589YzFc0221qrlNVEvwyRWHteZQUtNotbt26xfEX64oNUhRIpYxpWjQabQzDroJshkqRgy
wpP0/0qaoGyMiygSpppHFOmshEkYKqNwmCNZ8dP6fZ6tFobOL7fcpSR1VV0iTDNKuGpCTLmc5WOJqETIpUpqz8gDj0aTRcbt7Z5+mzp8iyxuVgQp5KyLLCi5fHmIZJs9nEtutESY4QgrUf/X9Z+48ny7ItvRP77b2PPucq1ypkRmakFu/l0/WqUIVCG1QV0AZ2w0hDDzjoESccsNk9oPE/aGuOOCDNwEEbSaAJGmSj0I2qrkLV0/VEqkgdysPDtV959Dl7bw62v8SQNXiRg7AM8/Bwu/fcvdZe6/t+H9bzCaKYja0d8rLi9Owc5fkUZYkxBhUo1jdHmM7StAVB6DJ0pefR9IarRU7R+KwNYtY2boFUXExbxps3GA+2XXyiqfG8gGyoGI1SjOn48uEj0ixkMIq5vLhic2ebv/13fp+PP/oQawTf+e432TvY5+mzZ+zu7rK1tYWvPB59+ZC19W38aMjB/j6DLCOKYzqtyUZjZouC47PHVHVD3wm+/73fY2t7h9Wy4PPPP6NuO/K85vR8TpJEjMbb9H2DoMHD5ThbLfDjlK6pCBKfIl/ieQG67wjDkCSJnfTOug/w3Rfu8T/8D/+Mv/vOO9zc3GKws0VPz3Qx4/bNA6wfEGcpZVXy1muvM1sbs5EKZJ+zmM4Ybe9zdnRKkWguHl1w9OQZ/8X/+h8gTE+5PMeXDdOLxwhZsX/7LjcONnjppVv8u3/f0rYNVaP58JNHPPjskDgd8O3v/DVefvVr5PmKtlwhdUu3XNKuSs6fn2PQNLYkGk/YW3NhUJe+RRuLp3wGWUpZhDRtQZyGfPrZAwajmNncI1/lGAyN7tCtBq0ZjkZUVUM+W/DJJ19wsL9OGIZ0dUMSJmAkg9EIY+HeS/dZ1R8QJYpVUSJFTDIY0WrFMq9YLpdIz+PmwS5tO2A+m2N0i+lr4lCwv3OD7e11prNL/vAf/C20hl+994A0jnh8cszezhbjQcTNg016U7K7t8FilbO+sU+S+VRtydVMc3lxhfR8ZtMpq9WC3VsvcH6+Ih2MCKY5x8/P6VvN/ZfvsH+wQ5HPODo6pqorPnrwkDt39hkMMp4dnXHr9is8+PiQrZ077N2AVjcsViueXhzz1v2vsbO9x2cfPyTLMnol8LwIYwSt7tFGsr62QXVNYe41BEHE2dmCy6slSjqyrZSCn/3sE6JwwM72Fg8+foKnoDeG3lRM1hMur67Y2dnhvfc+w1MRo8mAui6YVTNG2ZAoDFksV/TautAdIRBKEaUBSZKQ50u07kBIGt0jfYEKFFVd0beaqmrwQh8h3LkSBAqhFL0xzOcztHbjaU9KhIA4cfvB2WxKHCd4nkdVV2AtTWO5e+sO2+vrmK6nyHPef+8DdFtjTE8Y+vR9g6e8rzLTR6NrWfoqRwqFEO59TJMhbdXS1C0//snP6XVLFLrJzcsv3uO5+g2rjzxpkcJQ16WL7lss6fseFUVoYwn9iDBMkTIkDiOMMZR5izWCqmoRQpAk7gXp2hIpJHGasZjXDCcZ+SLn7HLGjZsv8fz5KVHoMRmtgxB4vsdsNmM4HFO3DfnlFdZTiMDDDwL6pufx4RHjUcpakHIxLdE2pq41QqW0uoFekGbrSKloe0W3avBUwGg4JoxPyZuaxA+p6or1jTV+53e/T6cts/kVs/k53/r2u4wn67RVzw//7Mc8eXiE8n2GkzH7d26hQg8/8PCMZjq9pOs1Wii0EAg/pWo6xsOM7Z11jC7wfEizIZs7a3z6yRe8//4njNeG/MN/+AfcuLnNZ5++x2ya8+d//qf81m//NpeXU+aLFZ9/8ZhbN2+zWDX4ccZaMmSRl8wWK9I44ezigieHh7z34Yd0xjBbLPGMz2qx4tadF3j33W/w2ptvo40LfVG+oG0bxlnE6fNHhEpidEeAixasy5yyq0lIUFKh2xptLb2Q1HVFfG2m88OQV998A08b0t0D4s0h3voao0HMmlS8/8lnDJME5QcsV0tW8ykbG2vMzp4yTDze/K3f5RcffsoL67uMRnuMZUxHy7/+V/+C1168gW1nFMtzLk4WXF4ecTE9QUYp07NDNoYJl6dXdCZgbecux5cN/UlJ5HtsbOxy8fyM//Anf4xuKySGQZzSlg1SGIpqyvduv0VfLLi5M2I1jbm6WjAeDpldnXPjYJe6XnB5ecb9+3fIVyt+5/vf4M//4kfM5jl12xAFEX0DYZzS1NrtuRCcHJ9x984WTdUQhh4vvvACxxdTnj5+yFvvfo3J+iZl0dJ3kjff+AZpMuJXv3qfx48fMxwPwGpefuU+nzz4iCQJ8YRgOAipyiVZahEi543XbzPMJGcXC4Rt+ejDj5nPVzRVwc7WkBfu7FI1pUMy24a6maNC3/l7pGU2nRNGCcpzIobDJ48YjyL6TrOcrVDSZ3o14/CJYjgKeOXlFxilA9qm4wc//iFeELEoCppe8+CTz9g9uMGnXz5iZ3ePLz97wtVsShKGHD07o166BitNJ+zePeBHP/kxp8fnCAGT0Rq6g77VlLmbw1shiKIUhGAyWUMpn/PzYxARV92KMIqRniAdJlxOZ1R1AwvNVhwihGQ8HtJ3guVsjucJqqKiWdWEkc9ktEYURZxfXDqz4HTB2vo6g8GAru8pyhorIUpidKdpOgdm7DuNNS5EzFpnHDXaoqTF6J66rtna3gUEbVmilELrjrZt2NzcpOt62rYlioJrn1fDxfkU0Rt8LJsba2ytr3N2de7GSte5NEoFGKPxPGdGS9MUKRWz2QJrLbo3NLJGXKuaWu2Umr021FVLEg+Io/Q3WxTGw4wk83np3l2qIkeAA7GFztGqmp6+k8ynJTOT03Ut08srtLFcXV7RVA1qzR04o2FC3VzHQ7YtxydnLIpLJhsDrPXY2d7j7PSEXmsQAq0FyhOs8rlzC68NwGiCSLE2yoiibS7OT9jcPqC4VvUsVj1pMqDtOsZrE/JVwWC8SZYNWC3mjEZjPBVw8+AWP3//C4qi5vf+xveI4oC9gz3GdXeNm27wfIG1mq7tsFozmaxx5J2DsITxhCjboDYa34/RbYMXCySG6XzFPF+QpBapLU3Tkhc5ftCCMRzcvMX29jYvv/oiH330AW+9dR/f63ny9BNu3trh5VfWKeqGf/1v/0eWq4q+F9w4uM3nnx9iNQRhAMqRMKMwxJMz6rqmKitu3bhBbzWvDwZMBmN2dvbY2t5DU7OqWgyCi9WMTvduNJUveO2lW+xtDvBti+g10noEUYpVkNcr2ms1l5IeZVG6EB4hKJuaKAyJsphl21AEIf1oRLS7w+HThxSLnMFghJU++MpFeIYhvdUMtjZZLuf8+BcfMtg64PHpFLuSXFUaPxDk8xNurA/Y3Qh59cW7lOWCeV5w9vwIGWVM0oj1UcJoOMT62/ze3/zPCIcHfP6r9zBVzne/OSTxE2YXOZqOTrdceBWm7ymXC2y/4t3mFdYmI9586TbHT75gc3gDbQWn51MCf5P1tRFVvSKOfO7deonzs3Puv3SbX73/CU0Lbd8RJymPnj5DGhj4AZvjMcPBiLqs2VzfQEifZ8dnH
B4+5nK15Od/+XOCaMRiUfCf/2f/iN/57d/n8aNDmqrls08+ccl7fcPh4ROqumIyGVKXS+pqwe7OmN3dEUdHT9jfHxL6PWcnT7i8PKFtawaDAXlR8tmXF4zWQl66t0/ftMRJwHx5zmCygZQ+G2sTzi9ynh895nd/7/sIX7JsOt5//yFdA3VZMx7GRKFHsVqxvbWDkjCbTckyRzydznJefe0Obd+xf3OXqu74re9/j3/37/6c09MVTaepi46jZ1fcv30T3Vn8tCKKffq+xZchnqcoljldC8LGBJ7bKcRJSG86wjjh8mrGYJAxHq/h+QHz5ZKT81NefPkGV9MpQRxiVUc6DN2NaH2bJIy4Wi7RXY9uLYEKeeetr7FczqiqCozCkwFNU5JlKUVRc3R0jFIhxkjKqkAGPmVZujGmFDR1je9HWK0RgUL3PdKDyAsQwqK8jIuLc9bW1r7yIggh2N3d4eL8Es9XSOmoEC7PXlGVLYMbI5bTc+q8II0d0mJ7a42L+QI/UGxsrNO2DrMN4hqD7aYvQkh6HB1iMBrTtC3Kdyl/1liCKOAXv3iPN19/5TdbFCJfkMYDdrY3EbolTmLkeEKWJszmU7SFBx99grHOiez7krqqwGiSyOljq7qi6xp609Bpge0MvfHRRuAFbk5WFAW677G2p21rl1C0zAmiAKM1fuyRDjJmV1Ok8hxBstLcvvMCs9kMrTXPDo/Z2zlgf/8mo9GYtfUNlywmJH3XXwdVhAReSBTGhOEQT/UMsnWGk4TzyyviOKOu8+vlsMT3JZ4KaHVFGCYoL0Trntm8ZLNTWD/i6fMLdtcmeOGQZbGk1QKDYjZfUC2vOJcVnrpBXV8QRpbF6pTBYMjO1gF/62/9PicnZzx99pDnzx8jZEjgzwnTjHff/QZtL7DWp201+bJEY2i6/tpUExNEERjD9s42t+/cIh2kGGuwVjMaDomTlIurBX/0P/8Jq7JmkRcY6dQZQeDTVTVXV6f8/b/9u4SRj0SgrEdb14hAEYURSgmasqLua7Ikww9DyrrBCnFtUpJ88vAhG+sHqBBqP6QPY9b3RuTLCmslGxvbzFcPIfTw/JCuU9w4OOD5+RXT1hBnIcr3yfyEps7Z3trBGsAIkigh8GAwWuPucI2mM+TlAqMN2/sXzJsRVsQsC83NWy+RiJ699W2Ki0vWJjssywWWFqskfdMiEoPXWR49OuTVl77PdPqMd998hTRb5/R8hhKGw4dfcH55xUv3b5MGinx+yXgQ8ejxjNEgpjce1gacn1zhCR+JwhpLHMVgBRcX57z68j2eHZ3w+RdfIKTEWO1wB9Y9UyfHZ5ycnPHxx59wcnLCeDQAqRmNEpSU+J4LOgpDD0yH71s2NobUzYAnT79kOB5y42Cb2SxnuappGhDSKVaeHD5je3cMfc9ifsVoLHn48EsGowlt7RH5GePhkB/98EfcffkOO1vrvG8ekEYjbt084ObBLa4uT7l//xbK68AYoGeZz9jb2+DmnR22dzeQASwWV9y4eYcf/PgH7pYuHGQxjlzuydo771DXNU9PTihMx2QyoclrsJY4HLFatRR5h+k1URhRlSv80KduGpRS1HWNUhBGiiT1sTQU1Yz1jQGttgxlRK8rympB27ZUpZvpWw1tW2Okx4MPH9B1DRaQ0kMoSZZkLi41ivBUQK8tbd8zGA4xFoSUpFlKtarZ2Nig6zRWCJSSCF+hrYvNdQQHSeC7MZcRliDwCPwAazWeLwFD23aODef51FVP7bWEfsTrr7zGL372Q5q2Yndrk2A4IG8LmroDLC+8cJf33/uAoihJ4hRjLEoqmrYl8N3NvusafN93eRrasr6xjtU9y/kVn336+W+2KFT5JVmwRr5aUuS5w8sKwdVsynAQEiYS1fnMlwXSSrq6RzcdcRySr3I8KbB2eP3iGYSSaGWo+hrfeggZ0NQdgoa+axmPRpTFitAPUYMUKzyKombVVwgjUSguz2bsbm8hI59PP/4cay1ZNkapmN2Du7z5zrtYbei6nigZMFuuqOnxkoy6qpHSY7koiT2Jbxv6tqNuwQof0UqiMEEJg5Y9rekdkqJqmUzWyJKU6eUK0zY0xZIGiAPF2fMTuqZyioGmIKFD11OytCfJBF5akAUN91+4ze7OPuPRJhifrckmu7u7nF+e88rrX3Mz5nBMNhyjNXSdIQgilOcxHo+wxlJ2FQRuSadQVKsSJRQC8JTEGo3nhQyHI8Io4eNPHxKHEV4QsbW9Tad7giQkipyzfJKmXFwu+MVnD3j1xbvc2jsgDAV4yhnnVIBKPKe9rhraJieMY9JRQtk2lE3NeGuLUvd8enjK9s6L6NxSCkHrVaxWOdOrgltvf4tHTx+RpgmjcYb1BXubW8yWBT9/731WZcvlbMH29g5HTx6zv/dfsswFkWeJA0WcJPSXxwjPxwjNyy++SqOP6dUmy9Jw8vBj9rfWSQLDR3/5IQ/e+wBbFGR+TN8J0sGIyltR25YoiFGt5mB9m1CU7G9vUjUdWazYmoTk+S6+DPAlrPseKoio+o5RHFCmPsILOT2ZsjYIGARDJumYF27domuWDIYjJpOYvOhBhWzt3OR8tmKYxpydlNy9f5vLqyNOz8/44MNf8uTpFzx98hl7u2skcUiWpiznOYsrsL0mThOiOMULJBdXU4yAg1t7zKYzsCGjYcQL9/bpteDJk2fk+Yq66vn0wRFC9Uw2Uo4vcsLIZ75aMcw28GzPbDpjc2vE1toE7Qnuv3BA1yu2tw549OUTAr9lMgy5OJ/y0we/4u/8wd9ila8Yjkb82Z/9ObHvMT2bYoXg8PEhv/297/CDP/+YtpGU1ZKuq4jSgJt3DhgOE7548ojpdIrng0kUcZSgNZxczgjjhFb39LZBKIkVEql8fCWZzqaEgc8ws4TKR1nNS3du0euOo6Nj/MBnnGV4VjA9u6BtOsqqJhvEyNZie8uqqRFY+tbdRDE9o3SI7hsINZ3XIfwAqyzaGoQRRF6ARdMr8D2LthoRhQS+j6cUgT90IyUrKcuSJIzRjUb3Gqs7JsMRu/u7dG2N1hY/C1itCpQKyKKQSAXML6eItuT1V1/lcn7JrXt3WLU1Wzd2ePLoCaEM2V5b52B7hywdMJ/OsQJ293YI44i//Mu/RFiPqmqYZBlVVdEJga5XaGvxg5DVb1p9ZLSmrirarkd5AcqPaPKc9WFKGAn8AKarFWmWUOQVeVEQhzFR5MxY0lNIz0eoAOnhgHZBSBAlIBRNVbKxPqLM5yRxAlaysbFBliWcnJyRJANn9daGMi/RpsXzY46OzzG6w1eOh7RctNy5+yK7u7ecF0Aqeg2r6QqNorxO7/L8hLbXJGGCRaAtdNZSd8ahN5BESpIkPou6pCgWVLrH9BrPKqRZ4bGiyudMzzyStRFt37K3M2E1b1BSIWVGnCqyzOfg5iYbOyOi2CdREs8Ijp+d894vPyJftWxu3WCysck7X/seWuBoitp1lJ4KCIIYIaQz49mOrncAtVob2rLBaEvghQjpY3pD11mU8Ol7yZMnR2zv7PH2W19nb+82KghJB0OC
KGSZLyiKHG0su9tbLv3MS1ksGxbD2qWyAa3WFFVFEAQujMdaJJLOtNiiRSvB1WzF5w8f8+TJKffu3GOWNywKQxT4lI2lw2exmnP68ZdICafFFHl+SZgEZMMML/D57m//NnleOAJs3fDaKy9xdHKK7VsGScTO9gabv8617iwGeHR8xJMvDyn6U6rGI89L8rOUt1++zdX5IR+9/3O8YAvleejGkWvDQLHsCnYGA+p8yqcffc6te5v4sWLN99FGE0chSRQTCkmzKvCUR61bTq4uEBj++u/dZn1ri5/+9Ff0tSAUKfQSgcf2zpvovmaQRUSRx7YSvB7F1J2lanuOTk8Zra1zePCM8WSCF8TcuXuP87MzsJpAKZazKXGUIrEICdYY+tZwuphxdtawvT2hazTrkzFdLzg9O2N77waX0wVJFmGtO6iqyuFjfC9jMVsxu3KwtTySLvegF0R+TN+0pHHK22+8SlG2nBxfMMw83nn7G27UdPyMb3ztXWzXsZrNKFYrbh7cYD6ds7OxQ6c1SZrx7MkT1iYpjx4/ZjxOqKscTcvDR5+zWqwYj4ds7O8yyxd89vnneKHHcrokySKKvET5EqVC+t5zYfPGYgR4KkBayeXFnL2dNZbzGXXR8M3vfJP5qqFqKpQA3XRczhf4KiLwesqqJAgDemsI/QDdG8JAue8tLdoYvMDNYYu2ZJiFqEBR1xWB5+NLBUDftZRVTjoa0gjQ9PhC0bcdwlo8q/BlQOg5/hhhgCctvqc4fPKErmkQQmFEz70793j1lddZXC05Pz6hyFcMYsVkss323iZ3X77HLF8SPH3CxmhM4sfEQUz0xpucHJ2ihpbheMgf/v0/JExCQk+QhCGnJyesjccopSiqiuOzCy5nC+LBGG3/amf9X7kodD1IbZ3OuW0pyoIk9BGeoLMdXaupdUO37PC9EBl4WKVYFCXCC2m0prM+dWuAwBWazoW/ZOmIOIoAJ/9cLZf0RhOEHtoUdNpyeTljY2OXKPboTYd3DeFrTYMU17NvbXjtldfxgogkylxalwpAWBcgLj18DyQKozvQFi0tIkjpZMTJdMFYgO1rPJNgK8vzR1OCOCDOfALfOFmXkgzTDskCKXq+992/yTd/+3v0tuPHf/E/MRl7jCdrdF3P5s42cRywsTVhtrhgtljw0ZMzYi/j1sFdvvX9dwm8DG0UZd3gBW6P4cc90BCFCVpLmkbT9T2mt04Hv8q5uJrRaBczaXqDpzxWy5x8lTO9uqKua1arJa0u+D/8V/9HssGIje19jLWs8pqLyymdFkgRc3j0jJ/+6Ff8zm99h/Oriv/wZz/BkwJjNUZIUB5V1zinaeijEAhrGY/HjparBF7g8ez5OaPBGKkO+eM//ROUkCxmU3xfIbDX6GC3i8A6uaf0BAjBP/ov/hEvv/I1fD+8znZusLYn8BXFaoG0Bt031J0m8DyQEiUlnii5uXeDL4/OmTdzBIbT0yvae3vs3rzFS6+/ycnpisYI/Cjk6bNntF0FbUmXa7xmhQiGvPDq23RdTV6sHMUyDqnqmtZqxpN1+qZFdoYb+7tMxiM6bfGanm+88Sa99vFUymJV4wXrhPEG21vbZGnM1saYznREiQOxadxYT/cag8sAv5rOkErw2utf5/LsiHJ+wacff0hersgGEU1dMRyktE3BMElRMmKSDEn9mCRKOT6bsru7z+XVjKKor5/1AGEFWlu8XlBeFUySCcfzgo21TUI/JV9UWANV3mAaw/Z4gw8++oTj4zPu3LnH088f0eQ5uafIl0veeuMNnjz9kiiMkHgcnc3Zv7FNXVZs7+zwwYcfEkQRQg7wfcPXv/Y2dVUyPTnl/PKCV++/zC8/+JCwbVlVBVhIk5S+s3SNRaSOkHt1NUdJj/F4wHwxo21cNvFoOOLy8txJfA2cni947/3PMCJgVcwIlGC2WLKzu8dq0WK0oG5LsjhhWs6RYeRgMRqU9Gi7liKvSAYxxbJCWoXuDZ7yUZECbbAIPN9nMBhirZvlt1XtRnuhoi0bsBYhJG3j5Klt1VB3FWkSYYymrisO9vdpm44wiLm9d4Ozo2OXQNk3nJ+dkIRwcRHw+PGX/Or9X/L2N76G1JrTZ88wneHdd95lkMQ8bUpM3yGt4dVX7vPw0RfopkCblmHsM4pDLIK+aUjDiDJsOXl+gh/Fv9mioIKEXrsITnAmpGES0JuGO/du8vEXD9AS/CRyrCILTaPp256mbkmimLxsQfooGdA0DZ6S6N5wenJGFPtsbI44Pz8FwA9iqrqgN06BEEc+SZJR1zV13XBwc5eTywWtFTRFAzZgmKbs7d+6jtf06TqNsdrFEiqPtu3BuA+lNAaLpby+/XhhyOnZKWfT5wwTD9EO0UXB3tY2aZZR1EukUrxw7wUW0xXbu9t0RnA+m/L48CHDLyZYDH4Ys7t7Ez9I6I0gToeUVcWf//gh5xenvPLyS7z73T8kDhKXuFb2zHPNfL6g15aqndP1PRdXx1TlgrJsKPKWpu6pqw6lAvKioGtbjLU0rUYqN9/0fR+jDU3rZpxCCNq2xvc7ojghyTI+//yQH/zgJ9RNi5SKxcJhR9LhiMFgk48+/JKiVkyXTj/eXY/6vFBhpUeQjimKnDgIwPZMFzlyuSJOIozVbG1uEAYBeT6j1w1FXeGFbpZaFLljyIv/CDVse400kjCO+aN/97/wwx//iihMkJ5HEnu88MJt1iZjpDBY3bsboe7xPY8kiRmmAyI/I40FWaRYv7fPbL7gxv6rzOdTfAQX8wXLumKwtkHce0TKYSYiTxALzZNPP2LVQJxtMoo8NoTrzIWEOs+ZXV6wrCt8FdC2FfUqx1cBWRyxXFagEnqlWHUtnz97TtHNkN6cp4//FGkka+MRv/397/Lqq3fY3B5haFktpjz68gvmecXFvKCoXXpeUy4YhpB6Hvfu3+f49JDbL9xmenGB7Xs8sUZX55T5DNkLNgbrTh1TVZTLJbOLKUVZglC0dUscJfRtg6cVJnS0z7sHt8jSIRfnM/a394hjRd0usL2lmC3YX1tncXaFLkru7u0iug7PWPa2tnnw/gcM0oy1tQlN19EsF9zcepvL6SVdUfLdb3yDn/7sZ6hIcu/eLvfuHnB4+JTM3+f09JjvfOvbfOe73+HR8THpcMDx0TGX5xfcunUPtMf5+ZTZbM4gG9I0HX3fITCMRyPW1zd59OgJ1iryvCMvGj548IhPvniGlwRIz/Dmay/heyFHT06p8h4lAzzhkUYJhV8zny3J0gFR6ExlBoiCmLbSFE2F6Z2HAAtxFLOcL0B5KKFIs4yrqyl5NSXOMmI/hN6AcePUyXiMkop8sSKMQyfCqCoCz2NjfYOyKDk/OSfyI2QnWS1ztLB4viJOIy4uL+jqHIyhySuePXzCYDJkMhiwub5FX9cMkoTtjXWePzvCdA1Xp88JfYHoW4xuCIWkK1cI6WHbhsXVlLbu8IRCIX+zRWE4GnO1mFLXNX1fU+RzEhkTDnwuZhcEacTaIGW5bMirktlige4sRlu6pkd6Hk+fH6GxgGOMp4OMk/NTx0vJEq6
mV2irKcqSTGQoX2Gspm1q+r7BGMVoPMZYw9V0Rttb2lbjBzHj8TqB9EjijDiKicIQJXu0lWijWcynGAxt21BXFeVqReh57Gxs4CuNqZckvmbzYINhFpP6ATdffYONtS0662Sxvgf5quT4ckFjJ8hEEjPg+WXHJ//ff48fxqThGGsWRMmIbLiG8hv2Dg7YOvhr7Nw0aN3yo58fMrs4o297dGM4e35GVTb0RlA1jsHugltCojCh74zzB2gPt6hSCBlhdIcEl/8qBU3bIITECksYx7RtB0rhhYJlXlA/OuTnP/8Fy8WSrtOAQBiYXc44fHbK9s4O40HGfFESpWOUpxj6PtJTqMBHio7Qt2A3SKKQ9fUJwzQmDn3SNGKQOvZO17vxmdY9q+WC87NTPv74AavlAikleZ5jhLuS694glAdSYQxURc5ysXQxqkJz9OwpSgq3J/EkCovWPZ7nkSYxW5MNDnZ26aXhxz/4M/7wH/x9Btk641HC5clzPvz8MZ8/esgir/DPT2mNRfg+Wht8KbBNhW8106LjwedP0aYjTWM8JdlYnxCGAWs7txgNB2B6VvkMgNhzuQ510ZJ38Ed/+gNmjSFb3yLwMv7yLz+mLjS6FeTa45/+yz9h/QcJf/2vf5NvfvNtjAw4uZzxw5/8nHmpmec1ZVUSBZB4HYGp6Os5w0HAN979On/4W99nnGXopsa2NbPLU8rVDGENq7pkPJqwOR7xygs9s8WK1aqgqhq2t3bI84JAKQZpjBCaIPSJopjID9EGmqbixs09kjCARhMJj1fvvkBTt8SbG9imYXlxwe29PS6vrtha3yZLUqyAz6TCVDX7G1ucXZwRCcEb9+/z+Pkz3nn3HRZ5wf7OFs8eHhL5PmEUcnZ6zCBNmC0W/PZ3v8eDjz/l6ePHzKZLwiijqTuKskApFyjj+x6r5ZKqbAiuf+ZV0dBrhW0EVdPhYwkDOD45Z3MyYTQaUiwv8CQM0ozFbEnbdsSxoyF01+h1pTyydMjZ5TlxklKXFeFOgDUGjHXkA+XTdR1SSPzAJ1IxYZygu46+Ne5rtWG5WLK1vYO1OX3XYySsb6wReIrltQpqNBjSlh3Pnz7D9wMIFUIFBIGPbjXKD9jd2iOMAjxfsbuxTVs0VHnJeHvEajZnPMiYhj5KWEZZwtnDI4RxI6woiujbCiV9PCzrgwF1PSMNAuq2+80WBWOcGmg+v2K5uGS1nBKYgNAfUp7NWbYFq8JQVwZrYJANsAE0dUeUZqxNxiyXS/qiwtiGyWQbo2sCXxMEoJRmNE4wsiEZRmAtRbFgbX3IYrFCd5qyLvArDyGhNz3WuLGQkorZdIqH5KMPPyRLE27cOGA0GrNYOS3548ePSNLIZUr3PYM0JYkVymuIvAbZTvn+N9/i7e9+iyQbspwVfPzRlxx9ecFHn33JqqipFiUgiaIMKX3C5CbpVshaGmN9iVQBPiO08fDDlFXVcHJxwUH2IsezGQd7m5w+f8Tx8yVffPIFxWp1rU9WrE82GGYJsiiZzRc0rUU0zn1trHN2G9Nff6gVXqAYpCmh57OxtY61mravKOuS8WiMlO7gMxjCQJBmCU3V82//zb/i4uyKIIwYj1xnU5YFWkoOH3/Bd7/zHXwP1tfGBGHgRkPCEoQ+L9+7yYt394mjgCxLEMKC6ZEYTN/hS4FSgq53+bPWGoLggMB/C/OHv0/XtrR1TVlVaCUBibGCvjf0Gnrt8peNMVyPkvF9nzAIkEI4to3noXVP33f4vo8yAt22fP7Fp3zt7TeRpuVqNmN3c4N//s//GctFiVIB2mja0kn1lNQEykdYQ9XWhEnC9sEBeBFN2ULVI6xlsTym71uSxEHMojjCD3yyLEMGA/7sxz9gMS94+OwUmU2Y7N/hMi/54Jc/5/nRU5oaxtk26XCIER5Hpxf8v/7Jv+Dw2SH/6d/7m/wnf/Nv8+bXv8Of/sVP+PzhU54+O2K5uKSmJhAdcZAyL0r+5M9+xGefPeYP/+7fZRInhCJkNN4iTTLqcsVwMkEbzZ2DA6yFttP0fU/bGwbZ0HlzrtVh09k5L7/yEs8OjymLFs/3KasFg2FAV+dE0qNvGh6fP0Rawdv3X+b582Ok77O2tYVn4Pz5CeM7d6nbhq+//jqx9KjmS6r5guOqJMsyXnnhLpEUPD0/x2hBFsZsjjbIL6egNaZvqFYLfAtroyGjbMTF2RWjoU8d+DRtz2DokgStlSyXZ+yubRKGKctlzmK5ol2t6Izh7XffYV7NeH70hHfefofjZ09QnuSb33yX58/OWK6WDlMx8BAoRtmYxXxJ1xksOK9GGGG0BiOx2mI19L0hCiKUUqxWLsMhCCLUdVMRBBHCs5hW01QN6+sb9L1mfX2DbDjgcnFO27TURYknBb7v3MdFUaCEM/KiNIHwQfmMJuukg4wvHh3ywt1brCUJz58958P3P2Iy2SDyYiaDEYOtAVEQkQ5ilssl29tbjonU1KjreGSlFF2ryeKQcZYyXZZ4ofrNFgXfsxhdUayusH2Jr3rGwzFJHHC1WpENUy5nVwzHa2xvbtJd63C7piUbDMmSlLy4YG0rYTweEPgeVVEzJiQbKtKBRzZM2b+zzYOPPiRLI8Zra2RZQjaIeP7s4pon3rKxOaHtapQS1CtBsZzTWsmtgxvcvrnHyckxbZeTlz3WtrT1krfeuIUQhsCXCAlNVZHECePhmMPthNl5gtUdF2czVk9nPHpyiiVhVXsM9u+TGg+pA+qqI88rjJXUWtNUHdWiodOtS4ETC1ZFjVE+Ta/JRkNknLAxUrz/8fs01YKLyymT9S2iIGIQhaRBRJY4c1hRxmxtTvDDkDiJXf607xPHAYNhwmAQMVkbEIQKX4X4YkAQeEjpUueMNUipsAak9F3WhTL863/xz/jgvQcksWJrc0hVllyePaXrWifRUwptLP/ynz9kNBwhEWRJBBiiyOeFF+5QXFr8WyPKWc30vOajjz5ge3uLk6Mjvv722+zv7uCFAaEn8KSH7nu0rqnb3oUqhSHpMCYKBcsqp+k0nvBACbAWT0h8BX4Q0vfOICSk5wxD1sHXrNUoCcJX+IFCIQjDhHe/+TXa7mU3qzeGX/7yfbY3N4ijirruEFXhMnVNR98pjJQON+BJZvNL/u//+P9GEMLu7g4ba+tsb26xNp6wNpmwuRNihKTsWtbWRwzWbiKF5Hha88Mf/wrhZ2zd2OYXf/oLzmYLjFlQlwt8L6Gsrjg+6djf2Wd9MuT2jR2s6bk4u+D2rT1u7mzzD//gb/Pw0SF/9ud/wWy1ZJHPidIAJQ2J7Ih9Rej5fPzpU/7Tv/cHnD59BMYniodEykN3FavFFCE8lssVO9u7aGvpekMYxxgLg8mEVb7g9ksvkVcVd++/hLAe+apgOEpodI5ne6rpjEeffU5fNdzeP2B+cgJ1Reh5XBw+Y304og5npL5kEA+w44xVUbKxtcns/Iy1bEiSphhP8t4v3mNjZ4eug6Pj53g1iDsvcvvggGmV83C1YH04ZGtjxAcffoInLYvFJWk2omlr/EDS9YZWd8RpymK1Imo1ST
pgNndEBd/3ODo8At9w7+4d6qrkYH+Py7MFz49PGE6G+LGP8iSfffYFxkjeeftrfPDehwgJbddhtUUYSTbMmM/n3La3kAhM7zASdV27piCKKIqCWDnshLUgDSgpSQcDyqohL0tuD0ecnZ8jr/lzy8WS4TC7lo92tFqTJilWKZq6IYg1YRDTGsHRySVS+jw+PCYvK6SEk5MLkmREvqoQRhEGCUIqbt++g1QK6SuElDRaExmDkpK2bbBGo4RlfTykaVqm8+Vvtih4SjNIA3a3J9SFpJ5B6EvCwE2q2rYlCkLyxYK2Knj5/oskSch8NiXLJF2zIAo1L7x8h/XNEUooTp6dcnVlMKbm8Nkx0SDl7r273Hphl0BahsMEIS1No8myAV989pQ48aiqHG1apJGsD2MiDLa3NNWKzz/7iCgJWa4u6GzEMIu4eXNEWUzZ3JgQhU6yKeWQ8WhMVdb8zu9+m69/6zsUJuBf/I9/hgzXWeWGy2nOdJUTjQc0WtA3LjK079wHTghJkiT4oX/dxUqW5YK8qumMoO56F7xDSxL5fPrRr5heHOF7kts3D/j7/5t/SOIJYk8RKInRhiIvXf611C6JTih+9rOfIWi5cbBLEEh6XdI2PZoQLS1F3pEkPtgGTwksAtNbDAppJVVZsbu9zuA7X2c2dV6OrmlREoaDlCR1PgeLJU0z5DWMK/AkgyQk8CVhIOmbBl/1xIMQv7GkaUg2iNnd36GoCq7tBC4eFIFQHr5SWOMhrL1GAdd4vscoS6nbluWyxGpJoHx64yJX0S2eMGgtkPj4StD3Gt11WOCaIY7uNa0xhGFIUzcM0hRh3E31W+9+gzffeIeialC+T1PXlGVBVVVYbWi7jrKsQEBZ1+RlQdUWGG3whMflxRXFouTyfM7RyZTheMx3f+u3eO3VN90A1ML/9r/83/Himz/Hj4Z88PFjXn3rtyjKjjhseOneBknm1HLFqmE8GjDOMtaHGXHgE3kK2zbIpiOwlrf3d3jx7/8hv/j4E04XS1Z9z8n5KRcnT2iKFdJCsZrzwQefEfnwO9//Fm+89iKDYBNsz6a1WG3YBXpt6PvexWcqRZSkgGG8u0+na3xPIbTAkyGD8S5lOScOPaSpsWFNEITsbG4RCMFgNGRmDFmSIbol0+envPbSHb784ktu375LUVU8ffiY/YN9dtbXsVbw7NFjktGQgRchGsswHiBaS+yFeFZQLhbcuLmP97W3ODs5ZWd7j9WtA4qiRPkxN2/f5paxPHv2nFWxwADC87g4uyKKKuR07mb8SURdV3SNxPaa5bRna/1rRKHk3t09PvnkMVfzhnv3XmRV5Ozf2uf0+Jy//OVPyeIh6SChmc3xfI/O9BR16caWKIJr42pT1yjlYaxhNlvg+T6rskRISRYELqZVW9bG6xw9P2YwHHI5nSEkdHUFVtP32mXTtw6zLQKfeZETBjGGHr1YUjf9V+MlKQTWlxS1ZjzOEF7AdLZiOFzhqZCr6RwpBQ8+/pR4kLC1u470PHoEVdcjcURoi0skC5RkmIT07W84T+Hk+JC6K7k8h6Zacu/uTV6+e5uzsxOSJGHn9g1u3wr59JMvubo6oygWbGzsgY1ZLC6Jo5jB2CeIeor6nO3NHawo2N4ecjmds76ZMVuu+OV7f4nyYH2U8MbaK1xNL3h2eMIw2yZJQ4JAsbt3i9Vyhm00b772Bl97++uum9QGKy29bqianGfPnwAlSeyztbGOpwy+0g5k17acn12RpgPi0ToLLfin/+RfU+oBF7NjqhLCMEGjoSoYbmyg4hCtNF4AttUIBEGkCANFVa6Yrwq6pqbpLRqF1mAU5LMZQedz+fwY01ZMdtbZ3FhjuZiytreFtD1d09B1Lb6StHVJqxusMBR5xd72NttbO4RhiEVjlCEvlhwenRD4K4TQJLFiOAgZD1I8qVCeT5k3FHlJaRuSyEfaiPXJTYZZyvr6hChQgAYsnenxfR/du1mqJyShJ0hDhZI9pqvplcBKnyCJCbOY3/sbv+800Mpzsj1j6HuNkBJr5VeZGdoYl75mDXXdQNXg+W4UtDZep200deMWiuY6nU9Id/b3XY3l13RQ61zuThFLry1GCfK6JZCOR+Sj8D2fUEUkY49bt4YUZenSrgTXenLnptfWuZFbranqBqPcwtCXPrY1BH4MwnMqN635wQ9+yiB2Tl7lKTrl8c1vf5uPPn5EsSo5O37I/vYtNsYbvH73DRb5kg8++pC79+5w9/YNPGEJhEBZsE1NfTXn9Nkh1dUVXVGye+MG//6f/FP+1Z/9gLn16KREiobtzXXSJOLs+BmYFkVL3bfIyOdv/P7vukPECtx/FoF07wGC3vRIP8B0LtskUk751DcaJUL6tidJ16mqGW3bMJ0vyVcrirzg7t4eui7JZzNsb3j6+Cl1p3n7zXt873e+y/xySlHmRL7P+mTMYrmiKCq217eouhY6S70sSYMR43SIZyV5Xro9ktHcu3OLJl/R1jlx6NHVJWEUkcQeHz741EVuDmIHD0TiBR593xF4Em16pHb4fv/6/7MkIY0jynKO5ysObuwwGK5zdHzC2eU521s7CAWr5RKlPPeeo/GkBwKC0EmRrblWGV47hpVSaGtcOFcYoq3B4HLKb+ztUyxzyromimPiOOXi6pIkS0iTlK6tsUlKWTUo30N4ComHQtMazdp4hO+5XGepPIq6J00iqqbl4OaE8doI5YcUZc3R8xM8GXJw4yZad0xnF9y8fYeqzZlsbDgXeBxTFyV122KtQSno+o44VFjd/GaLQpaN0LlhNl/w6v27qK4i9EOyJGNRFqTRgJPTFWkyZLVYcnZyQRqGxInvDhklqMoVjx6VDEdD6rJnVeRkGSSZz+7aNgfGqYE+++wLHn5xiOlchygJuDqf4asID8vB9hYvfe9brA0HGGuYTFxoxapc0TQ1nh+wWmpGk7tY0YDt8T2nWumNoVyVSDzCaISnUvIu4h//P/8/zJYdQWyoVwuSeMiyuGC0sclgYwOtIlrjYYXE990L3Dc186tLp2+7llvqHsIgcqabviYwHX254rOjE3pruXn3Hu98/eukScSz58dkUUJX5CymU+azGVezGfPVgqv5lKbtuH3zFsNsxLPnC3zP8dqfPX/O1WxK2/YIIRiPUr7/na8zXyx4WOfMr66Yzxd0Tc9gPGK4NiRJE9bX1hkNEqJAoaxGWek8GUqiwojeWqww6N7lXXRti/VDBBare4xQWKHpdIfyfYS1hJ6H0W427yvnEO36HqUcZMwag/R9tNZYpfCSFKxxM+WmR/cFfhAyHLqEtbqpaJoGaZ2iSnkWEKCsywmXgJBYIbDW8WY85eSxxlh6BF1Vu1AdG3H2/IjhxI05tdEuorAsrxeZHmHo41uFVKCRJEGEbhqMsHRtCTLA81L+ww9/xp/86V/w9PER/81//V+xubOJ0hbVG2zbIYzLNLicTYlixaMnF3z56FN+8cufYEzLwc4GUZogAKN7BJKryylZmPLF5z+HumV2dMXf/Gv/CV484aTXPDo7p28rpDREScRgbYOt9TVM1zDa2
OZf/qs/5uWXX2dnd+crNVc2yLDGYrBY+2vonEUqSRgq+q5Bdy4n2AsFi3yFbkuk6Gmrkiwb8fobb1Evp6i+JRqGbO1/m7KueeM7b1PUNYvllNnpIYPBGpOtTcabO/h+xNb2AW3bEEQBQZrw4NPPSAdDqqrl5s4OVVHxws0D+r5mlA5Rtufdl+6RlyX3D3Y5WB+QDMd88sUjqGc0leG1t97g6fE5Z6dT1kYpQgS0dU+cpCxXC8YbI5a5c6rfuPkq1nZU+YqNccb5ySlqvIaQgldfe5293X3m0yXCCra2NlgtVrAmmS/mDlgpDNZ2CAFpmlKUFV3b0usO5SnWJiOGg4yTs1NsbxFdR+KHEDjByzCbEIQRo8mEsi7Ii4YsjanbGum7psZTHmtrI2ZXM1dsOk0cJDSmR8A1FqNnlMQUyxWvvHCXl+7cxZeKZe5S9p6fnBH4HmXVMLuasrE5Zjmbkw0T1sdrXGpNnVe0be+y3XtD07WIv9pK4a9eFDbWdjg+PSXOfC4vrxjFHqdnJ1TVirXxgHK5YLWaM51NyfMlO5u3EEbQ5A2mE8ymS/oelA45OZwyDV1usBCK3d0dJpMJZVlRLVaMogHbL91EINm5scerL7/K9tYWdVXyq1/+nKYoGQ2GVHWJEYbDT494/8GHLPMFbeec1kkY0TY1ZV0wXh8xzIaY3jBMxygRMEzHxGHA+iTm3/zxD3h2Mmcy3MDUDbGnqYpjhPKo6pCwGaMiHyV8hO0xrcY3Aomi6Ru0NiAlAqffV8LH9yW2r8G0XF6eMhgm/O0//APyIqeocYC/OOFX733KL3/6MyQeWkM8SjFS04kEEUnOFh0n56f0zSFKeuRFiZUKL8owgXXQOi35Dz/+JcNYMRlF7O1scv/1t0jShDhJUAqUkpi+B63xrjWhTdPgXSt7aqMRysNTPlZ217prXLBHmjgVlxBYaTCmQ/Xi2mbfOfkm5jqz1hnLrBGA68zNdQIVUqKEwFoJFpT0kZErTF3X4HuKUZbQBoq2aWi6DikVBoHuNQjpDGjGYqwAJL7yQVg39lGeG1tJd0DmhQspKVY5KJe+t2od4iBNHRu/73qsNYTKozWANCRpiCdCJ7+WMYvC8PzkFBUmHJ5e8uFnn/O99Q3apubRkyOK1QqkoJOCThguyoKfffCAvi/Zu3GLo+cnLFc5SRBgpUtuk77gxr17XD49ouqgWTVMD88Z5g03X3iBjTjhjjWUeU5RLvFjH98PmIzWnZJkOeWLzz7lvV894O/duo2Uit5aWm2xWDfCMwbTaYSxNG1FWSwxRnN+dsJkMmI0TFnfGCLFAKt7iqVPX83xfIjSkOnpEb0woCvOV5dknmsGCqUJJmO8dMzNO3sIPIrZEiVhEHgMhylt3/Ctb3+dIi/QKPq2x00zNHtbG2ht2RgPOTt9xnoaU1Ylr929xdNnz7i1NmD4zbdZW99ma3efH/3yfR40NW+//W0+fPA5z48vEKYjTX3Keo7vW7Z3NnjpxT0eff45bbnkzo1bfP2N16g7wfRyzqcffcr7v/qI1XIF2vL06SGvv/4GaZby/vu/QvoeZV3R9S1+5LMqcsIgoqlqjDF4SpEvltTFijSOCIOQhZR4QhKHMcVqxuXlFYO1MU1fY6WhLks8XzEYjVksllRVhSc1pjVMBmOaqma5WJKEEXHgEcYhRWFAt6yvbVEsZhSzOd985y2Ojp7R9z1GeJyen7G7t8vJ2SW66+nrhsj32L2xzQe/fB9fBCgraVtNEMaYvqG3FvNXzF77KxeF2I8IVUgWR9x74S716pLhJGW8HqKVwc9inp4+RsolL764w/7OGlZbTk8umaxvEqcJW7u3eO2VV3n6+DHL1QwpLZPJiBs3DtjY2CBNM+I4JQhTOu0jhaJperpOE4WBi0mMM9Is5v0Hn7PKS5ZlQaN7ZiuNMSnPjqYOqyEURrug617P0L0hjdNrM5hgkI6IowwlFO89+JQwHjJbLDG9pesayq5iuL4BngVhwDRIeoTR9G2LJ5RDSXiKWmu3VDUaqRRV17GoXaGQUnE6XfHhxx+ztTHC9g3zWe64Mjs3iYRziLeVRiPRVctka8LWeMRquaLM3a3GD50KZ7S2QW8tZVPTa80gS3n9lRe5c3OHLPKJfEFd5bRtDTKm6SBSitAL6W2LsS3SVyAFUlwXC21QWITpsH2PshprLJ6UrsgqDz+IaPoO47zEbjRkLFZIFBLXz1ukcK5OrHXAvGsJsnUBGm7EId2MWOsOLASej/AFfd/T94YkmZBmkg4oyoqm7cAK55mQHkKD1drNzrWm086BnngRwliENQS+RxCmWKPRpsfqlqKt8AOHPG471/2lcQZA1/dIYeh17wq/NaCgtz1RlpAXc164dxtjLX/0x3/MW+9+nUdPnvD+Bw+QQca0KCl6D61ClBfg4VHmJX1bspie8tknn7H2zXcZJjGmd6BHo2Dj7g1+7z//+1TzgqbT/OrjT3g6X6CVQqmAyBg62xMlPlJ59MbQGYtQPl//5reZLpYcPn3K+voaURI7cqcQKCkBi+47BA4oORhlGG1YLGYsFgsWyxl926KEu70Y02F0xcb6iKbuSTb3aOoVxyfP6f2EomgQqmdzsoevQnrrcTlbsTbZJJqs0eZLp2YqcvJqycXVJVE6YDQYsbGzgbSStu44OnxKmqYgBkx29l0GQV0gpWR7c4c0GbBaFHStZlEWvHH3gK1BzGq1ZOh1hLtDmq6n05YoXadpet587SXmz494aX+XZ08qBoFP4Pt88cmn6EqzORxxsVjSJylWW/qu4xe/eo+maxmOh8RxQFHnrAmBNh1B7Lv9W2+I44ROGxAeFsiLmtZY/ChkVazY3NhhtpxhbEtRzEBBnMboTlE3miwLydIhddXSdi5OdzpfkqUZG5sbrPIZaxvrtG2BUo6E+sor95hfXNC0Jcofg1KEScxwvMbR81OOT465//KL5GXJ2toAPwgdWqh3qk2MYpAMycsK5bvxFPKvdtz/lYvC7PIK33PwshsHN7k46dnYnHBxdcx0OWNjEPLmWy8TBTGT0RrTiznv/fIDDp895+RizvrGDu9+81tsb9/i1ZfeII6dDlcKN3f2fJ+26TEGHjz4lKK2NE1HVTccHh5xfn5OVVdcXJ4yGKW0XUfdgb6e9/bGojwfIYdEySZKOHWJNQIhPFyEnesyb965C1JydnbBxfklyWifploRhhGXyyvSNHYGobpmMlgnSWK6VtN1FbrTCCRN49ySv/7+UoEvFVVboqWitRJrfN55+xucnBwxu1rx8p07rA9jZsMpUezjCUtZ5GRZxrQtCIKYIEuoW0s/XWF6g5Qheb4k8DzE9QdOW43neQShR9c3fPHoS46PDzFdjbQGKSDLBggp2drcZHd3nUHmunQhfKSVGO0OjDCA0PcQWiOcCOgauasR0qOzHvOiJQgCNIqq1QhpsNedfl+5w9Xz5LUxLcRYD2utKwQChLROCmycBFAKiVCGwHdQr7braJuOqtJUVU1ZXdE0LZXWaAtXV1POzy9pe41FAoJea+qmZVmUNLonCgKG
Wcb1PYXQU6RxRBT4eNKibOvAa3FMNsgYDUfESUKapk677gcIBUEAvnTjsr7XYBVaO4zz2saE2WLJ+w8+4tnxMY8PjzifL/AiIAgRUtH3MFuuiOWQXsNPf/oLjp5+wacffcjk//Tf8I133iL0fYedD93oLdwYsTAuI8Tf2cR2HSIIsMaifJ/x2tix8ZMY0wuQkh6LFgoZhJycnpAvpkilCOOINElIogSMxWrtQuJDHxX5hGHAi/dfAgxNXTK9mqJ8D9O29Lrl+elzLlY16+MtZrNL8rwnJ0AFPlGWslgumdWK3e1tdvdukM+WJHHi1IRxBH1NowWe9hlOBqwWK5KdbeZXlwRhxPTsgs2dTQ6/+BIvjti8fRutO7LBhL6tyTYTdNOBH2CFwTMdxSBgI93l008f8ta9PZ4dnZANJ9x54TZ1W3J5cU5Iz72XX6RcLri9uQFlSVU0qLZD541rIurSTRDaDrRkNEyJki1u3X2BDz/5GC8MKKuS8caIyWSINZbL8ynWGNqmZTwcUFUl+zdvcnJxSm80QnSsiiWjcUbXVXTaOdfzsiAIUpCK6WyB73kI4RFFbuwTBzGdtkDP5uY6nqfo2p7BIAWjOX5+hAKUFPz7/+VPSLMR0vOxGJq2ompKJvMMPImRgvWNTS4vzjBaUpQtvheRVzVtb9BthQoCmvn8N1wU5leA5fPPv+RnP/kRu1sT0kgxX00ZbIyoRcr2+jprW3uMR2vsbt/j5o1XmM1yzi6uUEFMOhjxxZfPeIRFm44sSxmNBgA8fXrIl1885Nmz5xgrmC8LdA9FWYNQSN91y1XdU+gSAGM8hFBoI7EorFYoFZKXliwdMBpN3BKn6VkVJWfnU3YPbrBz8BrPnj/nyfFD8rJjfRgzz6+QGJoOvA680GWeIjyKoqRprQvL7g1WS9rW0DQGJQMCP0QpJ59c31hjbWef/Tv3IUyo6xqM4t3XX+WdFw+gnvNEdKzyOSdHTzg9OWW+WICMWOQVounQwu0KfBE4g5oEbXqKeobnK4LQxw+kW8olEaHvFnDIABBoY3l2OmO5WCE/eeo4L7qn71uwPWGgmIwGrE1GbG6tMRpkZHHA+mREnKYYa1mulsznl0xnUyebCwKqtmOxWhGGMRvrWwS+m1dPJmPiJGK1WrpwcaOoG6fc6PreFQPrbiVBEBAGzuyGgdl8wdnFOYtFzjLPqaoGbTTGCrR13BqBIIoTDFCV9TXhUjlscBAiA5+i1TS6ZZAmBErQNS4Ax1ca3baUqwVd1wKC8WTM1uYmSinatqOuK+q2RkuD7wuyNCKJHWrD8yNUkDFe2+LRw0Nu37nF4ZOn/Pmf/QVC+RR5haglnp8yzFKGKkT2jhOWLwsOD58zysZsbuzwR//2f+Kd118jkNIRLU2PFXA8u+K9Tz9mtiwpW4P1PDfWQuB5HkIKpGfp2haBx3R6RbGYsjGJWeQlRVHQVyvA3RKEdVnJoeeTpRlRFCEDhRaG8Xh0XewsFsH65jZNWRKN1vBCn52bt8FoqmJJkk3Y7Duqrubs/Ji6b7HKEo12Mf6IZQ1+tsbDp09pyxWhsgTK0nYF62sZbd5TtS0/+9lP2RivE/kB52fnFKuSy4tL5m3Hjdfe4OzoKVkUEIeS+fSSpijxpSIJYw7WNtnen/DRRw+YJAGj8QYv37mJ53sMxkOyYUYSR9i+p6lqhN1hMV8wmy/58MHnXD0/Qvohk2yIkAnWU0ThgLOzK27fuUPbaZ49/ITAtqAMO5tD+mbF3s46aRRSrVYoXDJj37b4nsfscorRgu2tHfqqpixyhmlGHASsZ2M602O0pekMQejTtjW6qdlaG3N1OQVjsMJiLG46EvnXt3WFaXvqquTk+AzTO5/S1vY2Vgi2drZZLlekgwSlFEHoIT1JnGWUTYtSgaM3W0G5KtjZHhJHHr3umaxPaNrfcPLabDnHS0KMlqhwzNOjJUkQMZ7skK8CPvl4ybOo5vNkSt/1SCmI44QwCN2CMAyxJxfUtXMlBoFHUazwfIVSHo8ePmKxzOk7g7ACKXzSbEAy8Gg6TdtrkBBnCXESkcTuUFktVgipkDKgLFtCPyVIYiySYtnQNXP6rkNbSRIPMVrxz//5vyHLUhoNGsXp1RJhYpTwCNMBQRYSepZkkDHZ2EOqAVZGeGFInGRIGYIIkMLHUwFZmtI2zsWb+hFBPGDZGnokvpKMR2uMhyGzyzPqxSnK9mxMhgSRx49+9ENOTueMJwd4QYyVoK25xokYnMHL3YikdKgJ5UUgPKqyIV+trkcz0HXuYbTG0jXXoxnfp6yMQ3+oFE9B1WjK85bz2RWfP75EStjcHLG7u40xhulszvHJCau8pG0bOt0zHIwYj0bkyyVCKAL/nCCIwUqquqIscmcoRNBq51v/j78EXP+JEBIpBcKaaxSH40lYXHqWUjHS81BSIURPGo+pm8YpjIKQbDwGoG1a+l7Tt4K2blBKUFY9edES+grb9+i2da7TXrtccc9DKcXx4pKPn1y61+2aWSOVQErnY/F9SRQFxFGMsQIvSCgryAbrLGcrZueX/JP//v/N3XsvsbG9RzaO0G1JLENX1HSPjyVfztFdx+uvvoNHz+XFFR988CFfe/tNhDAoT9IZw9r6Bi/ev8/Dx0eI+ZLFvKCcr/B8HxNIetsgPVzjgSXwJaPdXawu8aRFa43yFdLpjtB9j6kbKhrKPEdKiVUgA8np8XOMMQwGQwZJRrPKadrWFZEwxo9CwihikG4wHiqs7rDSsrd3m053WAR52dM0mqIoOT99xuzynGo1Z5RGrK8NwUgeHZ5gTItUIWubqWvehOTWnXu0TcfWfsFHn3zK//W/+2/B9rzx+iu88so9hNCcTk/IlytuHNxgxJDAi0jTIb/9/d8mTgekcUKYZZiupaor1jYmLK4uQBt+9MMfcvv2bV64extrNHHoc+fF+3hBxNniiqdHzxiPJ7z6v/q7HB+fcXJxSWBKegzzfMXNnQmjxGOSBTQrw+7GmLpqiL2Ine0tTk+PHVnXC5ikA2ScUiwWBEqRBgGR57OejpkMRpxPZ2RpwmS8R12V+EoRKxAqYFW6FMbA88EI+lZDL8jLAt/3KPMG3/fY3NpHm54gVGRZhuk1vqdQSnJ6ekIUhdcGRBcalg1H5FXHqjijR7I+HjOdTbk4PyeJf8OSVKKERVWzvrXDjTu7rKVjIhk6+mkYsGxK6mJF6EmSOGI4SBHSIqXFSMPnDz/nybND2t6S5zVadyhPugUkkMQpQTCgvgZfeZ6iF4EbzQSKLAno+o6udw9a1wuk6RmkI8IgIl8WTLKR6+CrGiFdlGbflwwGA5Igo9WKy8sZu/s3+Po3v0lrNM9Pn2M6TeQnRGFClmXEiYfyLI3uwIbUtaDuJSL0Qfh4fkyaTcBKsBbdNfR2SeQH0AqKZcl8teJyPmU0Tthei2nLJbVqmF5dMJ4MefT4IXVTsVqVdL11mAftugcpJcqTSOuB5Jowq4hC36XXCUFTu6AYN2dXxGkKSlOUFRaLURLTa2wvwEiwzkAgcEA9rMDa3h3OUnB8dcwvPn6
G73vXzCjjaLhejBEwrSz4grpVlEWNFD3WVmAFyvPwVIzF0FlL9+s39frdtda4miAEUkjnTpbux5BSopQPUrhlpO8TxAnGaExfYmyH8DxkENBqTVf1BL6PH2fYtqOta0yvkSi0MVyuZi4G8XrZKpEI5YPnI3yfbDxmOBwgpaLrWoqipCwLmrYlVgoVBAglaHpDkzsDXTWdsba2xUcfPSCSLn7z8uyS5bJAqI/ptGW8tsntu/cYDEesra8RDVJ8pYl9SeAp+rZhPBzwb//oj7h//x6DQYI1GikUH773Sw6PTpB+jKlrUl8y2l7DWqhNR49AKoOUEqxC+JKurrG6QwaCIAgRwtK1HR6SKIjobIvvKbS17uaFpqmb66IsmV5eMDUXeJ6PpzxCPyQIAjeuGk3ow9DB2toG35PEwwSrNV4YsjYaorwAAdy+eRuw5MsZpuvo6oIiX9J2ORfnx4RxQpnnKASmNxgpCPyU06sThpNNJlvbRJGP8gzz1ZKiXNFK8EcZYpCQbqwT+QO+tn/LmcWU4vz0FFPmxHHMYDSkKCuEkvSm48nRU/Bgsyl45fUXeevrr7O9u897v3yPF+69whv3b3B2fs67r97meByRl1v83nff4qNPPuH47JydWzdA16xnAcHWmNfu3uHq/IrxeMLlxQWp1zJbFdzavcXrr77C1to6z58d8tMf/ZgslCih6csVSZKwtzbA9xWJr9kaDHnzjdep65ay0RyfXfL85JzAC/ClZLw/ctklQlAWBdOZG7cpzyeJYiZrQ3TfEgUe5yfPHaGgyHl2eMjNmzfwAp/zs3OmszlXiznxcEhrNI+ePEEK10iY7jeMubDhiMl4l1XVEKwMxWqBrTukEMjIozY9vW7QfYOwBiUEWnfUTcEqX9D2LcpXaDwsIX6Y0HY1Qrgg6lVliaxA+gN6a+m7HtX3WAzCKqQ0DnV7HRQvPEnXtVjd0zQOETybLlBeSJikqCDETyKszRBSMpuXdDrg7v3Xuf/qG6xtbrOqCjpCsmRIGk8I/MgF2ktNp0uWqxVGe3iBT6idocSgXCpTbejaFl8ZdFPS1wVtPqOcLtjZ2SZRFbPjT0jEFo1MGE1izs+ekyUxHz34mNlyRhBFrG/uMl+dgPVQXkjgSaTSDlltBJ3urhepULcdZdUR+QF1UYLtMKIDJMuiw1iJNu4AlggE0mVXAALBV+27EG5pZt0B4fke1kT4ShBGIaMwdGlTbUdzrXcGWBQNaZxiG4u2EqMFUZQ4MJ8xSOlhpVvacg3kA2e7/zWgT0iJkAKte9epaxDG3Rp0b2j7hqp2/6anDNZ0KOkMRb0xeFJRNR1Npwl8n0Ga0HWSpq5BQBrH9L12ctMgJopilO9TdTVlWXIxnXI5m7kDFtdlG2PBQtf2lE3vGhmrHXoage/5fPHwIWEU0dUN2WjsRjBSkhc5ygs4Pznk4uQZWMtoMiROE8ajEcMsYjRICIMBa5Mhv/zFT3jw6ce8/fabWNNjreTy/IJqVRCEBt8atNVUxQIhFXXb0/UVbeeMd1XZ0ncwn05ZzC94993Xeee1O3j0REGEaTuausFXHqtVgRcEhHGElL3bMRg3WrLS0FZT3w4AAQAASURBVF8z/9Gg256qLLDA6fEpnucAlFEUksQRqzwnChWGBfFwxNXljCQdkA0GhFHEaLThlGfGIAQY0XL/2n9TlyVVWaLblrZuaKqa26+8ydnZCWuTMca2WNuhfBCdJgpSZ1ZUMc8v5ojmkr29XRaLOUmSEA8iJhsbVHlOURZ4vg9phGc8/s4/+EPee+89ZKw4vniG7ykuLg/Jizl7B2vcvfMyVX0bCdy7d5Myz2naDl/eIw0tMrBs7W0R6p5wf5+9/Zt4yuPw8WOeyJLfevdllkWJHw8ZDwaEvmXor3N3929w6/YLvP/BA37ylz8nL2a8/dbrjIYpvrR897vfYmNjAyU9fvXhx0zGEYGnmU7nKAGXF0f4UvHKy69QFAVRJLmaz6jqnLaXQM/acMT6eMTN/V0XoTtI6esaCVxcXnB2eUHXuUQ4KwSNbtne3UZZzfTinPEw+80WhWS0z3S+oOl8vKIn9QS+tWA76uWcQtfUXUnTuM7NdD1KuE46zVLSKKM3Gl/49Ne66dQL6XpnmrKexGoJysfzrzn90kOpAF+FpHGGwFXRtmmpypKqWdG1LQqPyA+5fesO6WCIH8Xkdc3To2eU5crJF0XC/dfewciUDz85ZO2qZX1rg2S4i+dFVL1H1cKqdlgIKyxVrdz+oHFUT1v39KbD4A5frVvKruTq7JB8cY6yLW+9/jIbGwJfZTz8ZIHfWQK7xvu/+AWr+ZQ0cYeoVCGTtU30dEkQLNzS+tr1a20D0mKForfGafJ7jR8EDIcTzk/OSMLo1/YIjIC2syjfwyqB7lo8IRxRVPQYcz1LFL+mJEqXMSsFRgiMcQt5iaSel7SRpiwLrNb4vne9qLfUVUUUe/iRT9MYEIqyrZ3V/jpRTBsXXCuFcLVHCKRwDmRrXWG3xuXOSinxfQ/f8/CUk6ZiDF3nkqmMEPTX4yV3G3EBP1iD7jqHUlGWLFBkQeac3MaBK7UR9NqyzCsMOcY2GKPdAl382ublfk6pFEoqrHFJdFKCVFwjvi1dXZAOUrqmJYqHKDWm63q0tawrj7ZpKFZLurqmqSryxTlVoTh7/pTNtU2K1ZzcGqZXZ5yen/Pf/l/+O166fw8rLaaFo6fH1GVNWVQsVwuC0CdIAje6a6wrHqJ1e6EO6kqjpGRnd4NbBwecnZ+Txj6i0/hCsT6eIIxF+QHedV5v3VZIBUIptNZorfGUB/Y/3t607jHCXJvENLPlAq/0qKuCOA4IfIGSYJ4/JwxjlDW0RYEfxk6SLRRhlBBHMa1paboa3/eI4gmDya4z1GmN0U4R9gqWsliyWs4oigVdWxH1Lp8gDHyKeY+gZzQMeHL8lCDwuTw5ZzDIePL8EXEUX9OBQ6ynaLuW8dqEF167zyCJOLi9x3x6zs39XYo8R6qEsioQBpQnuTh5TpIkbG1M0LbFC+4zGIw4Pr3i1bs3OT465f2f/oC1yRqBr/jd77zD9vYmQRQwna+YjEf4SlJWNVEQ4YUxgoLNrYwgjAlDn3u3bzEeJOiuZjSO+PLLh6xlCrE7oSoXbG0NefjwEaOBYpAOuDh7Qqc1iJ6d3QlGwGqZE3o+F6ennB4ecuPGPrqpmU1zDp8M8KQCBPPVnLa3ZNmQvjeUdYlhyPe+/W2Onz7B9r9h81pRVXiBh8EQhB6r5Ry6Gt1VdLahpcNKg8UQRAEEnosmtIKibpHKQyn/2nnpgVCOBmqh6jqkUC6Aor8+4ALJcDjEky4UoyhK+k47ZHTf03QN+B7C8wgjJzU9vlxiL1b014uhznRoFBbJIBuwKkuUJzDGp6kajp+fo61FSo8gSAijBHENdQtChSC47rAdT0R3PbppqNqWqiopijnHzx+zmJ5w62Cb7b0tkkHMfDVjMb1gOIwYDUKqfMoXnz7AaMPewQGD4ZiD3R2+fPwlxqprxISbBQssyne9vcXxfrQF6fmsrW0wny
3wvYCm6fF9D6Vc4Q2koDe4mxXCBbLgdOFWGMAdcCAxWl8zhDysNWjjbnZ93yGEpSwKrHXkVQFuiW0NwpfkVUkcpjTVr5EADq/Q9j1CCYRQ+MJDSuH+vgBrtZOnWoE112Y0XAHQnQbTYaTCk4I4jpiMh2jd02pN07W0TXP92uAOMhwfyS2knUzWVx7W8TXQVUPbGYxV12IBA3RIKZ2bWQoXDG8BDMI6jHpvf107JcZajOkJfR91vcgXEhcI1fZIz3ca8U5jBfhhSJKE9G3k0AZtR+drymrFfHHF9tYmq9WCPF/QNIKf/PSCqqnQrcbDx2pxbd5rEKWgn2lXSBsnkXa3CoO1ktFwzQX+VA3/j3/83+PFgjgOuXvrNi/dvsvaaM4gSRgOBkjP4AceQTTECoei/jUfCwRWGwz2ukmwToprO7wgxBcKrQ3pMMMaVwS7tsP2mrbtKYqSTlv8wI05lBcQxSm+5yOU+YpKvL6+TnF8ymA4xFOKdJBhjCSIIpIgIlvbQkqLvG4IdNPRtT1925HnM6r6EqKCMl9hw4xlZ2iajtLaawyFYpYvEUqwNh4h6FEoymqFjAQfPfqIOIqxMkXKCGkNaRTjDSW97Lkqpww3xwy3N+l6TdG2TIYTbt+8ydffdk1u4CukdPL0OA3wZMJwEKGEwKdFoLm8PCGUmlfu3SRMEozWlPkUaUqE6bk4P2a1ytna3uPWzVv87u/9LtPZgh/+8IdMz8/Z2tigWK14enTE2dUVvbV4fsRbb77O9OySdGNEGvgkgUdoAzwRsjZMEKYn9hSDJKZqe6R0CXCRH9K1lvff+4jEl2yujX+zRSH0aqqqoq8KSps7wqd1nZYSkki5OSZSYHqcmt0PEEKBDAGJUh5KRRgtQUAUe9dzZYdDdhcLS9s2KBTFPEdJD200ddle45SV65iTFC2d4aptNNNcf9XRyl/zfyx01j189156kdALiYKMJB7z9PCE3ijWtnbwowirBEJq2r5nOl1RFQV1WVLnK5f8pHuqunEGFE+iTUfb1gShT5bGGCF4eHhM0XuMhhmJP2ZrN6ItphSruZv1X7tvN7e3+fLxY1bFivF4HWNahK8ROMuhO9bdvN0ai6d81sbrNEVDnZduASosVrivtVgHv0I49g/u4HMnnALp3L5SXK8ihfMTyOvBEkJgTef+ZSGRnqDvXapZpzUWJy21GmwjCeOIjg7bW3w/pO3dgtkiMVYi7LV18roYgb6+OVwb1xB4UiKVG19Za3FrdcmqrJ3yTPfXReyrmRcWqLoWz5MOjidwxb9qUJ4LptdW40USPElVucwIIdztpO810gsQno9GuNfB6us9CwhPuq4Z40x+VmJbA0KgpSt40hrnBO9/HZousGhklOIrc42TaPCDnoGSdE3No0efMbs6oW1LytUU31N0fUPft859bAV97zKDeyMQxkNrJ9UWGGeOxPkWPOVMasYYTOGMeFWusarh6dkn/IeffEwWRYyyjK3JhBfu3ubundsEiSIZ+GB7As+BB41wcEEL9NfjvK+W/2j3DAqXF6CkvP6sKoxyYgZtLVJBrzvarsEYS57P8T3PnQtKEEURy8sLEIJGgIpj5kVOUZb4cUI2WcP3A3w/QCkfL7hGxxgnaR6L24CDNtZNRVHMyfM5Xd9QVStsvnCLWKUIAoVKIoxuqIVFBT6666g9j7pvSEYJRTUlUIKHR5/iCScpb1rY379DVbv42jwv8NMIFfoMJ6kLV7IOvVK3FVVbYgSUZYGnfHRrEAZODo9JkowQCITFywImG0M8BBjL1m6ARNF2mjBKEXXNepTwB7/3++TLKb5y8vDnZ6ccX1zxyaNnPD05A90R+Za3X3uNF2/vs7U2Yjmfc3E1xYsyhGmwTQl1AR0EQcbW2hbb2/tkccrd/T0OdrauAZe/waIwPT+5VmvAYpqTJsk1iVM696S1JNEQK50G3sn5AjdnFm6Z6CkfBBhtrjtyeX1QGPq+Q0pB3dQOaasUTdOilLvuGyGJY6cpt8ZStw297umqFmudm1hr477/9RFjrn+uNBlw76VXmM8LVssaZQTzVUkYJvziZz9DKYjiAOn5TOcLB4vzfHZ3tgikpmlK2raiLFY0TeskaKMJW5vbLmh9PGFr/YD1jQ38OATT49EgtaFewcnJOUEQ0tYG3w/4/NNPmM6mLPM5CsEgS2gbZ0m3Aqzu0b3zIhjjwHNt05DnK8IguB6vOE6QQSOk+Gpmj712D3+1wr+G41mDsNd/aqwb1Yhrmql2h4M2PUp4RFGEtU6S6UZamrZt6a07KNrGHcBd1ZCmAabo6HsQyqPr3W3RWud4FkJ/tVOQ18+BUj7K/4+BH/bXh5wxKJzhTxkPa9zoq+/duMNa65DA3XVMqe9GjfQWrRtM25DEkTvg6NndGqN7B8gTUiKER9F0VE1/bbZzOdbGOLMenjPlYUBYAVZe3yTFtX7KgpRgXNGNopg4jcmyhMEgJQ4Uka+QRiOMoSxXxFGIMD1VmVOVS/a31ijzFWVVUNU1q9whmbVn6fW1UVJoxHVMpMUgA4HRxsEGhaCzLUJLtFUoz93AO2NpOwtaU1cLFosVF5dXfPjxJxjdc/eFmxzc2Ob+/RfY3dpA4IqDEoJA+QRe4Jop68LnsYL+14XpejfkfhPOyyIVnvhKUfDV+/drf0qvNX1nqUqXC6yUe5bCMHSZBGGE8jXL2QzfD4ii+CvXuryeInieT5wmhKFPEEYE4YAsG7O56d5fKd1703Y1y+WUssopihXL1ZyyLsB0dG1AGOyyvjGm7JdkYcj04hQ/CQg9xdnpGaenl0xXM3b3bnHxfIGwiuVygTIwSUf0dcP6ZEwQBZRlTpoN0K1lPpuTRQmzixk7O3vcvX2L3f19jo6PCSMfqwS2bQjSFE8FtFVNp1sCP6QrcvKiRAvFcDwkS0KK5Qxd9UyymCDY5dadF7ic50RRxOGjLymWM9omx/Qha2tD5ssFTV3y7OEj9vf22L+xByrAi8ZoEZINJkR+xCv3X2JjOCAJ/b/SWS/sr/WM/39+/e//6/8zjx49YjabI6Sk7xwwygoIfBf+rq3j4wsh0Iav8MfSczp+KRVSGYJQuAe5c0sxe82tKcuSpmmwVmCFIoxifM8jDN1No2072qZFSJcy1lvHcJHSR4rAyTgtDtMpDBbnenz5tTfZvXGPq3kJMuLk+IIqz6lXOZ6wLC5PCXzpZrC+Kw57+/uYqmSwMeGTBw+IYo8w9fG9kKpuKPIa3QteuPsid+++iEARRCGalihU6Canr5dMz4/4+KP3QHecHB+xu7PDfLZglS+pm4KdvT2EjOltgBW+KwpolOdsWFHkXKp9d/1hvZ6b+76Ptj1uU4tzEF8vbIWUfHWMWYtQboYvrl8aazWeUg6kdi3VFAKCwCeKIoIgQkpFVTbUdUPX9rRtS6s1XuCBadjeGLGcX+AHAUGQcDZd4ocD2lYjhYeUzrTmgHt81WV6nudMhtc/m/lKmYTbjxjj3l/PGfN+XQyMMdff53p5Ld14cS2LGKcBUeSxWi2ceqWtkVKitaYqa8IgomstZ
VljpAcqoHMhfO5Zs+AGbz1gkQhcQuh18cRicWM4T3huPCcl0vOQynXVceQTKIcbH2cxkzQly1LS1PlIxoMUJS3DLCEInDRWG5d7MFssODk54ez8gourS549e8Z8saTXmkZrqqa5vmkpdK9RnvPngHsOjPBRKnTOQ2vdcK7rHGDRUyRRhJKGOFBkWcTbb73O66++TOB7YMy1Sks5JLlyz5DnO0eww5b8WkX26/dMX3+e5Vfvx1cHynVzYnRPr/uvvq5t2+ubj7vZGmMIoght7P+Ptj/ptSxL0zOxZ3W7Oc3trPM+Go+IjKaYmUwqSVHJQhULNRAp1KgG1ISABAkoCIIgaF4a6gdoIP2CGuhPaCJUVTJFMpnMLjKSGeHuER4e7mZuZrc5Z3er0+Bba59jnmSFEwhdwMPDze4995y91/6a93vf9yurLvcYbei7DfuLCxECLp6sFP3uAoDD4SAkE6Po2oZN34nXll9onCFGjyITUsB76XwXP/PwcC82H/NLXr76JS9ffEbfaYyOjOORrtvw8tVrrq8f8Zd/9e94eDhysdlz0e9ZjhNvPX7KdtOzzBPWag7HkcfXb3GxveD65oata3EYtBF7lpjFhRdriCGsHkcagU+1tmLPgmL0XuaHfmbbOsbjkZRhDgndbPn408/Y7nc8e3JD6zR3L5+jUpKZFhptN0xB4fqW9771PpePnqHslnZ3A6rBuQ5iwgA5BfT25jeXFP7b/+v/jS9ffsmP/+qvirJOFZsBWdfoXENWMriyrsFokeSTDbbpsFb2MkdGUJ4YE9M4E3wkxoz3svt002/pNlsimhAEy/chyGBSGTK1sozAIvFfGXI2xQ9HHnCQxKC15fd+/x/y9re+j26veHUvK/k++/gjvvzlz3nr5oLDq+cs0ygWAjnT9Bt2+wtubq44HO65u33NvAy4VtF3G0Cz217wwQff4vHjZ2hlS/trmOYDh4dbVJxYxns++8XP+OSjf8d7777Dj3/8l2w2Gw63dxwf7rCNQAGXN89QdocyLRhNVjKENdrKJrXMCi0JDCeiq5Q8xqniSpqJIZQFG7Y8rOUhjlEWixvRCJAT1mq6pqHrWzaleiPLutVpmpnGhWnyjMMMxXXTJ0gqoRi5vHC0NnE4PrDdXjJOMIxJBHRKle6lDmtBFOX1PYFRec0F2pg1qWmtS1JD3FKVXmmUa5epxKo9psDWGXadI+fA5eVeKKwqE2JZ5agUYVkIixc75JgZ50iImpQ0GV1U0rIyVIbQ8gCBzFUykUyQ85Slik5ALp/RGEXrNCp5dI4YpSBGjFHstz3OKDZdw/XljpvrS9569oTLywt2m05U4EbTtS1FrM04DQUqGXl198A0zSilZBFMkutXu0jpvBqscTK3UYp5nlEK5kUgKuccGjCIoaBWCWsEkFNIt6iVLvdGroHWuvwjQd3amgQ0aCGKaKVIOZVlV6pAlnKPG2vKM3rqElaVO0jQrq61ShNixLkGrQ1t0+BsS9M2uLZDlz9vmmY9603pmEMIOOvQWrHdbnBdVxxiwVhxMsjkAsfNxDgzHF8TwpEYBj777BOmaeR4PHA4HtjsO17fveTJoycM9wONtjx99ITj4YHPPv0F11eXhJB4+eKOy90F266jMZboPTfXN1xdXKxWKk3X0XU9x+MRBXz2i0+5vrqmABmyjjYGQlzEgywGCd4ZliAkiYfjRCTy7ntv4eeJ6GdSCNzePaBtjzId3e6Cfr8n6czFzRPe//b3MXaDsj1KOaHyajGNtP3lby4p/NN/+l8TUmKcJkJKNG0nPjjGorQmloXW2pgyDNRSBdiGrExxcYSQRhY/4L343CgMZEPbbrnYX2CMY1w84yLDSV3WOsZULZMlyGilsDqggZxNqfhOFQ0IBmiM43f/3t/nG9/7bXR/w+CFnhnHA1988jf8+N/8S3pnMEaM15LSTD6Iza2G3XbDPI1cX1/SN46mbXh0c81+v5dqNAWmceLTTz/ls1/+khACh/tbXr34FZf7ng/ee5uf/NVf8J0PP+Qnf/3XLPNCWjykQCIIdbbfYdtrtG1R1qDEyRer3dlgtrCEio4BhEwU4lIeUF3GA8LosVYSijEaZyxGK4xWhQEkq/tkeKbxy1TU2hGQ9j0lxTyJ7YjWlhgyS8pElWhcwNmZ66uO4+EeYxq22xtePH8gYVhymU1oU7qbwkKqc46cUTkXewyHKQ/64oV+2m828mdKIBRfBGj1qAqNVDzqY/BYBW3XoFTGGPH5icmjVCJGT2uhs0oG7BhUETqmpEt3q8lZYUoFp7TASDFFmTvohNJJoJ2UIWuSOlW8OYtflCFidRGZKU3Okb5t2PYdm86x39XVpT2dM2ycou0anLNcXV5wcbEXlapRp3uIWzuoxXvmaSHFVJoCcd20ZRDufcA6J/BNEq2JUhBSJnix+VjmQZIbicUv0gUkgadyudfWVpsS+SzllmGMxVpNUgltZDWl0RLwRW9SVqxG8fQ3Wp/uOQj1l1zOplDWY/BSFKCIXujBGeTcK3Ha1U6ShjViCuiahu1my3YrNilt0xXti4hfEppc2FBdv6Pve2IGqy1hmek3jnG8BRa0DhibWaYJVMLHgY9/8VNefvmK6BPXl1c441imUWzt55FN2zMN4gn28sUL2mId7oorsLN2TdrXVzcsfsYvntcvX9G1LQponexaVxqck66IGCHLfbCu4XAcmZdIMhl0loRjpQA9jjNKN2Abun6PzwnTWLTraDYXxGyZFtmpktEs88LjR494+91v/eaSwh/85/8EY2xZ8ShvWiiAGetabGNRWpakSGsrg95lDozzIt8bc1FFRrQ29N2WzWaPcx3LHJgmT0oS7ZZUD2LRI2SBFUpUQamMVbEMS2uLq9aHRQKRQBbvvPcNbp69j+r2dNsLnHU4ldhaMHHm9cuXjPNMQnN7OLCE0h4bLcOyxuGMoVGatnWkFJjmo+w0+OSnfPnqBYfDPcF7fvhbf4e+bflXf/SHbPuGZ88eMxyPvP3OO3zy818wDJNwJnMEHdHOoe0G3exRWpKCtuJ5ojIyJ8kKrW15qCjB3ogbKHHFcqWic1CClbVW4IUY5fWUIq/MI1b4KAQv2oIiMEtJBnCcBUtQBCBm6dCMnrm+7Mh5YRpmLi9uWHzm9d0Rmg5j7DpUVpK65R8ln8GgsdYWFkxeIcRcfOsFmhR46XwAmnMuKxoluKAV3gcxtNMK5wzOWTKRlAMpLZi8YPJA2zj2+wvGYSKGjDENi08sPoIyJJ/xcwItQ13peDLWUvyevMwn0DJbqDTbGLAq45Q8ghnQTnyGSIF5GnAGrFG0jYEU6axi3xlubq64ubnm8eMbLi/29JuOvpONYs4YVBLM3zmHKgJHMQ4MzLNnWWZQkbaRGUuMkcV7lDGU4yL0aXJJHjPH4z05CyyWIqSQyOWe5ywOAwLXqbNzZaVYIJFUwpTOod6bGujr9xppt9ZnWGtTZocCp9iyRY8cEJNEjVanZFSjUsyROQ7UwkiKo7Qmg5xlMVRjHW3f0bYdbb9DG4u1HTEVMkZMHA8POKtpe4fK
nhcvP8P7ga63PH3yCJVk1uRT4uLiiqbp8D7y8sVz7l6/otGSCJ2C4AMxRrquw/uFeZ7pmoZ5mkg+YI14GaXiRWa1lsVHiKNvjpGmbbFGrPhT0RO1Tcfh4UGKbWUAjc8RjBR02liatmWzuwBt2VzeiK3JbocuxBMfIGFo2h2Lj8xLZJwmrLW89eybv7mk8Pf/8X9FKIFDqvJM1224vrouS+IXFj+QEUHMOM3Ms7BSEhpnxZcHo+g3G7q2J6bMcJwkEWAgK0k6WaONOBJqRVl8EU80ewASOhdWizal/VXl/ZUPhyLlzOXlNQnN7uparBS05tmTR3TOcH2x53iUPc7DOJG14frmcVnUDX6ZiMHz+aefMh+PjOPAPA/E6FmWkYfjHSBQgdYaP4lDZ9tYYvDk4Om6ng+/933+7C/+knFaMMpirMI4VUz8OtAbSQpGoaxYYhNTqd5ltabRpiQFU7QBEHPAB09OEkxNsXJom4acM8dhkAHtOmugsMbKxYxRrq+ICgQaSbl0JZmcRLGsixVFTAFUBDzbznK533K4fw05cXl5xYtXrxmzEcveqAH5WfH0L7FUZVSsFhdSFdZ5iLFSbUmVKIt5atIDISfUoaYMXQOJVLqZxLIstG2DsWq19FZqQSVRADfGsNtscNYyHAdyVrSdCN6WaSln1pGwxFi6GadFPBmXQguV02WdwznLtmvpW8O2cXSNkepYjGhF0do1OKPQKskebgUXu55ta+naFuvMal3gjLjvAmUIbFYarqnW5vkEyQQfGP1BBu110KuKriapck4sISdilrWomVioyJEcJTF4n5inhRgoybrAlkqudwhFbKg1WZeEjHSt2gh2npH35ZzDKKSqbZpCJpGh8FrYAEoJd06oy0U7UzuEAjdmlcnKv3FOZJAtUGOKqQRe+RmUxrkOpTSN62nbns12R9/1WBuxjSKlwO5iw3G457Nf/py72y+5vtzjp5EXL17Tdjuubx7TbbZ84xtinvmzn/wV/6//7r/DqEzrHE+fPuXtd97l6uaK7f6Cm8ePOByObLt+TQwpCVkkJ6HSayVutI1zAvFlCMuM1YastNixRHFarvNDpTRJgXHikqudE/qvszRtB1pjmwZlyjC+aVHKYlzD8ThibYvrZEaYUmLTP/nNJYXv/8P/JTln9tsdV1dXNNbJPCBI1W+0ZgkTx+OB4/EIuQxuTcNmd0Hb9mitCSky+YXFBxm6GHmY65DYWCfziFC9v4XWKKvlTtWmyrlgoqxVZK0kBINXVBz7cHxgGo5865vv89FHPxNKo3P0/RZtLcPxXoYw1vH0rbe5uXmEVpplGsnBMw1H7l6/5OGu+DoZyfrB+zL0U7KQBtBWoZDtVskHuq5nXgL/4B/+I/71n/yptHJRaLOu0WX1ZoeiQ9GijZJhH5II5e6csN16CVaMvTzgtVqrbXyFAJZlYQmeykaqSmdVhl4V24w5FTvt0zWkKH7ra8YYC4Yr+gaVE1f7DSrPHI+vefzogpDh87sRbTqMbiFLUJJABsZK8jSpivVSCfKxBP0TFh1JxHxKCKfgIkEzpUjMswyIs5bOBoFCtFZYp2R/tbMlECWxBQ8LOkeuLvaklDgcj2y3W3KYmeaFcYaQrcypEMq0UVnuSS5JFFXgFIPVsG0sl9sNF9uO3c5xfbXh6uqC3XbDrm/YbjqMKrOULIyxWLQbUmHLvTE1uCGJHJUw+nSWTdH7rPg+GeUUmXIdi0I7hSTixgI7KgUxeWL02JIwY0ilMxea5DTNuDJLUEr2F8u1LPBOjPgYWGIoOo60no06g6hzA2vU2rE759bz2LbtCjMpJR2xsWadZ5ELmaA804qM0jIXg6KOL/e+FjkpJ1IOpYtMMl+JyO7l4pvbOEfTKmwDbRlQd31H6wxd35IXWZo1zxMxJWYfeBhGLi4u2e/2PP/Vrxge7rFK8dmnn/L8+Uv+9M//Atu3LDkTUTx98ozf+e2/y5PHj3l8fcN22wmMVjqn6BeCXxiODxhjGIeBRllyEuZZ1hqMJZaOICbZT2Kt2LEnybpo54pzQGSz7ZimiS+ef0nX77DGcLHfs99fEEIQ+6AyZwHF5cX7v7mk8F/+s/9G8EtjBeNdglSVIeJnzziMzMsMCpq2Ybfd0/VbOXBLXC2S5+jJdfAIp4z4FfinLhdMOa76BbEdOCUGXRgrUkZUzn3968K/z4nb25c0OvP4asurl89xVjMtUSwEUqZtNGRhPXV9D0qjcnlAc0KnRAgzIU0yVEOXQKfwIRPKBjSFBhNEOaoQY7Igg+v3vvFtPvvVc2ntrEEZXfxsDFq1kFuUamQNpc6gQtl3YFeKYCx2DFpJdYYSJogqT5guD5PWog4OQei8lAdU3CqKghWp3lW5bqlUmMK7R1p/IAYPShWeel6LAI3suu6cYr9zHA8vaGyk2Wx5NYoA0VnZXSGwQEJrecAVSlZewhnzqDCM1nsKWUUpCXK9tTXQSDGgFWQ1kfEoZSHZda4UU4AccU6jmw6PDFtVXNDZ41SG0mmmlPB+oW0Sm+2OmDT3h4UQFVo7IGFMxloFuVBYC2xitKK1Bqsy29Zxse3Zbw3Xly1XV3v2m579rsfqzKYThlDXNSwh4cseCo1eP7MpFG9TuiblMiEsJXHU5OqwRpa1kyGpjCnajcoIUllLwk/iKTVPE1rLMzQvRWsTM8viUdqiTSMb4dRJ8b0sC96HFRZSSs5JKIG/7qWY55nSVqzxou7qMOv7lPvrnNDStarwoRQCKztRy9BfncUIs7LPsvysViUBVdhUFeGsdBNGWaHFZyl/TvMo2UinjZXnxFqs1vRNw7br6LuGtgOlPUlBLF1zDJHh4YHp+EBjLclHtHHcHw4Epfjs+Rf8/LPP+Dd/8mf80b/41yg0l/sr3nr6mA/ef4e333uX/abnO9/+Nt///vcYhyP39/dM48izJ8/YbXayR9s6dHMGyzdWXAoimCIQVEoRERjeWoMPC8uylFlQBzkXZFNJF1JmGzElQko8e/q931xS+Kf/6/9j0Q2IBP7wcMQvQSihSnGxv6Lvd+jWoi3EJTLPnsM8kKNBJ0U2majVCi9X4qREgBrsVVklCBUvEnFTMXPLlU/AGmBU2eSVc+F1q9Mrp+Q5Hu8wKUKYZDfqNJaBlrSb2mSUCnLxKk2SopLNgu1rI20nWZKRUppEFhO7SDmACO/dKZLO6BJ0kzGYzQaNRQdFso5kNEYbTBGUoUXlrXRZgKMVTonAK5uITg4fFdHPNNqgnMPniM6s13CFzdSJPihjArUmjtqWn1/2+r26/l1p1eu+2poU5HomSZqCjkIOPL7ZofPC8eE1S0xEswMcyjQobcvDKp2FVgUSxMjrFEsMoWcuq+YCQOtIVfGiBIbSWksozV6qbhUJWbyvFJKsYhR9Q84CdWUN2nW0rhEqdRKbiBgDZoU2MyHPkD1dq+nahpzFIt25hqubSy6uNmyalsv9nqvLS3abviQEgTKtRob3BhILRiU6p1A54LSWyjtnmQ3k8+42lw1whZ2TkeuglQx2U1xhmaU4mtbvbZoGVdg+Evx
kKYxogEpfaKSLUtrw8HDPOA5M04T3XqzM204CfNEJpCTdmHMOH4LsK1YKayzLMpdrK+cqhiB7s8tzWqE9H2RJvVZqhTRTgTjX81fObk0Kp+RQtCqFLVdO0InuWhPE2Zexkghq53JOY3a2KdYeIuyscwug2HuIlbi1lmwiTW/ZdB1k2HQ9m66nsZZ5FJ+hEAJz8EBgnB7IWYgwX/zqObevD/z1Tz7ij//4z3j5+p4XL74UCE/BD3/0I/7v/8//B4Qg+0LmCddvuHnylN1uLyiEtdI1rZCpCEiVNSsSAFJwhrAAyLKvXOxgyjyuDvhzzhgtZpEy7/kNso/+0T/934JSLPNCjBnnnAx12p6+79HKcpxHDuORaRxRSeYCKUdIonJN5Y2q+isz6+CRQlFMSrI5WWGztMfp7ACoUpAEpaCsl5PEIDJ5lQWPTyVxpOQZjw8c7l9hiRgSKhc/+SwPXtaJREAXqKa+N1Motgl5AASmSGJRXNr/nNI60JN3ZEgakk7omNBZlLraNVjX0dgOrx1Ry2IaXdoKra0It7LsZcgqY2rlbiImORalSX6hwYKyhOhROq0PS/36Kv3v/O/rv+vf1e+Tru3EbDp/wKGIFFHrZ6UkBUVk2zsudx2Hu1cM00IyW9CuJDpbZlBR4Bsk4GVlyp7l8wc8roNleR9LeU8OhTDa5H0kUvCiwlaZSFoH4xqDK5YEquzcTTmRSne13+zWDlU2cBVfqJxkM10cUXks6tKWxvUEHxnmA9aBMw1N03BzfcPTR4/YbXuuLi7o24bNpmfTdzinsC5jdUYnj8oeq7Xs+00JXeAfrURLUWEx0ejktSqXJUVpfcB1YUXVoW6dq1il1jlESlIUuUaESsYYGtegjAyQayKYJjEIfHh4wHsvFSVCJ287Ub56H0Tr4NwKH0IlK+T1PQobLK2eSlDt3wVuTHVGVO6yLmr2s5JwPaM1aaz/rRXO6tWfqgpez39OPnheYan6O1Yoy9oyi1B/6/zXa7Y+Dybj40JKidY1WCXsPZWETdU4R9/3JJ1oW4vREa0T0zSilSUsCWt7pmHhsy+ec//wwBfPv+Dll1/y5Okzfvif/Ii7uzsuLy/50e/8Dm2/J2vLPM989NHHOCtkiK4RQV/bdWK/4hqBbcuTRxYinJBAZJNcPRdf/YyqJD7psLb8uq+vnRT+4T/53wmrwFi6rqdrO3wQHDLFzDAOLEnM0SxGDMlywiR59yprchks1iBQkZ+TgbaStq2kAYOYvakSiSrspLP8+flbVzmhkcpZmpESrHMmR8/d6xcQJqKfsLpUHUaLyVrh7pdvJ2fhw58XI4lEqBh/oVdqpIsQaEOSkMcWcCOhStdijcOaFuUasnFEZUBbGS4quS4Zjc0Kk4UWm8hrvsxaIKtk5fcY1aCSJUdPzHEN5vUhPU8IdVioFF9JHHIHalsN1cTuzeRSH/IqilvhnKyQVfcZZzKPr/b4+cgwzEzJkbUV3YqqkI6syTRlsBizIhf77IqpywxDKmZtNNpGYTsl8cvS2pWErKWDSVESei73OkknpBVYq7AGme2kJLbbMdI6R9u0uIJNxyjD6bgsZB3YbjSbFhqTsWg0lqZxoAL3h9cMwwzKys+ESNO0pfrXNG3Lbrdjv2m5vuy52MmM4fH1JX1j6duGru9xril4+AmX98XWOARhtbjCJDJW4L1Q5kKuUE4hnypqKHTbvEJsNUBYaws+L1TeeZ55/Pjxel6stUzTJIt6QkCVAXLVPGilCn08rDON4D05v1lAwCm41mIilWRx/uc1gciEpyAA6lwXoflqkUNOuKYpsKl+I3HY0l2oXHzCosynalKoXU+Fo776e2py00XbEHMkZPEPIoOfZ4jyRHdNg58XlNFiiaITbWtwVqzuN90GskFlK+7B+ALjBsZxZBhHZr/w5Zdfcnl5yZOnT+l213SbvXRx9/dcXl6KdgXpJNuuQ9mOkKUbsFb0KELXt6tuRWuFSr7Aa6fPbrQukUmKZ61/vdXF104K/6t//t9irWTiaZoZx4llXlZ4YppGMqIbMEnYMdFpopeHVydVqCcSHGqArxh3qWPXbiGoiMlacO4IKkHUcog0mahAperek8kqFTgDgVRKMpJ20bOMB4bDLSlMGCOUVgk8FpM1KskuA/n+U9WlVF4HW1kpCfW6LoqRxFA1O4kMql3tJLQpug3bAIZsHbkM7VTRFugsCSEoKHRkokL2KKAKBVLaXBENGbJqyTGL1qEMmc+/zrsAgKp+/Q91FIK9pTVAr1UqpwddSeYRfn7psnKO6JywKrPrHZfbjnGceX0Qh9uMKYlBKMM6J5GKaUXIkr3LGEQgLxJ1aI6CbEQwpkoHRdaopIpQEsGpSWV+wAqN5RipSTmXGUAqVWiOUrk2ztGUgXksIreuU7RN5PFVz/VFT6stOmtiWMh5wbrMNHm+fHXLMEyElBmmpSQ4qbRTBpUCndNs2obdtuXR5Z5HV5c8efSIxzfXbDcbjNWE6PHei/VDpZMuCyGENai7xq3BtP55SkJfdE2D0RqrzTqKWcurksTbVuYYPix0XbtSQmtQbNv21HE4x+zn9XtyTqVb8Os5mKYZ57oV7jrpGPJZMEoCi+Zc3IRrIqBoSwRySmQpAkuHUd/H+fBaPofBWbfOBkyJQwqBjZy1WFX3kBiZt6hqjZ7O3mcVTKo1SQhz6ZQoMJmqgTW6eGzFhJ+XYsqoiFm07xCEPGABZLjfuh6FzF7bFpTK7HZ7oQPHgGsajkdZXmWMZQqJXHzIHj1+LI+iUjTWCc226zBND0ZcHRSaUPyyjBVvOXGBTqh8uieVMSa2MH591ve7X69o/treRyFINzAMA977v9WiiHrQoFPCZoTqlmUBjFIapROxltUodC6KV6QKXw9yFrMtdEZnSQGU2ylD3hLodZbXyHLhKU6XtWI0qSBTWjZ6KS0DL+McsoegVEMhCu+6ZGajBJdGGZktlLY3pEjWVY2r1uGgUaLAlapXYWkFIilJJxtD0lo2p6HpbCfUOiXgus4yvDuNWioNLROKoZzOuoj1LLJXIJOShxzXgH9+L776JUH9xM46/yo1vPyvOn3/ej/OkwhvJhtJx/JgzbPHXFxgm4w2i/gPKXlto8ya9CulwFpdoCjxHarOpVoXaCQnkpGhYGsbckjMoy+D+TJhUlpmFNoCgl/LzMEQgi++R5LMYvCryZ7VMpuZ/Yizlm3fcXGx48mTS95/94YnNz03+46LfktrG+HcK09KE9Ev7Ld7Zh/44sUrDtPCEsEnAceMcTKDMQpnDE7DftNjtcJpzXg4lICqcW2DLUKnaZp4+fKlDGKBcRyZ55nNRlYvtm1L27biH2UN0zSR0rxixpT5gi3qXmMcIXiWxROKt9U0TcQYadv2b50VpRTzMrP4mXmei1r6TahGa82m7wkx03Xd2nlM01Tg5HYN5MR0CuwgtiBn1XmMYj6ZtKiba+CqUEftMGKSIk1yWw14p/edvBAqdFkgZQpMVrucStPW1tK2AskoYBjFUoekxcQTiiAyE5PoDLIxjOMk3YiSAtJaS/Zh3X
eeUiAUthwYxlk0QcO0oI8eoxWvXt+jjSnGfzIX2Gw2YuESPbv9psCZC+NxWP2i7l7f0m96Li6u2O2vuLi6QiuLaVpU1vhpECjSluG9qyoZKYpArHGsLT5kFRr/NV9fOym8ePFyXbxujCqDxhJOcl6hGpUVSUk1aLOGFFFJkyTUCnNQaVQSvB0ldMgEmMo+CAmCUM0qq0BcCArEkWS4nOoAOosYTKOkQ8inxBGTDDeVVrRNi/dl6YxGKtUKA0k9uv7/jBZFMdJ+6SxB2mpV3Ai0HBRMGQqKipLVjkIq76xkZwFGNqnllAUu0gq00PBWEmLB0yTRKKKSytqU95UxxAw5LtK9lH0VX+0U6teauNNp6HgawFdxEpzjZPWhrbAG9f6mJJ5GprI+KgNE3ndIgfvjQNuJD9Gx7FJO5fMJ46m+H1DGogtkVwfhqtolyLEQZ9MY0DHj0FiKo2mplCQMRHTpEle7jyxnyzqNMU2hdvYyvLdW6JJJRGdkwVoNnunwmtuXC73Zs2+uCQZ2jcEW0ZLWChUVOQxstx0fvPUdsnG8ujswh4h1HbMXv60QpUpVmdVmRCMLiDTQOYcxGmsb5nnm+fPn3N/fi0hNqdXCYVl86Z40Tekanjx5ilII5tyK/sd7z7QsLIcD2+2OTknSzWVmkYqlgFKKw+FA27bM80zf9xIIChMohUjfdKVQETvvei6MMex2e5RpOByOYs/cdesAPJTdKCAJRJJLWhlIMiM5YzIB2BOFup6988rWJCkM53nGmBPjSDoKKTbEPyvhnMGHgFLLmpBAtC3zsjCMmqYZaZqGruu4vLom58wwHNduTMgAog/KSZhzItgt1zEIYiCPqsFaUzq3jDKneaQ1Fp2bU3xMmWlKTJMMhx/uhVSRCUzjTNMUtbYV7UrrHBebDUoJdHh4/QWvX3wGaC6ubrC2wbU9zrUopYkoYqHZOudkrmRA4QlBirSu/XouqV8bPvq7//n//tRiVYxK1eqxRIasJCDrREbhEvRpACwxm4KRi3hGo9ClQlYFY5aLKmv7SElcBinYUbnYShsZXCdYlAyIxaCCImlQAvXoJFhMkgSUoycuImASjImSFLTg1Zy406qC+YighpRJSrOUvsUqVRgm0vmU1CBVvFGrS6tg9sgCEgwGh0ITTRQYDU1MYs+hkXZbGxmuUbHAlOo7I2VZumOiZ84NR9Oh8H+rwv/KLSanU5VWv+/N782y4cyeuOZ1AFoxbqUEz1ZGmAwxqdKKSfTWOWA1PH1yTQgTdw9HYtKkJL5URtsV5hDPgTI0U5RBqryWKUFC2gkRvFmg1YrOlv0R2rDExBwi8zyS4oIi4Zwo0J0VuEGcPy2Na9hueozWbLcboZG2VgzhcsJZw3bb0jaKy4uGTZvZ9ZZGWZxp0ErhWkOMA2F8QCtRpaYk/jZX1zegFPcPB4ZxxLiWuQjCRAgobX/0gRQDw/FI21iUOYnQvPccDgeWZWGapjJ0hraVinyapnI+5R5eXl7y7rvvrhBnDcrDMGCtZX8hBnMpp+J9VFlz6g0FfB061/tr1GmYHULEe7GsH4ZBxE/bHa5t2e7EPsJ7z+vXr1eoonYPpgzSV6GhFl1MFf9pLRV6pUHXuco6mOb0Xind83mykM9w0rDASXRX9QvVTLHCUXUWUb8vpUTTNKtljbWWvu9ISWC8ek3XOUkhZGhzglE1rN2QKuiH0qUAKjOu0/utZ73azSRQgZiCPAdKieFh6UyMVrIi1UDTmGIUKc7T0kGdaMohZ8xGRHu73Z7NdgNKk6I8v7YkG908+7Wx/msnhd//L/6bMgwsOHOphKsXvcAcRoYgWiT077aW//q33wHriG2HVlZweG0gJ+Iy4VLChYwpeGIozCGdC7aXFN4oolGYkLApCzMHgX/IUeYB2rIYR7QO7RwEj1omVJIqTeVIjgHFidePEgiCLAKXshFAuhc0UWuycSjXElUxIFsWmiiMEgokplCYXLj/CDNAdhCUQ4SGbPFa4xtL1Jk2BJwXim5QGZMEG561JrdOZh1KqszgJ5qYIGjQIpH/t1+M/L8/+hJf7kMN9ueBo9xiUiydwtms4I0hvYKclqI4/tvso5WTX1ryiCJl4UsrLVoSZ4AU2O9arvZiQX44LmQcZBkSO2MxRQ0vI+oysF9pqEmSYnmAXevY7Xr2XUNvoXcW18gehTlGphAYhwG/jJAjTWtwTpXdx1oeSmVoXCv89QJRLfPI4XCPyuJttOlaHj265snjS9579xHXVx27vqExYswWYkbbjDbS7vtF8OUKyBkFXddwc3PDPE88PNyz+EjIhpgN0yI4Zgyi8g1eguZxPK4P7PnA83yGUBlXlTVUK/aKvy/LDBoRXZXkfXl5+cZZ8N4LlGWEZlqD7vosZ4GDUkz45TRQPhdB1kp+nmeGaSIhQrS3336b3X6P955lWRiOR45H+VzzsqxoZU086/yiij69X+93TQrnnaqztiTgtKqoU87FnkUCXkxJ6LSczm9NCEK5bdeOZVXOr2wqSSZd160Fr7MnuM654lxc3pM4NyyyRbIiG6v6vyaN8vpLXOcVbwTd0mnklPA5lqQg3XQKZQZWPo81VtyQjRheVqKCLUr3tVtUGlXsSc67royIB4WyG3n2/u/+2lj/teEjVnUthVKagFScLAtarfOaKHTSvLO/5J/86Ae033yf7gc/QLUbqMBTDoT7Vxx+/kvUp5+xHY6YHIk6ihgnK3JumHVL+PB9Lr77bZaf/Aw++hiT635MjUETlGG5uUF94wP6995FtR2EiP/8C5ZPPkI//xUmRqF45lPXkYt1Q654ZCrtvtKMtiU8fUb3wQe0z56hmoY8j0yffc7yyS/oXn2JjV4EVkr6BbIqtFZ5zaglQdgksNKhsfCt99l/+F3mv/4YfvYzLDORBascU3eB+vaH9N/4BmqzLTtqIuH+FcePP0L//DNaf0CRuNgd+MNPPuc+nc8JagVynufViiV+lZlUDyhIdZNTKnxmERJV62vhREeyyjhjVhPEMhoWqmNcMFpxHI5suo5N34g3j5e9DVWQI6sfqzvpeiPWruPk7wQxKMbjkTg88FBEZ9aJ7H9JCSFIJnabjpvrCy6udsI4KlXZ4e7A/d2Bh+FIimWYLo02OUeckbNIBq08Ki2Mx3t0HrBc4HYiHjKNxodFiBLaYWyPjoEUFpJf0FrM3p5/8QVtY7m53LEsM8fRM/qAaizzIpvIjsdB8OpiQdF2nVSw5X5Um+lpmri8vOR4HMoDbklJOoa2aaVjKne36xqaxrDZbgkhyIraattsxcdpPA6oAu3URFSD8Sn5gzUNIYho0BpN8PK8xOBLkmjoOpi9vMePP/6YlBJ939M0DRcXFzx79oxEZphGhuNATJF+s2Fe5nXI/NUCpsJUtYpfloVxnBjTQN+2suOjdjhZClHbNOScCCnj8okpVxlIwiyb18+8FjdncJW1QtOdyga3ajfy8PCA1pquE/WztZau7+k2G3ZmL7OEwsoKPkK1eK/GoGSSksIwl5kpao1+8j1GYbAYJTqKFGVpmFYGlMwDtNb4q
tUrdUA9fX16zX60Vv/YcrkdxOZ4x4fs2McX7QPr9UF3XTCF/LWsZhxu921IUhBE9EfBd8qZjahr1SBFvIUrCquDrsee7+PQ7jIGqwKRDvDz31+oTH00D54otMaByaYEueXF1zenrO1hhsVdIPHcX6lCnAkyfPMBcXVHfPUYUWJUUqQoDr6xum0rA+a3GzjC10NOLG1ZaQZDRstAIRtaLGeHpywrbr0F7jiaiywo8jmfU9K83DqLkexRe5sZpu16G0FWbnPKMKOAwHwNLvOsqipG1rwONjwCjD5COl1jKv956mqMRAKCh0AfM0YrASEP1ENFCXFVUlZiNMUpW5kDgL3hMRH+XJ9VilaUf4xPoOKyw+eN7c7/nVb/0ef/oXvkx975R3G1l2rpsNb+wuWdNw544hqJGoAsVmzbgveLS7Ia7OeBZHylKudRkCSmucBltqng0dKE2hDBSaYTzgRsfJ6ZqqlIfP2lJ27pWhqgy6qsHD1s1QGoZR3MhutlvOz8/ZbbeUVUm3F27A3Yu7YsTedWw2G+kOgngv5K4gL4qfe3BvWQyvVqtFbiLvzvJzkp+Z/KzArcRFThjGmAX2mEcwp6eni7dz/r1jfaL8fG2324RG8kxpWb5arRbznezulpPRycnJotOUk1NmTx/r/ef3yr4Nx05mt45wMj7abDaLXWjmM+Tl+yJlMY6s1+tlbJS7CiHBqUVosK7rlEB0grHeJh+43bdklvHp6ekR6S5Q1iUhNCil2d7ccJg8yjZUtsVoxeGwp5smhsNjXnrulIuThsIWlEbxwkt3OB0djy9vCAG0rrm+2Qm6yxRoI9DYqm6IPuJUFMMiI94y7apdEu5ms8E5nzgLI+fn54gEd4+Mm1w6346PuCn479spgA9+udl8DLjZobTCFobgPavViv1hT1M3H5o3aqPT8kpasGPLv9ya5vZMpHtFBXC73S4yw+v1+kP0+q4Tuv3Z2dmSzdfrNWdnZwuOuWma5ebyXqjihUmz3iBiFD5oTFHg/UBwkwiwxRGrPLbQzG4ihkBbGXo3s1qfMnhNJDllKbh8/JTnXnyBfh4odMW+7/ERqmTld373Ht67VAUHbFlR1S3TQRAEn/v0Jzl0e0KMoAtitGgsbpoYjGLYjRSmwc0B7y2FttRVTZwjdSsaKyLEpmhXJ3TdIBDEqIlFgesdynlMGYTVjObNbmYoKpyB6AYwmtkHVoW4Xe26A9pWmLIklLXINLQtw6GjqFqGaSSaSKEQ7kdZMEwTZdVw2O5YVQ2xLCWpHAbKuhFhxH6gsiV+dBgU6/NzvveDNxmnkRgDDx48hzIapSq2V1fcfOPb/Bt/4k/RFBUxRK63V7zw4A4/97NfZD8c0HVDP8xynM7z7Xfe48s/9jLzPDL3O+o28soLK66/9QjTXPD+sxtcU9EUYv9YNg3DPKIjtOuGaXSoQovJelVSqgIfHJc3V1T1Codm1WxY1wX7/Y7JzagoicJGzTw7olLyDAzDojlji4Lnn3+eq6srGdfogsury8VXoG3bZccwDEMi9rHM2HPHsN1uWa/Xi7lLHgXlvUCWlvhhBqsxht1ux8nJyRL4xPB9Q1EUPHz48EMVeP5+3mEccyYywzkXcplHkJNTTjh5NHU4HJYRTO4YVquVjDjSa+d4kGfgOdgfk7hyAswjqrOzsyUm5CLx6uqK1Wq1xJA8nprGaekkslqtc3MaY4vInfgRVEuSyvDc3H2FECiswbuJYZAx7cW959jv9qzaBlxgnkaKsuWsWVNYzePLHbv9yMXFmvWqxBaaTWVoVnfZ7R1Pn+7wTaCoarp+ThDgChUkEdRNSQiKmMb0MXpW62ZZ+DdNS4ykJbwEf4Vhs1kn4Txhop+enn6kWP+RVVKzvHFeVDmXIYcsM7FpnhjdjC1LirqiqCuxoFuvmJxDW7PcSHnpFWNcKvlcZWTkxHvvvcd2u13MsEVONywaMCcnJwujUynFZrNZkktGHuz2O7Qx1G2Dj1IVTm7GBU9RFpjCYqyYmtRVRWHLZGIjMNmh7yjrgqAjUxQPibqyNLVlmnqebZ9Babj73AWPnz5mnGZZYM0j56enIkHhA/gZ52eMLTCmYjwMdNc3PPngfT712U8xz5E4a5QDgiTh6D1WKwptsarEmoJ1VWGVwvtAWdVELYzfstCoAi73N+zGgSkGpnlimHrCODPuRqlwFaAjU5wp1iuoCrp5YAiBgUioDI92l1AV4isRC8AyeIdtatG0taUwOdEEpag2a6K17IaZQSk6HzAUbNYn7OaJq7HDtBaMYooaX7bs+oHRz3xwdcl7T58wBs/p+Tl37t+jd56b7YGbbU9Zr5mCwzEQtMeUlr/4r/zz/Nv/6/8pr3/8RdraoN1IVVpuOulqHl/t6SaN8iWVLZmHPWdFEKXL7oaqECh01Wwo6xbvPG1ZYqNGuYh2DqNhmESbyM0ea2rONqcYAkYH+mng2W6LLzWzFz6MGyfmYcQa6eSKqvzwKJPkHR5lsaytwRYFxlqU0eLX7B1RgSnsopyaC55czWd00enp6ULgyru2/Bwt75mCbX6duq4XRdOMVNrv9xwOh2W0k/eCx0E/V6Zamw8JsuWZe95JHAf2bPhyLMWRJb4lQe2XXeMxhyF/xv1+v0wd8th4GGR+nmPEMZs6fx5gwfQfj59yIpTjy+cyL58PH4p33nuePn0KsJDg8v4khMDcD5ydnKCVYhino5GpYrVeoRXUdcXsPU6XdLHkzQ9uePP9Kx5fHRi6CeUD5+uCT37sPq+9fIfSTNQ2cNqWtJUVOZ9xwCYjn+yqZq1JI8GsfisjvLqS0V7wkXl27LZ7FJrNeiO8ERf4KF8fvVOAJCOhMFqWrt5LRdCsRKejKAqGOVXn6SaMUewFtdJ0XU/d1AQfORyE4ZfHQldX16xW7VKFHFc0IEilXF1k1mbWn8+LoDxHzDeaMfLQTdPEOEmiiAmeqrUEfVsUWKNx04jzHh8UMSgiFYYKqwvBQGvDSXtGf3NNQWR72BHRNPUKPzqiVqzXK4gaP460dYOfR4KfxXDETTJiP3SgI1VVsN0euNrueOm1lxjnDhMiJvEQvI6EIuAsgp03DYdpYm1ksTjOgbap0QW46AXhgtDwrdYUdZWQUYrZe1ZNhdEiie1UhMLixpHZCtR11aywCq62NzR1xRxHQqPxU2CcRAtIA4d+B1Ew9sYofPQMY4+2BfVKcOx+7pnwXO/3rNdnzARiv0NFx6HrkruYorQlJ+enmMKy2awxMeJdYA6B1tQ0ZUE39fyJP/VztE2DmhTGK0ptqFYVjx69ixs7tvsJb1owJdd9x6aseXK9pz2tUd5TpIfquftnXL63hVhgXcNw1aELTVFbJn8AXeDdvIw9T9oV/U7c9rpppogTZVOy23dU5RqsxmpLe7LisBvwylPbgv2ho1itmKeJsqoY+l6E05xjv99TVTXD0C8j0GONnnyv533bNAlTNnsV3Nzc0Lbt4sPQNM2yFM3L3Qy+mOd5GTHl7+VOJHcXMUbZsaXkkf/7sVR2TkAS/JExZko0t3r+0pkfj6xCCDx9+n
TpRDKwJAfapqkXsEgeb+WCLsNyp/Tcnp6eLvLSbrvlxRdf5P33H9I09SIel0dTIYSEJCqWZ10pJQUakjiK1IVkQYSqkmMpCrPsd/JILCfKpSubJ042G/aHHh/GMlNsAACiU0lEQVQikxNBvXmecNPA6s45VSMExqgcg3NU5Qq0YTcOjGPHoYjcPV9zcmpQ2nHvYsPJekXXBR4/3TJs95SV+FmXTcXsRUOs72XU3raW7iCqC5moW1XVwoTOI8dIXDrKnHj/uK+P3ClkRMg8O6yymGgZuxHz/2PtT39sS5YsP+zn7ns8Y0TcKfONVa+GbjW7moRECWJDJECJgAhJn8S/ll9EoKkGJIBqgT1Q7Kpi9RvyZeYdYjjTPntyd30wNz87spqqLOidRODmvRFxzh58m5stW2uZsUQfiLPHBE9dFkx9zzyM+GHGDzNh9KybNaUtwVuscUzDxDjMXM5XVs2a928/cL9/w7s3H7jb3VO6Amcd4zBiIvjJC/87ROqyloEak2ceZ+ZxJvoIAbqzDAOPIRLmQJxlDoQJljBF5ilQmJKpn3GmTA6hkgE5aynMnMZ5Qgie/nJms1kRiQzXM5UDS6AuHVVh2K1aCmvw00jlxFpbMFRDmIOUsasVhRH766JwzD4yjDO/+f2vefPhQRZ+45jChGwLslJNtDI7mWSNHKG7jFhb0dSNvM91Yugnpln8h6wp8D5w7Xqm0eNcyTwMFKXM07ZRZg6ZELiOHXVZ0FY1wzRynWbKZoWzBbNPzlUWiqoUVsM0CZOsLLClw5UFTbumbVY468RHZhBVZVFXjPPM4fmZ2orf0MfHJwyRh7t73jy8YaV++UUBMXLtB4ZJeP3RRi5+YIgyFxuf2EgELqczT59OfPO7L0yjY7gMrNuGunZU7YZuKvnrb57x1YYpBaDrMLHbrWiryLp2HI8vmLKkaBvmMFNVjmm6MvmRYR5xThKN9XZDVdes2lYETgHWdctu3eKHAT9MdMcThEmccq0hTDNTn6oGYyhdQZw9TVnJSMUYqUqpSNertXgABXDWSX/IFTjj6K99ag5azqeOvh8xWK5dj58Dfo4ETw7CCr9KMB2z5YMGRQhM05AZfsFH2mbF6XhhHGZI7qFLbyOFfxQ6kmAzEgm4wkhvrCpYb1aUpaNta+q6pG0FJmmaikt3Rq33vRehm3yJyliN9ryf2WzWhOBF75SErqtVy+HwzLt3bykKYRI9Pj6l866YJp9IJ+TMGUzaJIq08UoC6KoyOSZY2tVaiCLRsm43Mn43mlfMSA2uS7O9iMxmCHhWbc26LYnzlXnqqOuCcR4YJhE6ltawXa0EXjUF3luwLb2v+fXvD3zz6cClmwijpxgGVn7k53drfvp2Q1V5hnDCVbI+oo9sVhsKWxDmQF1V1FVBXZU4Y5KdUKRuGvb3DzSbLa5usAnCv1xOf9hNISZ2StW2TAFO1wFb16z3e1zdyLjMokjybfGrufZXQgx8+vyZX//6N3z+9IVhHHk5Hng5nSjKkqZtwVmqtsGVBfuHO8nC08jN/f0dtixyRtmNPcM8MkwjtrTY0uWvoi4pm5LJj/joobBC/TRij1E3YmXsI6IRSH9KT6TAlBZbeKKZiHbG2Mj+fsvx/EJVWcbxyjBeOV/PRBcxDqZ5YAojZVMyziP9ODCEGVMWHC4nGQ/dD1z7nv39PbaqiFjxjyew360pjBV8viRNlPNYApWrqItaVN1VSVUUNNUKPwfGfqCwJWVRU5UNZVETkc1omgLOVTR1SwgSdCNQViVZLOyDZKuTMGFcUTDOIlWuy5rSVbhgWTUtlSu4u7vjfLmwatcpc1J809B1Pd35jDMyS/Zy6ei6jr4f8H7iX//L/55rP/DLP/4TClemJmGCJGPEGbkeZVMnn6nAMI/MJuAqx+VyFevwGDE2UljH999+4V//q7+kbbfc7e/puzPzNLBeranqDd1o+fhywbuSsmllw4+eX/zkHcb3bLctHoFBm6YW4V8jD1DTNOx2e/phYFIv/76XhnDaLLvLiboq8JMkLWGemIZBxJVlxTQMXC8d0QecMbdM3MigmMvlwvF4ZJ5mmlpw8vVqLay9okyWDgKHRp3QVcjEr6qqMcYm6+SZoijRAe5wM1LTBrL8mzZ7xXLZGENV1fT9KHYSVY3B0NSNGPalrHy1WuWZCUoIWa1XFIXcj6oq8X7mfD4xzVOaFCd/WgtFaVmtWryfs3WGViEKP4XgX9HUx3HM8FRVVYTo2e62fPnyyDAMkpgWJW0rPYmHhze5ya09i2EYKIoyMamkmjlfzkQic4LaZLSpYZ5mLqeO4CNDP+ZNcxgGUXoneEtZTnVdy7NkDOPYs1k17LYrxuHKOPZEIqOf6borw3UgzDNj3xG9GO65NPktmpKPjxd+/e0jHz8fGAZPnEdsGHmzrfnlT+7541+8xdFRlzNNGYjhyrop2W1X3O/3OFNQOoHj67aUka8OiqZkd7+jrAqiDTRtQfnjtGt/j8lrRKKFMYzMfmC1abAOLqcjl+7Cub/y6ctnxnGg7zs+ff4EJuCcZbfb8OHDe+7v91gHm82Kn/3sayJe6H2lwzl4eHvP5XLKC22aBmYvniw+zJxOB2Lw+CAeLtIkutIPV0ISoij/OQRp+IzjgHOCHXo/YZ2RBmJVZqrs+fTCOAxE47iOk1gZxIgxlq4bWDcrbBDxW9u0cnzzLAE2lW7n80WaVQbu7u4w1qRG2gVnDfvdjq670DQVf/lX/5Z/+5f/I3/8y19yeHyRmRHWUlYuz2TAygNcOodPTC750yVmyjVnc0sDsKVjZIzyAABCG44msT4jzlnuHx7EfCwGClvgjBgYlEVB9J7NqqUwhqG7EifPh7fvJOPBJNaDwCHjOPLd9x8pypJPnz9lAQ3Afr/npz/9aR4qsl6vM7Nj6ZOvwiClT4qH/Mx6taIsbzqWGCPb/Y79/R2/+tWfSAboCq7nCybA+Xjm8fGFrhv57uMz0a3pZzDRE+eB+23L3aqiZOb88oXCqu+M0P+sdVwuHd99+y3Agt4ptsqKd2+3W6w1eSjOON54+3UtlgWbzWahzjUZ5txsNvR9/8rkbmlXAYK/Hw6HDA1pg1lVxsr5V3bd+XzmfD5n2EWbvhJEp2yeN89TPuaiLDLVVAO+BkGljI/jmKmPKuha+v7rZyh2r0FZX3Ny5tTm+dJwzznxRWqaJvcpFO7Z7/eiu0jXUte3wmxKYV0SVPR3dXKbwmTPz89yrCGk/ze557Lf7fLENoD1ep0hKGVC6v3Vz5nngHMldS3zq19eDqlXcs9ut+dy6aiqBuMKghVCTllXuMICnr4/E+eBn/3sK376s18S3Yrvn078+uMnLj4wEyhcxAwn3teOX7xt2a9H4vyF0/PvmMcXCmb6y5l1uwIviYSrDKtdTdkYopkoSqhriysCxk5EbnqV/1+vHy9e02xnGLDW06fJXoWxVEXBqimp6nsOzy8yS3Z9l5TF4jkkXf+SGGXHNbNn26yElogs8vF8oYhQWMcwTexSV32cJ67dlUikrmph2ljHum7o5it+mvFzwJaRA
jGNq5uK67WnqRMLqixpXIGfPT6JeeI4EgEX5DzMGHCzhVhgwgzBsWm21K6k9wNNJQuwMIXMVTBG+N5VQ4iR8+MzzjqG0wlrDG72+Gmm6w/c3d3RHQ58+e474jjyn/7v/ilD12GrhugjZTA4D7MPBBwhGLyfiSFgnZiFbXY7LolfXlUVL88vECO73S5T/jQj1QfHWUvTttRNS2kGmGaMcSLnb2uCLRj9BAH8NPPmw3u6y4WmqnDG0V3O3G93HA4HXFWKUtc5TocjdSOCo2+++UYezmmkTOMQQwjZn//+/p7z+ZyDijYhdebuUpSobLXZz9zt98IdT30SZo8xlkvXEV3Jw5s3fP78mf1my7u377h6z8vxyKbd0taOU+85Dob7egvjM6VzROt5d9dyOB3x40Dfnbl/e8fke0KYMSZyd3fHPPkMQ3TdlbKsqKqaqqqZxiGJvuRnBTffZKhmu91ld1AVbS0FYuM4cn9/zzzPHA6HHLDX6zUvLy/0fc9qtcomdordK0FDmTbqjGrdjc2ngk9l6uhmY61JyYPNAdzPnmkSFk7TNLy8PNO0NU1TZ5cApYwL/HDJwVMDPZB7h8BihGiZWUaymYrPUd9fs7GezIce2G63edOc5zkzoADOpxNNKxYhcm5jop1WyFyCmDL2MXPz9bh006oq0UOUaYO5XDrapmGeJtartUDg6XoN04AO7zqfz2y32ywSVCt0iHnqG0jFdTicssiubVcijrct05xsYQBjoXKO9/fv2a1b2nXDMM60bcPzY8nUX3jqLsR54mGzZV3XzKcjlJEPDw1v71o+fqo4n6/0lwk/OYYQWa/vGPxMVYs1ByYyhyk50gfaVYMjsFrfGu5/mE0hWmy0PBhHUdb0fibgKawT8yofcQG2rsLNkaHvMNbiZ8nUV7Zg7CWrvKtr5nEmhBGXMi9nAlMnPztNEw91zXTucVamCBGhqmvGa/KdB7gObL2cRowRN0ehdEZYlZYBR7j27HzAIf4pZVUxzR6fTPfcNDGHGddH6mB5bysZXGMsNgaK84m7dQHOYccZHyCEFHBdwZ2xdJeO9WZDX4qorzKOfhioypLBWEzpKMeJd6sN5wB//qd/SnE6i0DYWYG9Zs92ttSmwsQSE2WoUT8MTJXFVZHj6YgthAZZlhXT6ClLl6ujpd98VoF7T1OVnC8XLsMAZQVRMmINGMGCNY5V29J3V8ZBzLRsVbNqWowP7FYbge566SMVruDpyyMxRnbbLbuUJb979y4Hp67rEnW4yypZZbPoIJlhGOi6Lkv0NYBaI2W9tXI/Y4jp4RIHyLIouZyuuEJ43eM40a5arsMMxnA891S24vefzzz88T1mPDKPA9f+yv3mjsbN/OzDA7Zd8fT4iC3EPt05w+V8Ybe743Q6s9/fUVUCqzmnQ1OqzMZRK5YQpCIQa4cj0zTnikjPXxuWakWhvPq+79lut/k9NTut6yoHSc3AdZCNagp0Hda1WE3r2E0djalzfjUzdq6gSDRposkbUdu2ybbiQlXd2Dz6pceqm4F+vyzLV/OVtWGsG4kI2y44V6SAHYnRp03SvXIqWOorlpTaYRhEzdyPuUoT4VmVKy6lqq7Xa06nU3aP1Y36w4cPdMk5tqkbhnQvLpcLVVFKXwsIITLOY56Ip69lj8YYYVudz5fk6VQnN9hG+pBpNgSJTBFjTJB6IMTA5XJmVVmsL1g5KDw0795w7iq+PH7hdDozTp5dveJ+tyfOE76fcEXFL77+imk2fPp04HjpOV2eOb6cqDcb/FzSrjeUDw3ddWCeA+PYYSOcuyur5g+8KUQLRZz4ana89WMacCMzSo2RBmu0fRZHEcXFM11pOBzQ6T/xIvQv1T6oA+ur13ArdVKPGzP+ewZPR/L7vnqNk35THEbnWT4laRbk7NNCsA4C2Bix3jPFGRtnVmHg68rxcy+Tw0iz2IwRK2+TNg+z2UoGkeAjsTZayU5dVWJ5HYX5EB8eZFaAAeGeSgtOmmYlfegxTGAKXFVhypJpGihmwZzHeca5gtPhQFFUOdgC2YtGS2xjZLLEOHsqV1KVMfn7SOYyTp7VZsN1GBiHkSLRgUMIrFrxjapcweV8FrVthP7aiYAofd7Du3ei5E7Vl4qGlI6oAURpxFqGL+EHDQDLuQLTNFI4y+PjFz5sdzLUKcq0DB8Dh+OB0U9ch5Hd1vLdd9/z1c9/irOG3XZHf7lw6q4M33V8/WbFh6KkrJD1Wjv+9I++5t/89fdczh2TLRi7kYf7t1jr8LPneBQxkXLADY7j6Uhd2Fe6gJsmgJzdCo3SZw+ifrHmNPgqZXKZzS/N8H5YDWQH0ASz6O875xj6HutuLB5Z2kWGgMRbaWC1arlczrx9+17gnOsADLx9+zZVLE1+jnQN6TkonLL8U3UPVVVloahS1sdx4O5uzzCMifUXGGd5LxWcGSPQoVYXSoFdOh1UlcAucj08+/0WtanQY1ToUXUGy+ls6tEkTqRNhs7aWn5nChHXGGJae2VZSIIAmf6uFYjqN2TzMqmp7ZjnkD+3KCrAMgwjrnS065Zxnrlbb3h5fgRg9iO/++b3XHZr3u/XlE5mid/t9zSrFcP7macvz5y6nvOXI5um4KHcMPWB4drhipK3bza8e7fju48fGcaJwZ+wYcfx+ZlmtWEeJqYpEJN7dVtbcS74Ea8fvSkM1gmeHKEMEWNkQA3GYkPABC+YtREfemPIVstwK/NkrqQ46MeYWDYmDW7RvQSk1lq+ov5W+jNtJsbKrN0fqvXUliMP4SH+rfcjPQAuiojNBohhZjZevEfihAkzVZyJydcn6HmkI4nh9d8lgph8vlHnQcTb8B9DzOMnSdx1E9J4TSPDfIJ1PHcd5u0960Y0Ct3lgi1KqqoUrra/BWPFP4EsJNKqwQYZJaoD5eUzZXPoe9GWGCuTC+ZpZLVZMw0TpXVUZclqteJykiZdYSwPd/dsdzswMKdgJcJAuUb6+foQKSdeA5Rmn9pM/GEw2Gw2GAND3+OMlRm0LmHtMXIdepq2pTQNYDBVycO7t/zVX/1b9vsHyfhNweFwpG0Mn59P/PyPHyi44ocTZWl5+2bL5ptHVuWOKw7KkvPxkjQzcmzbbcvnz58Jntt4TOtyJirnQd4UVOylgVSFXtpbUPPGZbDVjXM5c1mDLMjmqL0HDUzah9BqbJpnmqLOWfE5beI6C1gSBWGw7ff7XMFZa3M/QQM6xrya9axBUZ9hvWcqKtP1pri7VjHqRiCTwlyGkHRIzDzPWWehVaKeW9u2fPnyRWC8ecaHKdPPVWUtUOSN4grx1XVX+w1VgItqXkSy/VXU0uMgZoVhmrPP0Wa3waTepD7DS5qv2naMacxs8CmuRMPh5cinz5+4v7sTqnwadOaKksenZ8Z+xI89u3XLOMO5Gzk/fWa/bVltd7TbklW7w9qJn/9iz8vzC1++fOHYj1y/e2a9WrHbbamrkvPliCsMv/z5B4bhyu+/e+R0vVDZijiIJf3kZwrjCD5SlwJT/5jXj94Uvq9rKutxk6dIg9Cjs8xempTRJ9cyXMoA59zQlIB/
CwrLsX36sBiEJqalY4w3dWYEXMoqrRN9Aenno4/Uq0qsG6ZJMPgEm1hj8EEYElVV0Q9CB/QhUGWn1pJxGAmzxwGNKxIV1AA1fSw42VoM7ixp3F3A2SJtEMlGLT04Icr5qJd7mRwsXVlyeHlhvd7gKiemWt4QZyn/Q5jZFnINa2cZiZzHic8vBzZvHzidzqyahqKuOLy8UJUl3kNZ3uxB1G0TeGVpIA/jQDAmq2QNyZiwMDK/3kkGZIJ4VsUQmaeZp49PbDcb1lsZbnL/5oE+aTps4ZjGUaabBRlUbtLGqWW9Yuir1SqrY7WRqU1C/btmxeixGRkqs6rb24ZqBbLwMl2J+7fvwEBjVvzD/+Af8t3vv+N8euGrD1/zm18/E2n51//jX/Gzu39MaXtgYnj5jseXgcvhyvbDjto6ugT3CFddYBZrHedTl4P2drsDzV6dzO0Yhj5tfiYH7aaRxqUGPNXY6Fxhzao1cDvn2Gw2nNLkMYV/VBR2vV55eXnh3bt3edNRk7dpEh99haU0IKsI9OnpiSYPnCEHcVFF73CuQMeFdl2X+lcmw113d3c8PT2x3+8zxKP9Ed0EQ86yy0XGL/CNmOXdE8JVKLcpIdjv93z58oX9fp81SaqMVvWzmFZO1E2V+2NaTfX9lbZd54C9dIlVFpZWWuqK4NK62m63nE8ndvsd0Qf8KGtuGAbc1VFU0p/RzUv7YPoZqglYQmV6j1ftSvpPdUVVOWJEpqSVJW61YbIFRd0IjFYYQjXz5TxQDmcefM2eGhM843xlv6tp12/xQ+Tw5cB1mpienykLy36/piwcp+cj1kR+8ZOvOF0mvvv4SAgVpak4DReadkPhRH/j7A/QlP9/N4W/cYX4cZsRE8RfvawKDkcZGO1nyZrLogYDzlU8Pz/DJBDJKg37GKeJshDqZIxAuonGWsmSqXnz5g3Pz8fktW4YEythImkA0u5tYqBZtfzRH/0R3377LdcoIhLBrCXrqMqKcZy5X28odvsMn/hkzzFOE4/nM7YoKDHcW/iJsdTWcsHxu37kuXT040hIoh1jjJS0sxdjs2kU3Hi9wgcvEJIRkZ8LgWEcGc9n/sW/+Bf8x//xf0zhLcY6Ti8XKldSlhUxjDxcO/7MWDbRyCZUlbz98BVXP1EWJWGeOfY9682KcRhT5tTlYKr+OEon1KzGJ4viIgRIVsHGiFHWPEv2VDYVfp4Z+mtqwk4QIufuwm63BeDubs88e+HdR0QVXNcEc4OujscjbdPkLE0DiW5W2lBW0eLd3V2GU/QchPoXsdbgCsc8jcRCG5xy3FVdc+jOXIaRsk60yOMTm+2Kttrw9PiFf/Dnf461M2W8ch48l+MX9mvLdt3wk6/u8Bz55vNHNm+/JnpP8CbRaAN1VfP89IwxkqWXhSQWBBG3SUAeM3PmeDzQNE22sf706RNNug7LwLUUES1tsjW7X1JJRZg2ZhfSx8fHLOgchiGzgMQSPuTNSjej3W6XA1nf98lSucMYmxvN12ufg/Fms8E6cu9D4ZO7uzsul0tO7PQ+arNVocHbMB9L09QMQ597ESEEofHWzStH4+PxmKuEpdeZJg2HwwEQBb9A1UGeF3gFv90a3JUM80rrXxvPXdexu9vT9z2Xy/kHhAyxEanqmmma6fpr7m9oNaTQp6qgnXO8vLxQVVXehHWjDEnLs6oLhnFk1a44ni4Y62QudD/ibM1sDJOLBNsAjseXjtOx4/5uzX5f42NPu3HETcVqs+bx02denp4pZ0msbBRV091+j/OGh13J24dfcOo9X146jscr63abvLQKhn78w24KQ10xFhGcFYVmjJQBQtsKpjZ76rrEJ2HLfn3HS1XQNLXY31Y14zSy3e84DFfadkV/7XGFzDqwRi50u1pRtSteLleGJAQLViYykbKEsiqxbcPsPbuf/5Tf9B2/u8jOfzweubu74xBmpnGEaaCuW777/Jn1ek2MgaEfefP2Lf11oCwKnquSoiyYr1eCn/lgpFAIpePoHN/HgK8chamZRuFgD/1VlKXDSIgRu1rRO0csxJ6YGNlsdoyD2Et4Y3n/j/4x302e/WrF6XIiliUP+3su04DF0RCZEuwWDXgrZmDBGcq6xAJtJY1iEXwF6qbk2ot3uitKZj8SSRldnPFTwJWlQFo+zXqGXIlhDMM4cOqOtE2NnycK53h8/My7N2/5xR/9HD975jBzvBwoa+GN17VM0lvVNcfzWTL4qmJbCMw1jTL0SEzJpJEaQiT4QFmXHI+HVywT5aQDObu+Xiem4crb9UbGTxhk2NAcmOKENY7Zzxik0fyTn/6Sw/MLddOy3kb6ccAYz1c/+wlzNHz9kz+iNldKOxOj4cPDlrLd8LvPz0zRMhuLMRV1tWEeAquqpawcAU9ZgDXSU5rnOQcEaw3z7KmqGrDESK58lG2ljVTNklVVHGPMvj8KeyjtMoSQA+Z2uyNG8vWSIF6mjDgSgk/upT5twi5Xs9M4E63c79PxTFlWYOFy7nj79l2+/nVdc75cRO/jHMbJREKMTRTLmjlpWjDxFbSiG5xuFkofjdFwuVwz7CYT0UbevHng5eXAPHtxAUD9z5qEvuqIUJl1fR2ulOVN0T0HEaoZZzDOMI2TQLvGEK2FEKjrhnGa2O72XBJVdxwm2mbFnER4p9NZ2Hm1bCzjPEOCWFxR8PHTJzabjVRaRYHzgXGUezSYMfs/KaVYHVk1yekHqRCmcRJ3ASMiTWvECuPadQQqJj9J4uZET/Tb7z7TPMH9fUszllSNwdqSd+/uef/uDR+//8TzlycRsflAUa9xs6f0gcmfGabAu/s3bFd/wqcvzzjnidHTtD+up/CjdQp17SidZUomc0Q4n8503YUYb1ObZi8P9OPjlzQKUEQm1jppXlrLu/fvGMaBqqmokw33MI6ECPf3D4K/pYak4JyC2FvnMNZSNw2zn7l/88C56/jdt7/HlgVd31NUFc+HgxjcESnKknkeOV+OnM5HAD589RXDMPD58xe+//gxTccCjHiG2NTP8CE5UU4eP3vGaaRZNUzzxDRPdNeOYRrAxFtTaewxBDZpcMY8TRAj33/3PX/1l39N26wIHt6//Yr9dsOURvJFIs4qrhtFZDOLEVJZOq79BWMTdGEdhbU4Z7AW0V9YYUXpz8codEOMxxnD9XqRhmS6n372dN0Fg6E7nyit4XQ68pvf/pppGmlXLbaw9GNPNJHJTzTrhsmPlE1BxDPPI9fukgeBhCC6iOCF3dRfB2KAupKhJNM4Sya+8GCRgell5sMrXVKGsliaeiWlNmTyQmULalPS2IpduyLMM5v1Dj9LhdqPEwGo25a62XC9znz88sLx2AMVcQYTZtat4c1dzVdvN3z57rdczk84E7DRUJiSwpZMwwBhxvtB+k3zlDNZHfQUQiQE4eQPw0RV3WaEqJZAXT41gK7Xa56fn2maJjsGayauEI3qE4D0p2G1WmdVL4gJWlHIbPTn5xfGYWKevLTukM3DWlnD2+1eJpnNgaZpOR7leZCGrbC3fIxgrYj2vOfa96zWG7bbPTrOUzPwqqrYbre
8efMmN6i1UrKmIAbDPHnqqhFIJQnWnp6egJiVyXOiGmuvo21bWT99j3GiPB7nmZfjkWa1IhqDKwuiIXtFRWNkyuM0Y6xjSpThaZo5nc6JSjxxOV84ny+07YqiKKmblmGakjdbQVEKFH25dKxWa3EHuPbCoCoKVus1u/0+s530vLN1TqqwJLpahnESncc8U5cOwkxdlRwPB9HjFAW1LRmniclYyu2OwVWcveP3ny989+XKZZLNdZqFvv/w5g0//eUvaHdrXFPx3J15PnecrwFrG+72DzjruN9t+Ad/8kve7hoaMzKev/yoWP+jK4WicKxWWxlTlxplKhC5deUFx97tdjw8PGT/caVyXa9XrCOLSdbrNZ8/fcY5wXDFZlh8zZ1z3N/f55JUZyoou+H9+w9cBxHSFEVBVZb84pe/5Np1vLy8ZBpbTDzh9+/fsdkIbnk4HFJTTOCPSyeGYG1RiJ3B4tWkAeITE37yeQHs93teXl54eHjgkthUnz9/piwM7969ZxgGLpeOshD2yePjE//0n/7TROuLHA4vTOPIerXCOYHOhkGGuStPWnnpUwxUVS0U2iIJ0FL5q6WzwkXAK+ojIRLnwKZdMU8nvBFFbmVg5T1FmKmKgqEfKDG8+9kvMDGyKitWxhLnQO0MwRVMhyP7tsZfRJtxmWcu3ZVmvcZfe6I1rOuGtTVY7b0klfPT8xP7dsUYPevCsa1qhvM5bwqmLNP8hsjU95RFCfOEw7DyUHqx8QjG4NOoWTN6qQaqQqCwaWazluarKreLoqAfJqZ54PsvA3fbDxhjqZwhzhN1UfH1uzvWreXSd5htoCoKptEkDF57MDYbFRZFIXBEwsH1uitjRjF3rQrUE2gJpWlz99OnT9l7SKEY5f1fE4USyJumqpj1vYwxvBwOuf/inMtst3GamOcJf/WZay+xymYbB/0s7z2kfo0SIwCaps3nVhRiXa40V61stNLRcxPYUuan73a7/O8aRBWLn2efIS5tbFv7ek7Ateswyb9JNADtKxGfHkfdJgbTMFKu1zlOqFGmuqLqv3kvn60wlja51WtK74XCYfp8FUUhvbPUr7tcLuz3+yziVMj2dDphnawdQShiriD7vs+Q09RfqasK5zaEGOm6HmPEJ2u9WmFcwW/+p9/x5u0Du01FWVUUpeOubdm9uWfqOz5//Eh/6blePP21w5iO+/s90zDgrOGrrx64f7Ph88fvf1ys/3FbArlppMZY2vxaMiwgZp92tc9VC1tp0IycLyeGq1D0hrLi7Zs3gOHrr3/Gu7fv+Pbbb7NpneKhQP6cr3/yE9pEQXPWsttuMyf5dDjQdR3PT0+Zz7xZr2nqht/+9nfZDdLgUhZuBbqyFlckg7AUcOUlxn3mfkdZlGmYjpSeal+s5yiipS3WILt6kuIXZcmnT99xvXZcr9dkXCblafA+82198MnN0STRiclZhysLxrFjnEW0Z2Ig+IArpVHedRd2u11elMtJdmBwdYWfBfcNiEPkxlj+YrdhCJ5yJeI7TKKJznPSMyAWIZo9GodJ83Gjn+ljxNcVpbXY7ZZplj6NbdQBN9H3/MycxFixEgjQ1DU+PWA+yHWoqko0Eo1AKvNmg42wmQO7TJwwjCEy2oApHI1tuYaJaRTV9zjMFNZhykrYUIAtSiI1p+HM07njw75m9D3GwGZV0Y2ef/gnP+df/Ju/oT8daYs9xALvhTlSljZVjMLO0QZ6jNJoLovUF4qSmcooyBstmLSmNDDmQLZotismrknWkm2km06eLTzfxs0WRcFqveJ4OrK/l76DNu3VFA4rzX+dzLYUuumGFWPEOpcdXLWpO88z9WrFPE9cLhfW6zUGGU+p6m5tqnddx8PDA9M00V267DukwjznXBb1STYdWa3WeWOx1mYdhr6nSVi9XbifLifKKaFB4bg6zUnoF2M59fpdLhfevn2bNxRVgCvpRY81xvAqkV020cdxpG3EW02Fl+M4ZlhPTf3KUiazjeOY3yOLBpPh3uV8ZtVUjEOPnyOuLLmcLxSFZbd/gw8jo4+Uqz2Yhl//9nse7vc83O/wYWYaL/hp5P37e54eDzw+ygZjneG7775ntWpom5Lz+YW2bfjqq3d/2E3hcDjkjP3du3d5oa9WqwwdDUP/iq6nLIoPHz7w+PgICN1tvVrzqz/5VWpEhsxc+e1vf8t3332f6Yq6s+pNe/fuHdvtluPxKOM2k2BI3pvcpNSM4Hq9Mk8zz0//LlUxiRZ47fNwisvlQt0UzNNMmzYXm9hQ1rrkyV4xm5nRjz/YBMmfpZnH5XQmBsN+d8fnz5+pKwnSDw8PueoZhmtisaTmWFtjjeF6fMb7G/9dr+FlFCrhPHp82jBL5/DBczgceHi4z4tS2S26KYQQGceZlSuIYQLniHbGhYkHxHuo9B5wRBOJcc5Mn8z9nZI+REazYUNSsNoCbBKWhVl+IYhpnQRDo1IRTFEQfUgmewnEyhYdTqqjacYkPQKZvozYA8RAsAZvbDIXM0QnhmzBIa6txlJaxzxHiEZshAmUzoErKdyKzy8XHvYr8JHNqmaaOipX8NP3O777fkfXz+DFeC7ESJmSBWdF3RxDZBqlhxADYkoXJlauxJqCuiowNuQgp1W03kutbJWmqxuCis6W9tYajJTKqli1JkyHw0H6MInvf0iQhDaIdR6xiMyKTP08n8/sdjvO53POYLUZXTUyP+OcnuMwC+vHWXcbvtNfXx3HkoGkDCtrijyER4frLPUoykTUykifK604NMBWZUE0huv5nAkK+rnZhTneqrQiW3V4mqQjCOlnFblY9m10g9FjUP2Esp90M9MNHaTJT4RjGqKkv6eb663fMmUChqq7z+czIQS+/fZb1qsVQxwhBuZ0v+7v3tD1V8qq5DqIb1R36Rhmy2r7wPnaczj+jvWqZLep2W5WVHXB1z/7ioe38PT0xPVyweO5dB3T6HjzcM9mveeaNuQ/2KagcISO5vvmm2+y2lINqK7X7lVA0lkHv//976mqisenR2GuhMB33/yec7rRTbPiULzw+29+nzIjyW5U3KQ7sXOOjx8/5hujC0cpbfM0s1mtKVyBn2YuaUSdHEvLMExs1hucHRLlr5Dsy0b6vmNVFEzJ+iKmc9ZZDt3UMfaSwW23Ww6HQ2ZRqOLzy5dHnHGJmeJp2zXeR7799nt+9atfYa3hfD4Rghh8qafROA44K9mMlvjalOz7nqJKg0LKWqijRDFos+TNaWk/oNmkUH0jYRiZVg2ndc03w8Avi5p69rhoRH9oxH+UGLEk+CpBJ1E9040CKRIsBWLwt8/EpAZ5Cvg5sCc5SIw3eWJcqEaUJ5mqo/yKMb/XjMVEGIzjf+ovPMaZkIYlrZqWbhrEEFAVsx7hZEdo2hYfAjjLECzlHDh0M++2WwIj1kQMM5vG8quffcU3356YhgsmKcV9lEZuGQLRh2z2qEFNxWTZst3POCuMtCqZpmlm773n4eEhzz7W+71s1upQGA2Mag+yVBi/mjtS17jCEa0ExhhkZkhdOIx14goaIqfTiXdv3+WEo+u6TBXW4NqsBZ
oaU5C7Xq+JaRY5nU/iXhCE3KCbzlKDotmwDKWRBE2GwLS54tdgLqrkm2ZDA77GGq2o+r5nmmeKBVzz8PCQGVZ5cltRSI+srpiHic2mJfiZoR9YtSvadpOZVroBacWkwVyOe8jHqbYr+iwpgyuGNFzMmB9oQYrccJ/nWZwYzI1erfdON/pV2+IYIXjKombse86njna95cuXj2z2G87dQFnKhl9XhcDRw5XTuWcYeiJwX24BT9kYHt5t4c2W58cnvnx6JnjLp48vPD/1tE3Lw/9cgF+8fvSmoJSzDx8+8PnzZ+Z55uuvv+Z4PGZesZZY9/f3eUP47W9/y/39PV++fME5R9M2DEkVK4MrPB8+vOd4PKcYIRoHP9/83VUYo5mTLipd2DoZiQXNTPFGvcHjKFlSP/RY69JDLBhqURpOnUADaiCn2bIODq+qipcn6SF0XcebN284n8/ZokAzwv4qLo6S7V2yhUDbttzf3/E3f/M3C0Vrj0Ga1IUtUpm70GeEm2DGGMcwjlSVYJG73ZZ+6GVmsLtZBQAZmvDeM80Txjr6MDEbCMZw9YHSSG+ibmqmMGOtoevEn3/2nnGakqWEDIGJqXkWI4zDSF1Won8YBqqyYBhG2rZOXi+OcdTsacIkSmFEmBekKsKnikNcQVWtLi6pmuEZ5+jHK0V0MBu+8wN96TDTiMNQVBW1ARNjbkiXZUXpSqElmgJTGE7XHmcCrSt4Pl65364geKoCnAvYGPjZh3sOLx3ffHxmfVcDBd5HjDXMfhKrlxScQ4AYdcBMSd+PbDYV1ogiWtetiqI0yOgGAbfez9u3b3l8fOTh4SHNW6hzsNVguk44+eVy4euvv87irkt3YZwi3dDn5rdPQf18EoizLEtC0/Dy8kJRFLnPoYEZUmbcddiyyE3k3W4HISafIDGAXEJZqknQ6YZLEaI1NlcTqr9YjsJUREGgtts8Zt0gcwPXOmJpMtW6bdtMuVbTv6Zp8NEnD6kZV4jzcX+5pjXrGcdIXUsFptWUfrb2buSaC3tQqcR6z5xzknjOt5kX+tlfvnzJfxeChM6qvt1v7WWuVqscKyFy7S5EP1NXLZt2Q1W1nE4XdustxooZZj975hDFOv8kowaqEubxyqnzHM6f+fD+gaoKuMJBdLz78IG7+3c8fXri+fHANA/E6H5UrP/Rm0KZlK2///3vOZ1OvH//Pg/4uL+/B6DrLrkU/fz5M9573rx5k3fYYeiFnpUW+fFw4P3795yOJx6/PFE4xzyKiZ5a7S4bULoIFQstnGMaRiDi55ndbsfLywvWWZm9AFyuUjLJQJIV0zRnz5e6vi3mNw8PDOdLVjJKNiiZ2Gq14vn0zNdff/2KT/38/JybVXKjK/rLiC0Knh6f+Zf/6l/y7t0b/vRP/1QagkmAVBQifPt0+sjPfvpTzt35VSaoL5fsiWXI+jotUmHgnM9nmlVLIPwtQaA+mJIJtly6HmJgs95z+PLMkMRuJsJqteblcMDFwPbtW749HPAxUrYtLg2Xl4ORUap10xKtE9uPyWNdSVs3jNYyB8/kRbsRShklOs/CxvGowK2USsNZhmmSe502dC1HYohUriA6ob2ep5mqMDQk2miUwDvGiXW9oiyMKFODQAmEWRwsjTTwQwhiIz4bApHjeeBw6nl7VxOiWB0boGDmJ+/ueHzumKYLPpZJtGiznTXRZabJPAvDRQLHYjqai9RVSd9f8+assIcy6tSA7eXlJVNPP3/+zNu3bzOcEUJgt9vljFmtmzUo932PwTBFEUmqb74rCtrVKv+9BLAmJ3YalFVAlz2pZsell77hteto6prufGHVtuzv9pyPJxGIYnPjVpljChVrIHWpQTuOQt3UGc/aT9FsGchUU52trKiAGiNGyG6vilhoc1wb0+1aGtBlOr8QbtXI4XBIMG/ImgutFlR0dkuk5hzXFKFQQo0ekyaeukEpMrLsbcjGe4PMNAaqz5dWJmXdSE+NksPhxGYFdVnhnGWYx6RBEWTg3ds3PE8TAYfHQeE49yMmFvz1//Qd27Xj7Zs3tE0l0//czO5uR7tq+fLlM8P04+Yp/L3go2+//TYvKi0Zf/KTn6DS+uu143g85savMgtulsgFRMEoxxRID4cDRDFYe3x8ZL+/Y0445nKeqzbiuq7L72kirNKOTowMfU+drISVbheCZ7PdpgokJGbBOkNDSnmdFqpak7Joa+W4VJ2tTJ/T6cT1euXh4SEzCWKMPD8/8u7ha/7r//q/Zhh6/vhXf8Sf/dmfYgyJbbLOaMl6veIXv/gl59ORavG59geqw/1+z2WUbO7NwxueXl4oV6KaHPoBVzlZQAvMVrOT7XYrAp+yIljD8fGF0lWcxxlbGuq6wTtDtWqZhiun7kpR1XTnE7FwhHGmqiv6a4+PkehkbjEhEMaJ7aplniaGecKWBWH0BAPdcJX3RprBwUQC6csEorVMMeJaUc2u1uucwcUYiVZ47dZapmHEupJYlow+YJH+g44XnKYZnNheQ1LNRwjeYzFJGDXhKkNVyPS5GALPhwt32wZbFEI9NoaygDf3G+73a56HCIWsAdkyQto8bG5IKiSkVt8aEOT1evqZWkTo99UPSpkpRVFknL9t21cZ5+l0SjCr0FcfHx/z91whbCNjFxYMCVpR24nZe87HI3fbPUN/E6upIC0HOwMxiGW3S2SS9eLe6HzjorS5itdjn6aJ/X6fn3WxwjhmKwv1fHp6esrZss5Yl0E64kB6OByz4+zpdGKz3UBKCHWzfHl5yULBLBAMMw8P99kLKoYoUFCMrNfiWur9fOvDQLYLUZio64T9Z5JlyX6/z9m9wlzq6RVjlAQ09UE05qkFeSRSVhXD0GfmlFYcimAYY4i2ZBgnTGFwhTgJdP2FYezBIT5M1rLZrHh6/JJiGvS9T/e/pi4riljTd1c+z1fW68jd3ZqqrdhWFii5e/czXp5+HCX1R+sUPn3+IrNkrcVYy/ly4flw4Ne/+Q3/7te/5vHpKas4l9m9NkB1ERsnv+9DoKxEDo6BcepZrVv64YpxUFQFzlnatkmy9j5vLMMwyL9dO6ZpJMSQqG83f3ttEOuDaZJHjbUWHya8nxnHK8aIcrZwBWM/UrhCvInS8Zel3Oi2bSnqgueXJ1brFggcj88YYB5H4uz58O49f/nX/yM//dlP+D/8F/97/vFf/OPMZKjrKilKz/RDz/F04uV4oE1ZSkxZT9SAAszJtkMcLqv8cIUQqeqaqqkIwfP8/HxzRU0GYXAjB3gvQa9uG0wh07AA5jjRj1cZXFOXTH5mDLO4utYVVV1zOl/AWmxRyOfhwQRw4E0EB7Gw9NNIs1lTtg2r3YZgozTQS8uMmAyOfmL0Yu07zCPjPDKHWTxaqpI0ZEwa3ibKKFMihYGQsN5oIGAwhSVaw+glCJmk7rRAU4vAL6Yg7YzFj4L1+2johomXbuL5PIGpIBYQJIFwpeHduz2GgaE70dYl69VKRHdFSVOJjbOqkaWJ2idcWvoNOk3MOZvuXXIDSNWnQn2SdZKatDXjKAGy73vGcZIZHW1LMPD4/ERRl
lSJwgvSsyNCVZZs1mtpvnvPqmmYhpHoPfM4MvQ9282WIgV27Q0ux1WqX1VRljR1w8PDm8yyk8zY8/TyLBYmw4h1Bd2153od6IeRoqzorr1QlNsV0+zZ7LbYtGkNo0zVW23W+DQ/XNmKMUb6XpKalRoQGrHCKIsSYmQaBqwRuPfu7g6181D1cF1VnI8nji8H2rqhcpIgFaVU5VpJWyvq6fv7B8pCZiKo39Zut81qZa3wVJG9rAKGQey121UDJnJ3f5di2Eg/yPAsMePrAS+w6qSU15Hj8ZT6sCP9dcC5MgntNlyHK8bIfGeZymeoipLtdisz5F1J8Mk4Mhp22zvqekVRNpTVlkjDl8czf/PvvuPx84FxDIQYmeeB9fYP7JLartccLxeauqaqa8I0c31+EcOp9ZqiEG6y2v+u1+s8YUozGLGHMMToZSf3MzZaAp45esZ5pChLohExy/39npfDAZ/G8zlXUJZVUsbOrPY1fd9R103mBFd1eeNdAyBDx0VhWlIUNlPSmlYwb4LB+IqmahnHz/n3why4dhfa/XuCszwfntndbXl5eeH+zR1DdyV4T/SROXj+5f/7X/Hl+MT/6f/yf2YeJ+Z5YLvfSJN0IAWslqKuwFke1m8Z01BwG0zakPTjLUPfCz00RuZRxGjGFThXcjydcKVlHPtXYqIlBqrZl3Ul0ygT3cTwLeJCkbNVSI17DNY4QrAcjxeI4FxBU7b0/ZWqctgiJGuPCY9w2ykcTbWmSw3SCLiEUbvE8ogYojWYwhGtAR9whaUuSqL3yQPKgbX4EDGa+VtLlbLfoMlGFIqlKy3OGgojZn7GGREgOUs0IXWzA1Uh8yliDPR+YIwRvOH7546H/YaCEeKER/Dn+4cV60c4dR3j5cL2boV3FmfK1AOR8ZrROggSsKqmFlPBEJnHmdbKxmsMuZF5Y5UpZm6zqZv0JXqsLdhud5IEhcjTyzN1U1PUFafLmaIsGYYei2FVFIynE+2qJU4eP4xgwEfp49SuTBPQnHj8GJ83I2XVaMZ/Op1A713luJwvOFtkC46qqqjblVwjLJfLVRhNPnC32XI4Htms12CFbDD6GWccfpQKs2hqLv2VqiyJRJq2xZWzOAbHSNOs6C5XvA9UziGDcGYwco2bqpZN3txmTfvgGcZRIE7vaes2VbKGKK4YlFUBhZBW9vs29Q9ETOm9oalXTONE13UMQ8d6vckNcJPoxzHGTMEVunegKCzjNNCsavqhYw4Cm7frmu7a4Zzl2gvEba1Qx72f0nNmoV0xT3O2aAnzzHXssbUIDdta0Bg7yejipmloq5rZzkQkMa4Ki01swVhIbnO9jGDFruPz5yun48C7tyuaVU3drH9UrP/RlcI4jcnGtnhVCez3+8zWqRvB35Vhoc0yfRhulskui2BCCFy6TlgbhZh2qbmcTPEiwyJKMw0hMHsvHPEkljPGUNU1fg6cL5dUpptsMKZQFpDxzBD8wmXUYoxQ72KMSsahbRvquuHw8kJTtxxejtzt7whz5Hodads13/7+e/7Vv/w3ROA//c/+M7FesHLex8PxlXjp6fExuUcWPD095WvX9z2tVllC1+Hd+/fSnK9v1ZcxiBdUug+a7YEEn+v1miEkaTJWqJe9/tz1euX5+fnVcZWljEYdehH91VWdpovZXKHNiSas8JR4VgkNtv+BHXaMUj5rJqrwhn7fGIGiiqLIjdmYGEraE1mqQ1VApA3CW+8lvmZ2pH6J2HyEzBASt86CiMEUMoP60vUM40w04uUE5Ork7f0dhQn01w4/ewyW6zAQgzjFBu8zvBkTXCWaBslsRf8ixAGlTcoozHXufSh0o+wlGdpCroTv7vYUZUlZVWw229zkLqtKMu9UwQ59L9MEk6WCn30WYRqk31KkDUCvqQa+nKw5h04UFOHlBfUkUk8mvUd6TfM5qAFdarR23RWbzstYk/uMCstixHOs6y5ZUyMw8Zq6vg1ikutyE4QNwyBjVYviVa9Ge40K6R0PhyxSM0aH+UwJwhW7jr6XynMc1Za8EvFsaoCTnknt7fxwoJAOHBK21sgw9FyvHR8/fqQsk215gGGQyuB0OuZ4tJxQJ2I4qQCXtFelEouAteLaXbOhZFM33O33lEXB0PeAJCqFdek6J8QhOowp+fb7J77/+MLnL8cfFet/9KZwv99RlwXd+cTYd5jouV7ORD9xvZwoneWSDLr6vn+lql2O6zufL/lBvmXz8tLyaprGhbeMzThdk4QpIArrYADrOJzOjLMYWZmiAOvop5lxnhnGibZd0XU912vPPAeenl4Ay/F4oqpqhl7Gdor+YJ0DjjGGw/GY8VJrHHXd0l16LpeezXoPFERj+e777/kn/+F/xMPDQy41FTtVHHmaJn7+i19kfrpsZvDy/ExV13z6/DkPCSLC5Sz875fDgdnP4l4aodZGsVVv9xuNT4WFVVUl6qN89rIpqPCB9jH0S5uEikcrm0SDujCzbiIsDdDLUl4FUzkILHjbei+XNtF6DPrAZSV2ul7e+x9sAuTzVS2LD0HsGczNsA1lOaXAYKw0sV1RULiSqm4YJ8+nL8/4aLFlgwFKayHMbFcNm7aR2cujjG0d5zkZHor7rzEREOjHuuQYGyJVgiRABWc29whuRms2B7v1WjK40+mUz6ltWw4vB2ISk8V4Ez6pZkGbr+qDpBuu957z+ZwZOjpvQJulypRRzv7xeBTmkhXYZMkE0v6hHrt6Ouk90BGkagyo60U8z+acDKrltTZhlxtSWRavEpuvvvqKKkHL2nc8HA74eeaUdEjjOOJnn3uCcLMPWQrJNLDqGrxer0zz9EoIqANzVJyrokJViGv8uVHkJ6q6wZqCeQoUrsLgaOoVu+0egyMGKIqKpllhjMxc0Ou03W6z7UlIybEKYDHiZGwTa0vp/qpYV1GiuijoczIMA32iEK/blqpq8CGCKZkmyzBZvv/4B94UZPcO1HWVrQnq+pbRzfPMNjEllIqni+AmbhsSPTEiwygMzpXiN2QLpikZe/mYKWHzPFNXFUVR5rGC4pW+whqXfEnOOWMDsbyVRSDZ/zCMGZcvy5KHhweGYeDh4Q0xkhu94zjSD7dxfMYY2qbFgBwjhtKWrFdbWQC7e+Yp0nUD/+F/9L9kvd1ka2PBSfuMKyvD4ZKOVXsdx+OJd+/fQ4R1Ggaii7xpasYkw1dif11XYv1dVbgUYLRxpc1ADex6X5ZfyifX31ERjz4cGpR/+tOfZt/8eZ4JPqSgHTPXWjcErVCAnO3qYv2hhYE2W3M2n7LOH24GS+qfbiwaOJZTuTCiq7Cp15UbpunnnUsVAxEfY+K7W0IwWFtxOF25DB6fLN8Ngbp03G83vLnbURcW0ohDVzqmeWSeB4G+1MBvnpmmmRDFHM9gcU4+R1hJnpC8iLSC0Ouo10HXgxrkdV1HSM/W8XjMePZqtRIIJorPlDD05tww1dGeu90uC8pUIa3JiPaaVD2tG41ec712GoCkASuzjrX5rJWiVhTLOcuy6YScmCxhKn1/oaFqo1cyYdU9EWUNTOPE4eUlY/xN2wpJJamE
fRLFGcTqu21bxmHkw4cPuZms1Y3ay1dVxapdZWbQslGsm+/xeMxsqXmes5OvkmeapmEcJp6fX1Kzume73RFCxLkyTdMbUiPdp0FDDj8HxkGU4XJMJXUliXDesFMvJIaQN2iteu4fHvKmpf+upBt9RqZR+qJlUTKMM6YoKeo1PtRg/8DwUfRedqBCnP66yyU3WbfrNc7IcHq1ANaFpQtAs1hlFchCj3lRQFIKYnKQUcZFjOROvwa98/nMte8x1ibLWynfN9sdx9OZ2YcMh8j7W/wsDql11QhrBVG9loXYKyy1DWn1pkz9BWLAxMCqbTkdT1RFwzDMfPvd93x5fOLN+3ccT0cu1y7zyceEd75//z5TX7e7HbvdfsEPL4QRUYsRl2rFIlJCVnXNLg0X96k5de06rt2VdeKga/avsJgulkzrSw/7MhuDW3msD6pWGEu7AX0wQvBJhPNa2KPvq9mkLnBNFjT4qD3BD2m32v/R9aKBRc9J32dJ+9NqcckUc0UhMyqKgjkEUA8pa4QqO89J/1CkhMQxB8scCl5OV7wp8VH6GaVzFAZ+/pMP3O9XRD8wjVfKwmJtxPuZeRoJQWCw2c+ig8Ewz4FhkOw9RoEnNNvzXthSLlmPa+B9rYKdc0arG8XS70iSGzn/Nona9J5eLpfMuVePf+XQZ3NJyPd4tVplGmjTNNJ/SGtm6dmkQ3fevXuHc47vvvsubzBLeqiK7mTtD7kaVpr0soGrlFBlVynb73q90iWI2FhLWVXc3d29EuwpTKnIwTjdrDr6oed4PObP0vijSupxHJimERAh3FITst1uUdsbZTcZY/LGpgOJlMYqzWs5d/1MiXngfWCexJm1cCXWWI7J00mvl5zjbQ6KrP9b/1WTATFBFPhW76E+Q2o7JEI+0UNFE5nmKcWQnu46EqID8+MazX+PnsLA73//TXLWjAz9laKwrNdi6LZat3z3/XccDoeM06vPiXp9iDLyjDGWtpEGzzTN1FWDcwXr9YZ5krGTCkdIiXyzqdUsYRhHXFHIgvCeQGSaPNerbCwxwiUNXI9BuO9F8qg5HI7c7e/xswSJEALvP7ynbQSfVR+0iFhX2ySoc9bIoPe7e56fX3h+fuH//s//OX/yZ38ijCQb80jDqrqNVnx6espYYlmII+Vut8tDzbfbrUAXRaFWSNlmQ6x5O3kA3U23UNUVz09PORvUIKmLThc0UTaeZaatGLZmG8pE0Q1Gfau0IS8PdZ1hDiAHsSXmv/x3zcr0M5WrraW5PkA3OMXlbFPfRwOGntMyIOQMO5XcEcGgZ3/rE4UoA5VCjEmlbZgmD6bAuJIQLcMc+fR0YgwOWzSUzZoYpXm9qUt2bYWNEyYMDN0RawMRT8Tj/ZSH0pAq4Gn2Iv4bRuZZPJ1kAKFNvYeAMTY78W6Td5fOEdCNQK9dSDYwYxpYr5CR9uNUSVsnPc1yJoGqixW6BVHAayBUOE+1NuM4UqaqTXt35/OZt2/fZl2EBi/V5miQVAPK0+kk0Axy/3XmgnMuwx+6XoYkuFuvV3lDEMv05JbaNNkqQ9dDVdc8Pz+nRGPOUKV+X/+usUKTCcXay7JiXEBpt/dwGbPX9aa+Zu/eveP5+TlfS6luyzRMqaYsq2z7LT0KMRKs6xVV1dC2a8Byt79PPnCOaZ6yLVDbtpSpUukXBoMy7/uUE4HD4cDj42OOKwptxSg2/a4omVIsdJXj/uEOkyw7YjQpGfq7Xz96U7DGsNttqOuKcex5+/aBeZo4n09AoCwcBjJXuSzLNFbRZG5xCIHddpfKa5mu5mxJCBC8eMoMw0iM5lXWLuXonJXB4zjS1LJgNluZHjWOE0VZME5z9hXSL1WdikldkRuvl0uXlc7dpeN0PqWs+ObD4H1gGAfxYB+vvDw/0l1OyKStwM9+9jMe3tzTrCqa1W3gh8IDIFjnt7//vXyO2makrEOzQGm+jQuGininyDFIViFwReT+/p4YI+vEJFpCMUo51OCuGg3glY+U/lweM5mCt+Kv6m55m2IlFuBz8kHSz/ph/0DLcK0MltjxDdJxr4L7ErNe9hl0Q9JKU7ng2lR2TuiOGINPWZdCf0PClTOV0AhxwCOmdeLB5/DBcr5OPJ86vCmZPEw+SGU0j/z0q7c0RSROV4y/Ev2I91OqEsRccJqkuSsW0E4ohuMEUeBRax3WCmHW+0gMorrOkGpa66vVKgcyhf+MTdctDbmvqir3krSS03uqCn8lVeimrgFTefnKUtONQGFA1UzohrHdbrOBnFaVuqnc3d1lmOrl5SVXMs45cQxNDeL1ep2V1KrvURKEMSoUNRlrB2mOKxow9H0eqam6BNUt6bnJLJG0mXW350uvrfc+9bEkUW2ahv1+n8Z9+gytAlwu5wx9q8hPNzbtMciGFNhstgzDmGFqax2bzTY1t+c0zyJQuBqZ2T0lAaI828MoG/zhcMAnuEj7C0t/LO/nV1oV7QepdqJpGoZxoJ9nYQKuV9y/uaeoLHViXM7jwJxo3X+wTaF0jqoo8dPEdr0hzJ62rnn35g0f3r3n+2+/yzupqhaXAUFxu6ZpE/1OMnoNEFMKNnd39yl4FPnh0H6B2gcLy6RkGEZR61pH3YoXfN02ooVwwk+evUxjG4aRoih5eTnStiu8j6zXG968eZuDy+VyYUi0PiAHqF/84hcyH2Hs2W7XMncZ+Pbb3/Pbb34rkvrhyuR71pt1sr7Y5BK7TPxy9VHS4CxZoQiMustFqLXWvvIGmlPWqwPghU3kqMoqP9xanmuJvQyeOvt6HMdsgLakCGsQ0gagfv9wOOTrL0E6ibYSo0d/Tj9/CQEtM3rdKKShWGZoSze6JVVTf6+qqlzpqLfT0i5A74u1lhgiIQZ8CBRpg3LOMUwyU8FYm11CfaJnjpNnnAIhGExRgS35/HigmwJzsBgrG2FTOtZ1yfv7HY0LDN2RGHxqjFrKqsyVT1krBVX7AwmeTIFSKyqZvSBzyjVgaXBfwjq3+yfrsHC3udeK/2vlFULMAULZTEKa2ORgrsFYn0ndNJa9ocPhkKxUbmtJkwRdGxqQVF2tLJzNZsP5fM7PugZQDcC6Qes5CDSi0GbMhAyFO7U/UiQVtPojdSmbryoRVWryoKNgm1Spq+7CJ5aYVqd1Lc+PTgUsyzRxLm3cAvvJ06cT79q2zY1pFbjqmpR7GvIzdBsaVWFtwX5/l3qaosWQRHTMFSFI0jsOErA3m02uFLXZredzSC7Q+ne1Eup7EZd2/UA3DBwvRyY/cTy/4P2IwQsd/8e5XPw9KoV0Q5fjAPUiff7yhf3+Dp+45oqXS0OkzBm/UkCXmLde/CVGrRtEpjam70m2IDd3TMF7lbD2a3fFJvxvHmf6a8f1KtjmMlPdbDa53HbOMc8Tl66ju3Zst5uF8Zt43jStjAQ8n8+ETMFz/O6b3/GXf/n/4X/zv/5fsd2umeaJMHu6S8c0jPhZ+gpt074aZTgm+2IJaCENpheGlAwTHyQ7TDh8Vafxfmnk4rW7EILHOsM4Dq8WpgaZJbvjdDoTiaw3K6q6pGlrrDNEAs4ZjBU
mjDTH+tzUWqWmtza1lN4nG5dQLUOI+NmnwEVyEE24Z7zdP83mdAMyiS6qWf1S8Tun39FMSAOqVjZLyEqvKTFmH6UQpKms7zkrVdML5jxPE4WVsiFgiFjmYDl1E6duoKhaQpThNMSIM4H3b+4pncHZyPHwhDUR5yTbU1YTROYwE+KMNWJRIscuAWKeQyJCyHWb0/HAbWqZqtJVvRtjlFni6bzyhQ6BMcGqylApKx3QJFWTikPrBMN6L66Z3svgnAjc3d+J5ijqHHSXJgUWmcSwTpbn8zRRV5VMCwsh8e8tm/UaP88cXg45MLuUDNzd3SWc3r/a/LQi2KxlGh9IcuSDWI4ba3KCpnTlPtFuq6oipI2jaRrC7JmGMf38wPl0zkFbqyjdIECCaX8VgVjfi0WFvFfNatVmDU6ZCAmfP39Bp77N85xHsSpUpe+vKvd5HqmqAmNI91gEc0K3rXn79i0m0exdYqydz2ciwj57SpDwOGulKEJhmwZs9b3MrZ6mMcXNApvo7yFG3r57h7WO7ip+dLOfKKuCui6yaPUPtim4ssa4kmHyyTK5wJU1x3OHdSXDNDOHmX7o8EGGmkzzSD9cuXQnYW14mVyVS3/InubLTEGYNQXTKBOkQhAR1TAkr5xZ+gNh8sxDT104mqrARM9u1dLUjqZ2TENHVRZM00BZOkKYiVEmhnXdmXEUp8GyKrHJabKsyhRwwFhYrdfE6GlWKz58/QtsueLj5y/8P/+7/weH0zNff/0Bi6FyNfMQCVOgKRtMNGzXO4iGum5Zr7fYosSVFX3X01Y1cfZJpAXX4ZozNqLYFrRthbWRpqmxLlAUhqpxlJWhHzpi9DhniVHn6Y4pawwYUxCjxRpHjDPORcAT48w09RgTMDbiHGBkeE9ZubzANKBr81NeBhMtloI4Q5gifgYbHTaKEVeYAy6KOC162fRCCEzzjCtLbCEVXFGWGOdEdJboxcM0EogY5wjIpD3NnJUmqdUHSKLirKiVK+OwCbMPPmQleKZDl6V8f/JURUFdO7BeRjiWK0695fGpYxihrrYEb7A4qrKmaVrevnvDat3ipwtj94IzXuy8g0BNc5yIccDZgWm6sN/LVL2qsszzIHbaeIrSpOt+m5ehg160eZjtMGLEATZEwjwRZlG+F9ZmVppzoja/XAfKZoUtSiYfmXykH2cmHwnGEYwhAM16LV5OTcO5u3K5Xhmmicl70UFERAhW1czDyNBdRWiIKGv9KIOP+kvHpmm5Xi5s15ukoh5YNy3OWNbtmqZu+fTxM6fjOU+DM1jmSRr0p9OZwpXpnEdinBmnnvv7fcbtlc0jppMThXXiqTWMNGXJumkorRx36Qq2221GKrRPoaZ3WnkZCyFOhDix2TQ0bUXXXRjHiaatmWfV7wTKouH56cD5LCyseRq5Xi/UdcXj42O6b5HdbpOfRx9mYpyoV47T9cB1vIgg10Zenp9oq4apH6mLmujF0uV8PIuoLRquw0jXD9iyxMeYbD9mjAnYtHYulxPPz184n48YEyF65mHk5fEJEw1V2VCWDavNnilAPwVGn72J/zCbAtzss8dxzHaxmqloc1VxZhVdKHTQNLXMY00Kz2maXjXE9OFV/FLLK0mMPN5Lh77rBBOtyttMXymvXYJaAm1TMw499/d3hOAzvqg8aq1ytOtfVWWit40M/ZAFZfPseXx8pCjEn+RwvHA4nvhn/+1/y8PDPf/Jf/K/5f37dykDnZlnLwNeFswMzfrE2VF+xnvJrJx10nieJnzwSbx3c0nVqkSbb97PrNpW5PT9NVcHAgMNrwJKk8QzWmoLJ70nBE+MMkjk1rCyaXOJGR7S+63ePcYY5D+dEjcmbD7kbE8Ff2VR5qCsv9cnBov2KMwCCtIpZCpaxEBVV1lboawkvTZatUrGKlltZlhZKzbdCyx9nifKUgRshSsSg6mgriuZ32AcmJLPT0euo2cmMkePcTCOV6rK8tOvPvDm7o6mqBj7gctJCBPjNGOdo61risJBlM0bItbZ5Ks1IVP+KvkZbuwTrX6ylUpKmBTi0cUYfZRBQsYm2FCamyEk6nVquld1Q4RkqV0wzXJ8ZdJSjNOUKLQigNRqom4aMFbmMyOw4lahjPRMz8mrqUhVu/deWDWHQ/IdqzmfTsQYOJ1OPD8/s16vub+/R3n2Covofbj1ItRTymeNzDJJHJNdx/l0oq5qiJHz6czQD4QfsLJUV6CVpja5Fa4RevjIMFyZ5pGuu+TjmsYJV7gsLlPYU8Z3FrnZ3XXX/Jl3d/f5XE6nI6tVQ4he2G6FWON33QWTdDIxykZA6gtO45ThMa1cjDFpaJU48+o88Lu7uwR5rVmtZGDXMIjw9c3DAwYhtvT9wDR7zpeOEKGoKorqD1wpaCA3xmSamUI6iktOk3B6l+6LSzxSHQW1jGyahvv7+zwuT3nYCkFpqVkUZf5+nTLGJmGWy9JU/7xcLrx58yZnDMpqUexvid/KzZTZp6vVWgRa0YhNsi0w1gn1NTV8/pv/5v/Gfr/jv/qv/q/8w3/455zPJ4bhSl1X7HZbhuGKtSKuG8eezWaN9xNl6djvtxSFTVhhwzRJ6ahOrErbVe8j7z3X7pqwYqEx9kNP4QqqshIsPT0QoMPPpV/Rp01WKYYaoJcsIZJzqfCmEz/d2lcb05JfLhuIe8Uo0hJaF/Iy49XfU5X2Ekdd9geW0NBSu7Ck7unGoOcBZE8rPR4Nsrq+lsFV3kM2g6UgLoS0MRYlwVR8//TCRMBWFm8HXOGZp45t2/Lh/oG2avBT4PB8xESBSryXHoE+JyHBX3ouS/KAYuUKb732UBryGlBcX585dSJVIoQ+c0rH1Ou8pFkCr2iZ2nvS6689BmWhKR1ySRfV5rWuBcX71cp7qbVQSqqffWYO/eQnP8m9p+v1modgaeWnwfjGcpOkbb/f50CuFF11edXjUKU1kNX3yiDSqWxK5ND5z3qtdT0t7SyU1rndbiXBTQr1vu+5XM55/QlrcszxQ9TKpEAtQ3V0BkZRFNSNMPeufU+VtD/KzArpmdVBSwqr68jg8/lMURZcLh1FIe4Edd3QtivaVpye9/s7QogcDgeKouDNmze3AL+g8WpC9ne9fvSmoFm90saenp7yDVMc2llH8JGuu3I+X5hnKRkvlw7vI8FHxuEmltGGme7K1+s1XxhdKPpQFYVa6UomcXg58Pz8nBfJp0+fcumoC/R6vVKUt0UUQshWxfv9PgeMefZ0XU9/HVit1in7Vk58RdO0GGP5zW9+zel05B/9o/8FYqg3cji8pCbfwDgMPKQh5rvdlvv7e2Tmq8mlrGL3IOyGNw9vIFsgrFCmkN7Qw/HA6XTMHu/GiMmXK4SuqCwV2eRchnu0GaXaAmUJLRvAyvnXayiL54bv64Oq91eruuXvKXVy+W+QmvTp92MMt8Zh6ufMipkuAqZu2prh6fpY0gqXVYJsRre+k8JvSyqsBq2bzuKm3pZKUbHbgjGUHLqR2VhiCTMjOM88XWGe2K/WvNndMXQ9Yz8yDSMOmwYkJauB8sY4UybPknqr11ID+Wu22Z
zvvZ639iHkvgiMqk1Rax2rds04jPl+6fXRZE0DoSZLp9MpbzJAfiY0GC0trZVeqhW8bmo6erNpmizy0ixd///l5YWXlxd+85vf5A1OKc1PT095tKdeq+UmozTU8/mcKbvKlFMNhm6sSsDQiWb7/R7vfa4yNVCu1+ssxFSh5rI5r4nCPM9pylxLWRUYC8aI46pWaMaI9b5uSuKY0HG5dMR0DSAZWkaZi2KtVGFwE3jq875sLivdN7MS5e4zTXNiVV2JUfquTdNwd3fH4+NTTqKVsqxaEHXdVSr/j3n96E1Bm8Xv37/P+NySdSIZiGUcJ9arLdM4Mw5zpmbZpO4syyovwMvlkk+8aZpcUSilUR8qZU7osGtt3C4HYv/sZz8jxpitu/XG63vpgl+v12JHnVSFWr2oUhGU+SPqVClPZRf+67/+K5qm5uuvv8oPidpiOGdp2prr9UKIgX7opWlmIMTAerOmaerkpVSnDOOcHsAbm0Y+3WR/mM1mw7t371it2hz852nOD5A+hJr535gRUw4GQs29lY7Lhu2N0ioBxbrbpvHDTUGvtV7PJRNIz2nJzMgZyiLQaBDo+1t5vpTs/5C5pMer/6+BwCw2HP0Z3Sh+2Ix2hfZdfD5ezdSlGgn4EJmi4zrD908vBOco6po5mTH6aaQpLG8f7hivHTZGhmvHPI5YhLqsrDlpJofM1NH/12unsJ0y9fJxvjom+RkxUgs0TUvfD3lzKMuKqqq5XDrWia6pVEpjZHbH4+Njzvp/CE8sqwZ99i6XLje4gZwo6BrTa6w29qfTibdv3+ZgpFn+9XrN1X/f99lpVSmw+j6aAIYQ8nwH7Tc1TcNms8mMxhucLG4Hql/puu6VY4JWSp8/i7Hler3m+fkZtQ55eHh4JaRT2FrjzeXSMU0Kc4q7aNMKFbzrOk6nM94LZKd22HqtdH7zbd0V4vSaNlKFtDTmqP3I8/Nz/v95nvNxZd+jq2wE8yyOAs/PL5nJ13XXxNC7aYP0VZZldlJYJkp/1+vHN5qd46uvvsoyeVmwkhWpJYW1hmEY066dhkiUFdM0cz5f0mIu8w3RUlWzE12M4glye9gVUtIbp74vyky5v7/nu+++y8wbzVolW7WZ/6sLUoOalsIqtz8cxWPFpJtsEI+bfrjy9PSIMfBf/pf/xyS2gaapKMuCtpXhHM5JUC8KmcN8PB04X06v/m6dyaX7er3meDqKwZ2TwTHeJ5YJImZbr9Y5K9RAvCwFNbAv79My4C/hJf25ZcDUYK2ccd1slhuHPpTLjUDfb4mNa8A16gK6CM5xAY/Ms0ylUs+WGGN+uJf37ocBUwOqblh6X3UdaVDUn7tBUhJIl5Yey80HUg8pQNd7Hp8vXEfDMDsCRareZkoXeXO/YbetCP5K358JUcgTYutcofRQTS20gtGsdHkflhuvbqzLHptWCiJinNlstkzjlIOD/G6Rg7RW7fpMLjeAoihu8E7arBT+1URLs1OtCMRORq6pZp/H4zFDU5kOmXB/TcRcUdAv+naKBqgdjaIASj3WNaf9iyGhCNM08fT0lCtZTeRUU6HPgiaHer9VkAnk4Kqb2TfffJM2b5chbM2uJQg3xGhSIBbmkHUmX6e2XaW+4E3B/fj4JV8rhab0ejw+PuKDVC5Kz9W1fL1eGadbAry8h03eXFLfJQltx3HCGsfl0mONJHzGOOrqZn2hgja9/ioaVITi73r96E1BMe/D4ZBvhu52OmvZ+5AbYNqDOB6PSX69yjuhqg11Eap53lKQog+UZv0C8XQZfihTU1uHky+FYgo96A3SDK1t26yyVr3Dfr8HAiHM7HZbKWkTxVB5zf/8n/9zhqHnP//P/3PevXuXMcoQZNylqkK77pLfu+s6vnz5wsPDQw4AuhmAKG11UWp2gSE1J1OTND1QYj9uM56pjqMalDUQmDQCMYSQvYpiiMzzlBfesrmp1QWQ399ADiI/1BLovy8zdz23H25MS62EsxZX3BqLQjxo89+X1D4NWhp0lGcOtyRkCY8t16JiznCrhuT4hNK7xPjVo2cJezlXME2R7hr48thjix0hVvgAxkYwA6sV/OqPv8LaAWsn5qkDPDpD4UbXDXnjUrz6BgndNtPlfdHrvNRxEI2wdbAYLEVRZQaP2KFbDi+H3Bs6Ho85CG+324yJ6zO6vNbn8zmTMGR93txmNcArkURpmLvdLsNB2o84HgXe1BihQkftF+gmpImgBvIlzq/XRm26gbzR6/ssg57CZXq8CplphaI4vR67ZuiqmdAKqmkaHh4eMizlnATaaUrWJRbmeWSa5Bq1zZrgYw7sutZljGqVIa7LpUtJInlc6fl85vHxKT8r0zRRps1EM3l9LhWql8qrv4khbXL8tTLyVlldarexXE8KRWmlpgna3/X6e7GPnp+f/1YjUYO5LC5hBxljWa02gKUsa2LCzIuiepXxns/nnCEsDbuWm4fCNEXh2G63TGpzMQw8PT3nHXU5J1lv9t3dXbZ0OB6Pr6wb9LO0zOyHnvP5lCT2acqWgW+++YaicPzFX/wFd/c7itJibGS33+AKS1FYrv2Fcez57vtv+f77b2nbml/96o/YbFbIoI1AXZc4Z2jbhmEYWbXCHNASWIOjaiSsMcIEsbeMWDYKYXyIwO0GF92a5q+9+6UZHV4FI80gNGvXhSTv87fNy/T99OuHC2z5dw3Szrk8G6IobspnZf7ALWjo5gjkTFkCYsxwhpIIltCWBgi4TTLT/19CMcugK8dzs2LXYw6p0WptwexLHh8HhrHGFFsCBT5OeAaM7fnpT9/g3ETbWGKciFFYb0J/rClSgqSBSzcrDchaMSyb60uIB8jwwFL4ptRthUW1Ol9Cdrvd7pUho0xEvCYzSZtdBvQZWSrSleuvgVt/R2FWzc6159S2bcb9gTw7OUbRPWhgUmKKJoKHwyHBwW3O2hUSXBIudF3qprKs7Oq6zmK5p6cnnBMltVpTOOfyhDatlvS66qahqIJm/Ou19Ge6SydDbtYr+v6aE4dpkgmMooS+VTwfPnxIfYE590rLshD4OVmT6GZxc5FtUjO6yBvser3O10YZVNM0sd1sc0UzjmMeJ9ontbdClJvNhtVqlXt94ziy3++zM7FqzP6u14/eFBSr10xdb6Jm5Io91snu1hjD0Pc0dU3bNGw3G06nE//sn/2zHPSlESsDxLuuy4MslkrZH2KgEVit2oShNpmHPCX8XReOMYaPHz/maub9+/e8efOGt2/fZhyw6zru7+95fHqkSIK0fuglYzfQXS/8m//hX/NP/uIvqMqC3/72d6jVdtuueHx8IvjAfn/HX/3VX3N//8Cf/dmf0bQtp/MpH5v3Pi9g0mbz8vIicNHs82Zw82oxrwKzsTfdgDWLKXZJjKPnq8HeOSeDWUptLN+cOH/IQtKgqA+idZZpngC1h765UEeiNGwT60n/XVlM8rrZbkRujpuGm5mdJAX2Fdap7JRlVVBWt7WkawFuUIYeu2a+y8CvwcQ5UX/rNdXz12CxrE6MgapscLbmdJp4fLwQQklRtgkKG4lxYNVa/vxPf
8k4dBRO+jERGQwvAf91oNZ7sqzq9FiWm+uSZQRkxtlqJZYQRVGSx9NhCYGEbxusETGkXd5jY1klNa6KPIP37FIQF1+taz6GwjlcUXDpLjlj1wy867pX90eDt6qm1X5CEhNPSOe67Onp+WmlPY4jGKFdVynA9mmKnQZOpacKLHdzBDXG5qayMn1uQ6Yk0bq7uyNGFrDlnHuTx+Mx23MI/CSVtk1sQyCxl1pWq5ZplnGfIQTW600+b3FZOOfraYylqmq8v7mcztOcVMxi4a6JTFXXTPOU+zVqICg017tcWR2Ohxx79Z4syQliWBhy5aWEFK2UtCrTEcV/1+tHbwpVVb3iDgOZNaTmTWVhKYxh6C5UzrBZNRyenzgfD2zahqYs8HPy+kjl8d3dXV5ku92OzWaDCkemac6wgLWGaRqY51EMydSyYPDMUwQcfT8xDDOGgv464axAWdIAn9FJV13XU9ct799/IARpMu/2W95+9YH1ei92OiEQMPyj//Cf8P7dex42d7x784HnpyNtu2MaI19/9QuKYsXpOFDXO9pmz/l6ZZwnjpcLu7s9PpX1IQnVKicZ4yotxKIsqMqCIkb8NIp7WrQU0VCJyobRT0x+xJiYTPG8qKFT9nPrMciGU5aOqiooCoMxtypBXzdGl0yREggp4P2ECNwmfBgxNjBOV1xhmOaeGGciMsHNmECIYgwXovybdWI8F41MWdNjk1JRpqkVxmIR1118kPAWYR4nxn5gvPY4YzARnLFEHyhdIYK+KB5cxCgiq2RyqJCY/rmEZuQYZIbCPEfqaiW+R8FiTUmMVkSSUbysPJFgHd44Pj0dGWdDjJbSFrgA0yAwyJ//6Z+zW++Z+5m2bLABTPTEMGGt3CeMuPA6U1C6GhccBQUOR/QwT4Ho5T39NOGnCULIymCMwl1KLQ3E6IEAyP/7ecLPMsGsqWrmccJGKK2jqapk9+4wUYSHw3VkuA7YaHA4pl4YVFMvWPXxfKJZrYgG1vs9c/C4sqRZr5hjIKbgp7CsNoi1ZyIVn7gBhOizaFXJKN5rM94SgCIJGklrxhYFriwo6opoDNMcuHbyLHeXgbJosabCmhLnasqiZZ5j2qQCfT9iKFiv9ux2DzTN6hXlWQkuElRdatweqKqWazdSNzUCB4q2ZOgnnC1ZtSuGocc5+PL4MSc0yqASJ1OpJvrrRN9PMmrTFKxWay6XjmEcCETGODNFTzd0GGe5XDtR8hsoKhl2NQfPMI15LO4U5PtzCBRVhY9B7ocx+BiY/Ex3udL3A9euF6jRab+0p21WmFy7/4E2BS1lvPfZc0OdTxXjC7Pn2l3ETfR6JYZAVRYM/ZVvv/09dV3y53/2Z/z85z/PGbSWp8oc0IWlN1FLTC2dqqokxkBVFXKiidXUNisKV1K4ko8fP6XfIzViTKbROVfkGa9dd2W322OCxUXHut5QVg3RgLdgq5KHh7d8+83v+e6778X/aBKOsjGG3/72d7TNiqZp+erDV8zTzNPzE8eLDBx3ZYF1UhJvtpsEF804G6mKIhu4zX4imEh0EWGkeiYX8aXB2gITI9ZqQAwpMw0ZtwcyBS2rnYjIEJib2Z3+qZWaZrC3qiFVAulLBG4zEFJfQnQB2ny7vb8cj3D+YyJPvYabQJrNMQQZVTqlWQyJSRVTP8NaEcddU+8n9w6QSspgqBKDLf9egjMVNtINQSsTCQalKGqNJSYbixhh6MeUbUsvYBxHZh+gKDl1V06XnqJsmEZPYQoKUxBnCcB/+sd/RJwn4jTJsRuhvg7zhCeIEDAGZj9TpHnjMczM00BR2OzVhJEBRuM84YoSoqUqGwpXJqWv3HOFnzQJUL2NQUR7ZVHIHOaiEEGX9/h5Zk7Y9aptkxtvSQwRP8+8f/c+K8BBsten52e2+z2fP3/ClSXjPLHebEQgl4RdytrRhvGynyPVWZktpxWekbVmUqNc1t7lKjTOw+mY7E8iXd+nP69gTLZW+fDhK4yxmWIZg2TzBmFbjeNAVdaS7V8Hvvv2IzHAen3TQamgTk0x+35gGuckPp0JfqZta3a7be4PPT+/5N5BCJ6Hh/ucHCtN1FqX2JVSJUzjTFWqBsPyJlHVlZo9+5kpbaQfPnygXbU5vgY1WfRiJEoy8jtfLvRDT5eumcLDxt6mz03J4FPjZfCBupL+ymaz/cNuCmq+pWWgQiJaHk7TRN3U1E1DP/S0q5aXgwym3+13bLYbxnHi4c2bXJoaI57humj2+30unbUZ9Pz8nBkOCkUACSftc8NQS2Ap79b5uOZ5oqrKtBgnxNVSFMH7vTSHbVly7gYKV7Cqa0y0kqVNM3/zP/wPPH7+RNHWDGNPCDOfP3/k6ekL63XD8fjMly8fuXQnIp6yKFk3Kw7PL4z9mAbCB7rrlaZpCUGCIzamQfZiMTBGaWoFA8EZvJHgWjlLlVxlTVqkxlniAgLSILi8BktO/rIhp9dv2YxS2GWZZWtAXdqPKCVWX8teQghhUdq/npegLy3hdQSiYqn6+iGfXyFJPcalTmJJk4WbYd6yh/BDCEcfHG1UL69BjAHMjQwg51Hw+csTEQtOjPPKoibMgcLAu4c161VkGA8ULiYGk5OftxEcmNLgmTlenhnnDlsE5BJOuMJAYSmqUtZbNIAjxCjVjb3dr+WGoJCq9lmKLB6d8vEvE4HMxLO32RZ6/upgenMoFYrqx48fMz79/Pycm8zGiLcSkJNDXRMK04Bs4EstwcvLSyJ3lNlpVV8q9tIGuTbmq6pinAaq2rHdrbhez+zvtmAC7armeHymXdVJS3DTUCyHfHXXC8fjIfUQBHEAGValy9RYk5uxTdvQtm3upyizSVlXAgPO+fnS839+fma/33M6nbDW5gmM2ifpukuGD7VyEdrtjTKrMLjCoQ8PD6/gJI2zGu+0F2etaHN0sJLe+2EYXg05Uujv73r9OIlbWihKIdRMRV38tCml9q6uKGSwfOEYxoH7+3sulwu7/Y5rmo2qB6nshCX7QmwdBI9/8+ZNpm/pQ6zitnGYM7tDv69iFg1qOjlKF/A8T2nDGblcEvvCz7iqwsaZl+MTLhicD6znmT8va/7Rz3/B+eMXqqpgM45Uj4+8//AV4+WF7XbLgw9U1z5VEJHqOgr2j6EobkNnzPGaFqMhmDTEOwRsaamdZ38ZqL3YPLtoMWPCeosSAkRjKUtV/DrxWAk3Lv8PqafLXoMuDL13yw1Fg2A2/UrXTheuBhElCahNiL7nclPRz182trXPo//vnPQ5iqLMTVfFepd00bgIgvqgaDDT99QHQJu02pDUrFWTCb0mN0ZS/MFGGQkWqrJkHkdCMBjnOJ07Pj2+8OG+xU9X4jRhgyH6EWcLfvGzN/y//s1f8ub9r3BVi42Gab5SlGL74SqXMi/D5fqCNYFN2+AD2MLhg8G4AuMcVVGJlj2KKtgaQ101YG79B70XSn/s+56ivN3LJXV7SUvW+6vGhHoNNMj0fU/V1Hg/ZTHZ5XLJvZ7NZsPT05MM9knrQxl9X758Yb1Wd+AtGGjXKyYva6m/9pnT7/2cN2WpIm49QcW+
Nfh1Xce7t++4XI65B3k8CnvHuprtbsU4TczjSF1X9P3Aar3CzzIIqSxdYr1Jha5JrVYLZSnsKjGrq5i9T9op6XPeBmHJfG+xiYlsNlu6bsibrTIen5+fc6BWkoE26TebLfM8pSAta3q32yXX1CEzlNq2pa7rzPoKIdA2DUM/8O7dO0DYjSpwu7u7y5WbJl0vL6JjUFKB3u/lVMQ/yKagF0epirqb65ANY2QCltIoy7IU/DsEPn/+LAPIU7PEJwaGNmIE1nG5EaS85zKVoBr4NXjd+PqvZwArtWs5CEQ3JFmElnmOXLqOr7/6imEcCZdA7RzWRGKcCTYwAzZGHqaR669/zfDlkfertYy6Kxy/cg7z777BB0/hHB98YJonrLGZ1idCMpevVar3JFOJhmgcxnsKIqEwmGCoZs/mcMTGAAGmEJltwBYlcRJn1MoV9MOV0lpGP2ONy8F/ySLS15Kyt5y+pd9b/rxuIBpsNWDoRqH+NNqgXXLwtbzW91xuCtoYg5sXU07TuFUQqk/JHP00OAdurpK67pZN9WXgW24ey2ppadWgX0smU1VXBBRSK8Aa+rGncY6Pn5+53/0ES4kjUhYwzwOuDnz9YcfXn/YcrwcKU1NWe8YpnXuM1LbGmEhkxNgZE4WuaYKhsCtCkPOcQ2CzXou/kXMM/UBVloQwY42h6zo2m00O1Ho9q6qiH7rcdPz3Nd2XTVbN0G+W6Dd65tAP2ELuy1JspeNrt9utQLqJhqrXUasYffau/ZWmbXIGXVW1uAGndeTzaFdy4xSEfKFw1M0ORAzmNtsVl+6cGU/nyylx/i1DnJm9SYmMwZRiLNd1HS5AUVihlnqpRFwhQ7NidElQ2uZE6HrtsihQFcC73S6t8SIlHrKBnU4nYox53WqVpM17XbOizxCUQhhYbWYdgmzOmlArerJkXc3TlMd01nWdCTrqL6XeauM4ZlKAVmc692LJ1Pu7Xj96U1iWppr1qR+JBuBLsmKoEn3KOUdRVcwhWflaS/CvffFDCK9G/+nOq4tVf05tHpSOJ+rBbS49NbtYClecc5SVo/Zi3308HrHO8O7dG659lyCxEmMtH798JswDb6qac71iO0y0o+ePbSCMT5jHZ4XLs+JZXkmBzA02eRWW/9ZfDIaItwZjPCYGgpFNwcWSIkzM1tJXNUMtJmZz8ucJMWCQnydGXFI+L4PbMhAvYaMlZLK8/kogWGbyeh+0+lhm2roB6PcU2tEMVauQpSBJg4smFDIo5pbVd12XIT/9GQ3qS8bQ8jP1YdLz0+NWodJyU4Jbc30Jb92a7SISE2O9CedKgp8xxjHME5c+8HwauN82GGYCafiNn2nqmj/945/z3/33f03nn9ner2iritkPQgjwnhkPJlC4yKosWJWGGA1lBddxph8n/CyNWWNMgo9uvkyaCOkGqfdVn8UlCUShpSXbTO+/Ur+XNNfMcFskWXpN9P4pxKf3MqYxugrV6nvo+4PhfL5QtaotmMGK/3/fD7nP55yYxSkdV2EbTWCklxR59+5tYiYKBCzsQwngSqsVK5U6Vb6BqhTzwWnuca6iTESMuikJXqY8isjWE6PY3AhTsc7XwXuf7Te0jyHU2gFjHPv9PkNeWiEs2T6KYGgFq3OhNbbJJn1zONDqqCzLbB+ifTJd16pfWOpsllRl1WyN48jd3R3n85lpmrKy+ce8fvSmoAFDb5g2QZbcWO07aNamjTAN1grxiCrwhsvp4tRSdb1eZ+OsJWdXg4Zy15cBRx8C3X31Bs2nKTGXplzWPT4+cn9/nxf/09OR/f4Nxnj6ceT7v/gV5X//12yGHhN7HIEQIZqlTFydQYXxk4ikqcUbpdNvDCamqQEx0TqjePdGDNGINbIJhugsIwVER9eWjP/kH/Dy1R2DAYvQ9kzwWC+0Q/FTcbDAiTXgLYO+BnK9N8usfQlHaODIzctFk/iG898w/SWuv6S3wk2op2Ijvb8auKdpolyMK1Vuvn5fhUSGWzXwQ8qtBiw9Dt2UlgIx4NU5LAPlMuGoa+kXFYVuPJ6iKJmngLUl12nk88uJu/s75mnACJaHiRF84H6z5mdfveNf/dU3VHVDs95BcEQfGboeV0QqB64w7FYVjQk0lQgwz4Xn2y9HClMzXDuKqmEcJ+qqZLj21E2TZ0Us8WS9dvM8U1ZFHsOpm4XqPiQQ9xmSVcrrcn0sN0ljDatks6Cb9vV6fWVfPo5D5ujrpq4BzFpLXVdE4uL+VmkMqRBH1CZnvb5NmtOYotWnc47VasXXX33FlBI4zX5V26LrToLgwOxHYjCMowhRrQN/HilKx263TTYhEnwvlxNlWWVKPYkocTqN6CCrzJIKcDic0vWW9b/drnOiqsesmqjr9crT01MWxsmm4bKXlHM2r3FhMG2yVknj2263y8+zs5bL+fIqyTNG9Bk6Z1thfU2QtV+km5smbD/m9aM3hRsHeM54o6ocFVd8/+EDGDgdj+z3d4zjQD8OXPsrzjpc4ZjSrNllANGNRgOXLmp9YK/XK8655M9yScZ3DdYU9P2Qs08dNahZ02q1Ypx6IHK5nGka8Upv24bL5ZQNqX72859yHUaej8+s9vd8+tM/5dTsqPzM4/ff4Yzj/u1bqrbIG0CMyKAfbkZtOg/YGpsfEAP4xLawxuLnGYJn3a6YrQzhqGzFbALeW9qypq8t3Vf3XFcthRFOv7OOefLMzNkbv0i0xyVU8D+3Keh9078vG8RZVxBv/kLLDBtuAXuJ12v1ofYMS2hv2czUn3uFi9s593+W/QkNCPq7N5ojr85x6Wulx6W0SE0a9IHQh00rDD2vrLjW8w2JShkgWkuMljnNtn05dlzHmW3bEMYZgsNhCbOhMI6fffWBf/vX/47D0++oil/Rtnc4MzFMvQgOG9g0LftVRWNm1rXYP6/qluPxxEvXM0crQ3UKMCZQVg7MawHe0qpDM355Xm4COT0/Hc25rAKXJARduxp8jDFMYYZUcSncqHBVdjm2liGhBPpeGhvUKG8Kc/YoKgoxxRS77fiqItTETpu0y4xcf/+b3/wmbeo9q5VYdsdgCF6S1ZGJshR4E+uI0aTPdaxWLRD5/PmTQHUpWTVGrq1eH0121mtRM0uVsMlwlq57US7XrwwqVR+g60kri6qq8rTDGItFMiL3SKqhMsc0TV60imjbVjbn1MNbkj6A3ATXZ0irrfP5nA0RP378+Erv8WNef69K4UbLCjloV5UMm5CbORBiZLVeMyRXwWma8gBqFZqUiVuuWKSWnSGEzEzquo7tdsvxeHzVPVdcVQ3C9IFu25anp6cc/CQQTjRtxTD0yXtcfYNKponkq1LS1GvOnQzkOV+vsNlw+aWlHwa+FJHNasfl/g2rTcU0T7RNw7UXV1VlXijWGIIsDmvES9+QqoUo3PppnsDBbrUWGidIX6CIRB8pC8dkZmZjKUxFmDyUYLzBxijMJeMkUzUywUwDmwbOZZavmeOyBF3i7cvf0U1MA4dO39LvjWPi4C+y7mW2ri/57Bv1delttQxMmvHoMfg0qep4lIQDQ3aYXMJIS6xcYYh
pusFa+uBob0fhD+ll3OAX51zOaK0xzNMIzmKtDHcXf0rL7GWOwfefHtn+8g04uU4Gg41gXcG6Njzs1/z+8xfmoaOwG8qyIBYVs79SuoK39zv2DWxLqGLEj5HZGb6633O6fGGaeqapZr1ZE7zHOhIxwr2qEvQa6+YY4q1CmudZvIeuV2zi5Cst9IfQ0Q83YOcKCDPTOBIW/SE1aFOIosDQNk2moi/7TmVZUpQFp8MZVxY0dc3hcBR4zjkgsNmsc+LUNE2OIdvt9hVV+uXlBWdt1iDohiFVS8nhcEREiTfqtCQQBmNkquI0DRSlCmLdgro903UX7u/eZMhFFcn6WW/evMmCL71WOhvicrnkITvLTUXXuMI+QEp6+wwjqe2MoCoz03Rb43qd9/s9z8/PsjEeT6ySl5Ve7xhjhly1B6fxVGd0O+d4eHjgdDoxjhNt2/yoWP/jbS6MoawqGS5vrQwSd4b1ZsU0j7SrhhA8bdug08DGcUDmsMoBrVZtdjQEMn6o7IO6ltGX0zSzXm+IWB4e3tK2a8bJM02eEA3WlZzP3atAc5uCZDIl7XQ6czlfWa+3XC5XLpcrq9UGsV1o2G73VGXN7GfKoqAuKtqqIhAZYuQ8T1R3O3xbcPI9z33HaGG0hskZim3LYCJzYfBlgWlrYlsylxZfOkZj6GNkwDAXjslCKB2hrhjLgqkomcuSqXDMhSPUJWNlmZO30TxNRCPTxPw8CyQVDYSIS2MgfwjPLAVt+lrixEu8fQnLEKOIxhBhmUtla2EtVVFgF/BXLmsXKuNlNqujJ71PTVtupm8y39hhjPhNlaVDDlXmHYhF+kRR3Dae5dcPq59pGnGFJSRhXVk6MAGdJmcMWAcRz5wEgMbKl2xw8n1XFPhgiMEKbOW9jN20jtlHIgWfvhw5X0eMrTC4BON5/NRTF4av3jwQx4F57IlxwpiZqoK6tBTGsW43NGVDVZYYA640eN+z21ZsVxYbeubhjAlRBHZRZgbLtRMevLjLBqxxkPyQ7MLzKoLoCVICN8eQp9hp1qv3bRnQfNI0OGOxWBmrOssAJe8DEYN1BafTmes4Mk4zxhUUVcU4e6q6wZUl12Fgnr2IBYeZ60VQgu1mQ/SB4Xpl3bY0dcXp8EJTVZwOB1ZNy/VyJfrI2I9URYkzlp9+/VP+g3/8T3j37j1q1yI20H0+F3HdHRmHiWlMupd4g7mscdTViv4qmpQYxENovdomlEHmXGsALwphRX3+/AnxBBP9TQg+W+HIMKAhuR7LLBWxtpiIUb439FcMke5yFtKA96zaluAD1+5Kf+2ZxonddkvwQmefpxk/zzIW1Tr8PNO2DVVdYayhTEaS0vS3xEi29ne2kApIZ8HgWK02OFvw8PCGsrzRgP8gm0JZVVz7q7CL/Myl65j9zOcvn9lsN1z7K8M4EGNIyj+bL64wcWQyW1WVuQ+gZnZL/3AVeUxzZLvdc+l6Ipb1ektVt4hXT6RpV7mjrnxiEPMpNb5br9d4H/ju249UZUNTr6jKRi6acRSuoihus1dDUs8GHwlYERFVLUVRIfOlRyByOh8xBqqqlODiDBhR9GIC0QTmIANQAhGscKGLylHVDpvm9IYYEZ8lGfJjnT7kcnNHPwkzZZ7xJhKswdgiUV1vg2WWTWbZv18P09ENQzOK2z5vcnZnk+Va9AGC2GkQYlYWEyOFvfGfl32MZY9CprFJUDPIPAr9vg5xUfFbRBTR1kGIM5jA7EdcoeI48woOWZ6vMilC9KkHYIlRGrrOGQn2lvz3pqkwJhLCzDyPeD8laMZjTDKDsyUxOky0OGPAawZqmaZIP3geH89EU0qPKQaMCdg448eOd3d79k3LpikpS0+MHSEMWGu4XkdOxx5DRQgGn/7Desoi8vbNmlUDlQ0MfZ/U1jLj+dbPK0UcNQdA1nAIMVenxkp16mPAOMscA2VdEyzUiyxRG5vLtZDJClHmohS2ELpzJJmxCbPQuoIQYQqy2ZD6WlMI+AgBw+QDq3bFbrMjzJE4B/pLR5hn3tzfcb2cCbMo2M/HE1VRsWpWVEVJmD377Y7r5YqJhsPLgY8fPzEMMmjKWpfg55iweYG116st+/19WmdFijVV2khHxA67oq6a5MdmUk9B3Ft1dofMdLfsdlu8nxmGnr6/UhSO67VLSY3l/v6Ot2/fcLmckWp0Yp4n9nthKu33O/w8M/R9gpAlqZunmbZpefPwBmssm/UGP/uciGw3G2KQnuSbN2+oK6Xsy8ak51UUBff3D6zXG0KI1HWLdQXjONNdexH2DSLMq+uW67XPo0//rteP3hSu1y7z0xVfK8s6l13n8+UV20QxNcWfl2WVUq2U8qUTlrQZrE0kbXQpRhxCoOvE3EuHdGhD+fn5OVNcVcqupdjDw0OuIHSGw1JvobbRP4Q41NpXlNuiuNXf1VJOoRPNvJZioZtoymcIY9nsXEI4+pKM+GZyp5jxMlvW99CfVwhOs8UltrlswiuWq4F8eU9U1JODfbhVAHBTC1tzYwDpOWqpGqMMFIkxVQ7cBGnLzUlFcQptLZunel/UskOZZEsm0lKoJQ/+lBvky8/R49Y/dV0tfbX0Pt5+Vj5X9gRxoAQlkVkeXw74aIQmDBADfp4YuhOX4yOV8bSFZ7+2lE6sQIItmCl4fLkwjOJ3RQhYEymdwZrAfrvmbivzwOdJgt7sb2I7hSeW9GyQqiDfS2NyxqvrK8Tw6h4uRYgK3+m1WsKAN/gjzTEgjbksblP3luwvXRNVVdGuVlRVzcvLC9ZaVqt13ty1F3m5XNhs1ux2O+q6ytYTCoHozx4OB54eH/M6EcThlkwuGVZK5TTGvIK2lJWj4jl9VvU99RyU8aSMHSXHqCYjBJn7sDTqBPLPK91X4Tqlumrs0fumvk/a99EenDaHFeJ6ehJNxm63y414nS3z6dMnnp4es9mhWtWs1w1V5ZjmAWvh5fCEDxPOyWzwH/P60ZuCioOsta8aGyC844eHN6zXm1diMsUx9d+67srhcMhNYH04FUJaUrWyMCcpKtX0So23tIzUYKJsEvVtX61WeeyfNkMVg4Nb9qWBckl5VGhEhSQacEK6acrM0K6+WoGHtCCnZIA1TZNYCMTXtEqbHqiIbhKvb4P+3LIRuoRq9IHUB0CPQ4PBkkqqP//DSkIfJn2FhQhu+f75Kzm3WmezilJx7KVAzbzqQ5jFn7cNQu/5vw/S0gd1qexcbgj6Je9z2xyB3DPRIL/UbejPLTeHJbNKMzG5NunhMHKvrHUYV2BdQXedeD5cwIk9xTRN9NcLBM83/+6vieOFdel5t2/YrqWHMfjIbGtO18DnxxND73GmQBDAiAkztYO7zQoTZkz04q5rICLeVM4Jdq7XBBIs+AN6sI5T1eaoXodhGPJcbA2mSvFe0lL1ebo9l0W+Xsvrr5+vz5beS+ccx+ORw+GQg2lRuExd1b6hvof3/tUcB/VTU/RAny390ueo74ec5Kk9tHMuB/GiKHKc0BHBdV3z9PT0qvG+Wq149+5dPj7tSeochmXSoBvp6XRKNtiP+d+Wg8GWm44xMl
dZ1/vSEViTIj1m51xCN27PkyqkNQ7pvbDW5jEG9/f3xOild1IYXGFo2op+uFI3pRAWCJzOhx8V63/0prDMcAXP9dR1gzGOr7/6KTEYqkrKsnGU74NNDo9SBhdFSVGUeWiOVgK6Cy7H0d2aMrdJRMrVttaySeP2NHArIwB4tWMrpxfITU/NSpaL+4cZ/NLWWRvq4nPSvVKEauYQY0xc6JvbaVVVlNXr+cPGiHuo9+JNswy8ulAyzq8P/r8neGow1uxqGej0YVsGPV2otwbsLZu8lafkxavf15effRac6T0KIeQN/7aRhMW5LgVkt+xdz1MDkmZLGmQ0WOk5L5lMr8V0N99/hRKX1ZFeJ91YlgI2PT8Nft57fJixlkQsvlUZqaUMxhGi43fffuR4GZijZfZCZZyHnqm/cDk+0RSRTWXYthVVWRCxjN4QXMXhPDBMEE1BjOJlVVioLOzXNe/uN5g4EvwIqUcSk3+Veu9Ebuek56B/aka+fMUYmVJ1tbSh0OdCf0fviV4bvR8qClMa6uxvE+T0a3l99b5rIgYmOXl6pmnO60yrTFVM678DmVqpCYiuae9DGvIVs5hObTP0s5WFo1YZyyShaRrevXuX2T56fZaNcrUFX9q2aIBXPF8TX23kLxl7ek0UiVBxrlJulX3ZNA3b7fYViwhuYwDUfVYp+Ut0QZPkHIsAmfQoQ74g0LZVgkahrIqMBvxdrx+9KSjbSPAroW0N/UjwcLn0hGCYRrnpxliu157LpaPvBy6XjjLZF2vp9cMyVbMZvSGagWjQ0AvpnKPrOp5fXnI/IcaYx/c9PDzkAKjUML3pQC4rVZQiLKhbINQHazkQRP6/EIl/+r5aQugxa7Uh5yUPbdM0MhMh3QutbnLpb24q1CU0pAF1WeIvG6z60ixmmZnA65GW+nNLGGfZZL5tjDf/mGXAXrJdwkIgqPdQg9MSvtLsMAT1U/rbnj36EMJtIppWRXrNl3CRbmSqfVgGNA1GehzLjFJfWnksNxh94PX8isKKyKyQ5qJJOJI0adOmYArO15kvzycCjqJuqNI8X1c4DocXbAw4E9iuatpKKwKYPVyGmWM3MXiLNw5XiqGdJbCqHQ/7FeumwDGK+pmAc2Q4oCyFVWMsoswNt5kD3vu8oeV1ks6xSvdVr71eq+X11T+XG7XYd79Wf1udBuZvnw38rTW3rN7171VV5oA/LszbFErWhE7fe1mdzPPM5XJ+dazL8aHWyrwIzeQVqlGmT9M0FEXBp0+fFgNsbpoCybjjq0pM5zYoUrJcl+/fv8/QtG6st+vm8san1czS402fL+1lLM9fk1uF1Ndp0qQmbAKjtdlbqixLmmRfIkyjMW+Aerxdd2Ge/8CU1HEcEPqX+LwHL5azX7488v33H3HO8fVP3vOLX/w0HUzzKlP0Xkb5NUl3oIILXSxaEcgQiY7ZnwjhNjhCF8bd3R1PT0/JcqLIYpbr9crd3R3H4zEHfb15Cr1o9qNYn95AofW9Dpw/pDZqFnF3d0fTNFnmru+pQXlKVMa2bQk+Cv32hxVBvNFG5fqAtTdB0b8P6llCW8vXD+may2u+7BsArwLv8ufkcwQuWWbty8yHBP/ERZm8rBD0FYKorqP2FSL5vZTWtwzMmo3rddSHS//9fD6/CjR6vxT/lz/LfJzL+6zXz1qbK8xlkHldocn0Oe89xplky+KYkXMIMRnxBYuxFU+HC1+93bMuG8Q23LLe3VFWDePkhR232rKuSl6OV+Y5gimhKPl4uLDdt6xrhyPgMDgLFs+uKeg2FeNzx7WfsOUKTKKNolUYeD9hbYWwbG6b5jRNDItEoTA3TUlpX0NN+lz+MMstioKY2F1VIgeEdF2rssJVcj0161VRqK7FqqqYI1kz0jRCvZT1skxaTObqn07nV2wo7RX+0A/LezXiW45mtYzjNWfPigaojkoTCX1fzdg/f/6c4ZwYheJ5PB7z2lSBmSIRl8slxw0NttoHPRwOr9byOI6YSN5INEG9Vea3+dwKqat9hX6+WlRoZRBCwFmHLVzyRvK07Spf8+PpkCqYns1mw2az5XA4puE9R5rmx7GP/h46BZFkd//f9s7s2a6rzu+ftfZ05jtJsmVL8mzZpj0P2DQYNxV3gtvVCWACgQeqKwH6X0hSlX7gKSHpqjzRKQgPJA+pSmi6mEwYjMHGDcbCmLYdWzI2xhKWdaU7nWlPa608rPXbZ191Kq2HfkrOr0pl6frce8/Ze+3f+P19vzM/2f7tb9/ixInnOXr0Km679Q6UUrz08gucOvUqd9x+B4evuKLJOouiAOe55bNRhzQgUiRDlDJLbv6rJ0/y0kuvYK3H3D7yyCONapC0fvqB4wNo+nDiEOQwtDPQxmG1suG284eFIld7tV8cS5LEOJc2MqBiMoxuO23pw5a2Chm4Jg2oBa8HsaDDEMd5cRbfLt/b7STJxi4OAu2g0+47t/HS8v3t1tTCFpw37ZV6CahKea57cSQyG5DrI8538TsID+7+9yUPkwSK9rxEMktxOEmSkM+LptprPyDiVByLKkTunzyUcm+kmpLPLO//YlPKI4r8mfQJkNJRQ4ltjA0wzJiimrC9NyFb7xMnGU4prrz6GqalRad9trenTM/tcOjIVfSjGZaKRCdYp8hdzLlxzpVZlwxNpGrPi0VNpjXd2NCJauoaSlMEdtYU50yQ5IwoihLnCHOdmij2jrttZVn6ykF56gzrHDrcL7nuDe9RaNk2M4dwfYqiCAI44WdWXjVQnls5K+0WT5qmJAFOaa1t0ELye6Ul0uv1Q8XgxWVkK1vmDkLoJs7ezyVtQ9sgsxGhtHduwfra3niX9o0Egn6/z3w+58CBA+zs7DQSnu3lL3mmpM0kg2LpSogfkcGvVNTSeq7rmizNmrO9trbWVGbtllQURRw6dChQfy8YXuX9JElCEeYpWmvyImd9bYNut8vu7rgJcFGUoNBMp/OQiEJZGEZDL9aTpd1LZkm95PZR23lNJhNOnDjBxz72cf7kT/45R44c5dixq/jMZ/+UAxsH+dnPfu4FHpKMqqwY9IeApt8fUNemoeKVGy0DoTj2zKsKeN+DD/Lpz3yG0WjUoBWkCkjTFFMvZCcl0spmoZRmgmRoZ7biwCVzaQcm+Zxyg8UByyFLA7pJorYgpXw7zZdmOpS0i8xmP0rEeixjeF8LRJL87nZ7pe3s5T1Jhid2cdC4+L9tlE37Xl6cKXtemXpfNi2Ovt3CisI1kdZWu70Ux3EgBdQIVLL9ftpDyva8AoSJcr+Epqz8tzN/CZr+z/7lN7nn7cAn16Z9HeWBlvPXfq0PGnXQQfBQYVHkQilqY6lqi1MRe+MZtQWnY2oi1g9dyQ03/x791QMUleLNN95k78Imh9dHDBKFshWVM5Qq4uz2hJ1ZSY2mDivypirRyrA+6rE+7ODMHO3VhMLnD3KskQqb117zogxJTFWWTcuuTV4pFVi77SbPjlxzGbKKs2qDCPxZXMxzinyxtClBXhxaOwBL4hBFcVONiDyoDwDTps0qVCzyfMrcSp4Ha21oBdmwCGub5G1zc
...[base64-encoded PNG data for the inline detection-result image truncated]...",
      "text/plain": [
       "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import matplotlib.pyplot as plt\n", + "import matplotlib.image as mpimg\n", + "\n", + "# Define the path to the image\n", + "image_path = '/Users/sompoteyouwai/env/YOLO/YOLO9tr/yolov9/runs/detect/yolov9_c_640_detect5/United_States_000062.jpg'\n", + "\n", + "# Load the image\n", + "img = mpimg.imread(image_path)\n", + "\n", + "# Plot the image\n", + "plt.imshow(img)\n", + "plt.axis('off') # Hide the axis\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/Users/sompoteyouwai/env/YOLO/YOLO9tr/yolov9\n" + ] + } + ], + "source": [ + "!pwd" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "import torch._custom_ops" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [], + "source": [ + "import streamlit" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Collecting streamlit\n", + " Using cached streamlit-1.37.0-py2.py3-none-any.whl.metadata (8.5 kB)\n", + "Requirement already satisfied: altair<6,>=4.0 in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from streamlit) (5.3.0)\n", + "Collecting blinker<2,>=1.0.0 (from streamlit)\n", + " Using cached blinker-1.8.2-py3-none-any.whl.metadata (1.6 kB)\n", + "Requirement already satisfied: cachetools<6,>=4.0 in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from streamlit) (5.3.3)\n", + "Requirement already satisfied: click<9,>=7.0 in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from streamlit) (8.1.7)\n", + "Requirement already satisfied: numpy<3,>=1.20 in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from streamlit) (1.26.4)\n", + "Requirement already satisfied: packaging<25,>=20 in /Users/sompoteyouwai/Library/Python/3.11/lib/python/site-packages (from streamlit) (23.2)\n", + "Requirement already satisfied: pandas<3,>=1.3.0 in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from streamlit) (2.2.2)\n", + "Requirement already satisfied: pillow<11,>=7.1.0 in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from streamlit) (10.3.0)\n", + "Requirement already satisfied: protobuf<6,>=3.20 in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from streamlit) (4.25.3)\n", + "Requirement already satisfied: pyarrow>=7.0 in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from streamlit) (17.0.0)\n", + "Requirement already satisfied: requests<3,>=2.27 in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from streamlit) (2.31.0)\n", + "Requirement already satisfied: rich<14,>=10.14.0 in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from streamlit) (13.7.1)\n", + "Requirement already satisfied: tenacity<9,>=8.1.0 in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from streamlit) (8.5.0)\n", + "Collecting toml<2,>=0.10.1 (from streamlit)\n", + " Using cached toml-0.10.2-py2.py3-none-any.whl.metadata (7.1 kB)\n", + "Requirement already satisfied: typing-extensions<5,>=4.3.0 in 
/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from streamlit) (4.12.0)\n", + "Collecting gitpython!=3.1.19,<4,>=3.0.7 (from streamlit)\n", + " Downloading GitPython-3.1.43-py3-none-any.whl.metadata (13 kB)\n", + "Collecting pydeck<1,>=0.8.0b4 (from streamlit)\n", + " Using cached pydeck-0.9.1-py2.py3-none-any.whl.metadata (4.1 kB)\n", + "Requirement already satisfied: tornado<7,>=6.0.3 in /Users/sompoteyouwai/Library/Python/3.11/lib/python/site-packages (from streamlit) (6.4)\n", + "Requirement already satisfied: jinja2 in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from altair<6,>=4.0->streamlit) (3.1.4)\n", + "Requirement already satisfied: jsonschema>=3.0 in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from altair<6,>=4.0->streamlit) (4.23.0)\n", + "Requirement already satisfied: toolz in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from altair<6,>=4.0->streamlit) (0.12.1)\n", + "Collecting gitdb<5,>=4.0.1 (from gitpython!=3.1.19,<4,>=3.0.7->streamlit)\n", + " Downloading gitdb-4.0.11-py3-none-any.whl.metadata (1.2 kB)\n", + "Requirement already satisfied: python-dateutil>=2.8.2 in /Users/sompoteyouwai/Library/Python/3.11/lib/python/site-packages (from pandas<3,>=1.3.0->streamlit) (2.8.2)\n", + "Requirement already satisfied: pytz>=2020.1 in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from pandas<3,>=1.3.0->streamlit) (2024.1)\n", + "Requirement already satisfied: tzdata>=2022.7 in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from pandas<3,>=1.3.0->streamlit) (2024.1)\n", + "Requirement already satisfied: charset-normalizer<4,>=2 in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from requests<3,>=2.27->streamlit) (3.3.2)\n", + "Requirement already satisfied: idna<4,>=2.5 in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from requests<3,>=2.27->streamlit) (3.7)\n", + "Requirement already satisfied: urllib3<3,>=1.21.1 in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from requests<3,>=2.27->streamlit) (2.2.2)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from requests<3,>=2.27->streamlit) (2023.7.22)\n", + "Requirement already satisfied: markdown-it-py>=2.2.0 in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from rich<14,>=10.14.0->streamlit) (3.0.0)\n", + "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /Users/sompoteyouwai/Library/Python/3.11/lib/python/site-packages (from rich<14,>=10.14.0->streamlit) (2.17.2)\n", + "Collecting smmap<6,>=3.0.1 (from gitdb<5,>=4.0.1->gitpython!=3.1.19,<4,>=3.0.7->streamlit)\n", + " Downloading smmap-5.0.1-py3-none-any.whl.metadata (4.3 kB)\n", + "Requirement already satisfied: MarkupSafe>=2.0 in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from jinja2->altair<6,>=4.0->streamlit) (2.1.5)\n", + "Requirement already satisfied: attrs>=22.2.0 in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from jsonschema>=3.0->altair<6,>=4.0->streamlit) (23.2.0)\n", + "Requirement already satisfied: jsonschema-specifications>=2023.03.6 in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from 
jsonschema>=3.0->altair<6,>=4.0->streamlit) (2023.12.1)\n", + "Requirement already satisfied: referencing>=0.28.4 in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from jsonschema>=3.0->altair<6,>=4.0->streamlit) (0.35.1)\n", + "Requirement already satisfied: rpds-py>=0.7.1 in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from jsonschema>=3.0->altair<6,>=4.0->streamlit) (0.19.0)\n", + "Requirement already satisfied: mdurl~=0.1 in /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages (from markdown-it-py>=2.2.0->rich<14,>=10.14.0->streamlit) (0.1.2)\n", + "Requirement already satisfied: six>=1.5 in /Users/sompoteyouwai/Library/Python/3.11/lib/python/site-packages (from python-dateutil>=2.8.2->pandas<3,>=1.3.0->streamlit) (1.16.0)\n", + "Using cached streamlit-1.37.0-py2.py3-none-any.whl (8.7 MB)\n", + "Using cached blinker-1.8.2-py3-none-any.whl (9.5 kB)\n", + "Downloading GitPython-3.1.43-py3-none-any.whl (207 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m207.3/207.3 kB\u001b[0m \u001b[31m4.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m:00:01\u001b[0m\n", + "\u001b[?25hUsing cached pydeck-0.9.1-py2.py3-none-any.whl (6.9 MB)\n", + "Using cached toml-0.10.2-py2.py3-none-any.whl (16 kB)\n", + "Downloading gitdb-4.0.11-py3-none-any.whl (62 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62.7/62.7 kB\u001b[0m \u001b[31m2.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading smmap-5.0.1-py3-none-any.whl (24 kB)\n", + "Installing collected packages: toml, smmap, blinker, pydeck, gitdb, gitpython, streamlit\n", + "Successfully installed blinker-1.8.2 gitdb-4.0.11 gitpython-3.1.43 pydeck-0.9.1 smmap-5.0.1 streamlit-1.37.0 toml-0.10.2\n", + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "pip install streamlit" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/temp_image.jpg b/temp_image.jpg new file mode 100644 index 0000000000000000000000000000000000000000..304e67fe897960d49aeeb078c0852ddea6bdf23e Binary files /dev/null and b/temp_image.jpg differ diff --git a/tools/reparameterization.ipynb b/tools/reparameterization.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..8116c05a099ec64a70798c95a4749e6e132ce06b --- /dev/null +++ b/tools/reparameterization.ipynb @@ -0,0 +1,450 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "4beac401", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "from models.yolo import Model" + ] + }, + { + "cell_type": "markdown", + "id": "d1a8399f", + "metadata": {}, + "source": [ + "## Convert YOLOv9-S" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c7a40f10", + "metadata": {}, + "outputs": [], + "source": [ + "device = torch.device(\"cpu\")\n", + "cfg = \"./models/detect/gelan-s.yaml\"\n", + "model = Model(cfg, ch=3, nc=80, 
anchors=3)\n", + "#model = model.half()\n", + "model = model.to(device)\n", + "_ = model.eval()\n", + "ckpt = torch.load('./yolov9-s.pt', map_location='cpu')\n", + "model.names = ckpt['model'].names\n", + "model.nc = ckpt['model'].nc" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3b046bb2", + "metadata": {}, + "outputs": [], + "source": [ + "idx = 0\n", + "for k, v in model.state_dict().items():\n", + " if \"model.{}.\".format(idx) in k:\n", + " if idx < 22:\n", + " kr = k.replace(\"model.{}.\".format(idx), \"model.{}.\".format(idx))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " print(k, \"perfectly matched!!\")\n", + " elif \"model.{}.cv2.\".format(idx) in k:\n", + " kr = k.replace(\"model.{}.cv2.\".format(idx), \"model.{}.cv4.\".format(idx+7))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " print(k, \"perfectly matched!!\")\n", + " elif \"model.{}.cv3.\".format(idx) in k:\n", + " kr = k.replace(\"model.{}.cv3.\".format(idx), \"model.{}.cv5.\".format(idx+7))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " print(k, \"perfectly matched!!\")\n", + " elif \"model.{}.dfl.\".format(idx) in k:\n", + " kr = k.replace(\"model.{}.dfl.\".format(idx), \"model.{}.dfl2.\".format(idx+7))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " print(k, \"perfectly matched!!\")\n", + " else:\n", + " while True:\n", + " idx += 1\n", + " if \"model.{}.\".format(idx) in k:\n", + " break\n", + " if idx < 22:\n", + " kr = k.replace(\"model.{}.\".format(idx), \"model.{}.\".format(idx))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " print(k, \"perfectly matched!!\")\n", + " elif \"model.{}.cv2.\".format(idx) in k:\n", + " kr = k.replace(\"model.{}.cv2.\".format(idx), \"model.{}.cv4.\".format(idx+7))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " print(k, \"perfectly matched!!\")\n", + " elif \"model.{}.cv3.\".format(idx) in k:\n", + " kr = k.replace(\"model.{}.cv3.\".format(idx), \"model.{}.cv5.\".format(idx+7))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " print(k, \"perfectly matched!!\")\n", + " elif \"model.{}.dfl.\".format(idx) in k:\n", + " kr = k.replace(\"model.{}.dfl.\".format(idx), \"model.{}.dfl2.\".format(idx+7))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " print(k, \"perfectly matched!!\")\n", + "_ = model.eval()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "07eb0cde", + "metadata": {}, + "outputs": [], + "source": [ + "m_ckpt = {'model': model.half(),\n", + " 'optimizer': None,\n", + " 'best_fitness': None,\n", + " 'ema': None,\n", + " 'updates': None,\n", + " 'opt': None,\n", + " 'git': None,\n", + " 'date': None,\n", + " 'epoch': -1}\n", + "torch.save(m_ckpt, \"./yolov9-s-converted.pt\")" + ] + }, + { + "cell_type": "markdown", + "id": "ba87d10f", + "metadata": {}, + "source": [ + "## Convert YOLOv9-M" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cc41b027", + "metadata": {}, + "outputs": [], + 
"source": [ + "device = torch.device(\"cpu\")\n", + "cfg = \"./models/detect/gelan-m.yaml\"\n", + "model = Model(cfg, ch=3, nc=80, anchors=3)\n", + "#model = model.half()\n", + "model = model.to(device)\n", + "_ = model.eval()\n", + "ckpt = torch.load('./yolov9-m.pt', map_location='cpu')\n", + "model.names = ckpt['model'].names\n", + "model.nc = ckpt['model'].nc" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cf7c3978", + "metadata": {}, + "outputs": [], + "source": [ + "idx = 0\n", + "for k, v in model.state_dict().items():\n", + " if \"model.{}.\".format(idx) in k:\n", + " if idx < 22:\n", + " kr = k.replace(\"model.{}.\".format(idx), \"model.{}.\".format(idx+1))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " print(k, \"perfectly matched!!\")\n", + " elif \"model.{}.cv2.\".format(idx) in k:\n", + " kr = k.replace(\"model.{}.cv2.\".format(idx), \"model.{}.cv4.\".format(idx+16))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " print(k, \"perfectly matched!!\")\n", + " elif \"model.{}.cv3.\".format(idx) in k:\n", + " kr = k.replace(\"model.{}.cv3.\".format(idx), \"model.{}.cv5.\".format(idx+16))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " print(k, \"perfectly matched!!\")\n", + " elif \"model.{}.dfl.\".format(idx) in k:\n", + " kr = k.replace(\"model.{}.dfl.\".format(idx), \"model.{}.dfl2.\".format(idx+16))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " print(k, \"perfectly matched!!\")\n", + " else:\n", + " while True:\n", + " idx += 1\n", + " if \"model.{}.\".format(idx) in k:\n", + " break\n", + " if idx < 22:\n", + " kr = k.replace(\"model.{}.\".format(idx), \"model.{}.\".format(idx+1))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " print(k, \"perfectly matched!!\")\n", + " elif \"model.{}.cv2.\".format(idx) in k:\n", + " kr = k.replace(\"model.{}.cv2.\".format(idx), \"model.{}.cv4.\".format(idx+16))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " print(k, \"perfectly matched!!\")\n", + " elif \"model.{}.cv3.\".format(idx) in k:\n", + " kr = k.replace(\"model.{}.cv3.\".format(idx), \"model.{}.cv5.\".format(idx+16))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " print(k, \"perfectly matched!!\")\n", + " elif \"model.{}.dfl.\".format(idx) in k:\n", + " kr = k.replace(\"model.{}.dfl.\".format(idx), \"model.{}.dfl2.\".format(idx+16))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " print(k, \"perfectly matched!!\")\n", + "_ = model.eval()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "00a92a45", + "metadata": {}, + "outputs": [], + "source": [ + "m_ckpt = {'model': model.half(),\n", + " 'optimizer': None,\n", + " 'best_fitness': None,\n", + " 'ema': None,\n", + " 'updates': None,\n", + " 'opt': None,\n", + " 'git': None,\n", + " 'date': None,\n", + " 'epoch': -1}\n", + "torch.save(m_ckpt, \"./yolov9-m-converted.pt\")" + ] + }, + { + "cell_type": "markdown", + "id": "8680f822", + "metadata": {}, + "source": [ + "## 
Convert YOLOv9-C" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "59f0198d", + "metadata": {}, + "outputs": [], + "source": [ + "device = torch.device(\"cpu\")\n", + "cfg = \"./models/detect/gelan-c.yaml\"\n", + "model = Model(cfg, ch=3, nc=80, anchors=3)\n", + "#model = model.half()\n", + "model = model.to(device)\n", + "_ = model.eval()\n", + "ckpt = torch.load('./yolov9-c.pt', map_location='cpu')\n", + "model.names = ckpt['model'].names\n", + "model.nc = ckpt['model'].nc" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2de7e1be", + "metadata": {}, + "outputs": [], + "source": [ + "idx = 0\n", + "for k, v in model.state_dict().items():\n", + " if \"model.{}.\".format(idx) in k:\n", + " if idx < 22:\n", + " kr = k.replace(\"model.{}.\".format(idx), \"model.{}.\".format(idx+1))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " elif \"model.{}.cv2.\".format(idx) in k:\n", + " kr = k.replace(\"model.{}.cv2.\".format(idx), \"model.{}.cv4.\".format(idx+16))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " elif \"model.{}.cv3.\".format(idx) in k:\n", + " kr = k.replace(\"model.{}.cv3.\".format(idx), \"model.{}.cv5.\".format(idx+16))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " elif \"model.{}.dfl.\".format(idx) in k:\n", + " kr = k.replace(\"model.{}.dfl.\".format(idx), \"model.{}.dfl2.\".format(idx+16))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " else:\n", + " while True:\n", + " idx += 1\n", + " if \"model.{}.\".format(idx) in k:\n", + " break\n", + " if idx < 22:\n", + " kr = k.replace(\"model.{}.\".format(idx), \"model.{}.\".format(idx+1))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " elif \"model.{}.cv2.\".format(idx) in k:\n", + " kr = k.replace(\"model.{}.cv2.\".format(idx), \"model.{}.cv4.\".format(idx+16))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " elif \"model.{}.cv3.\".format(idx) in k:\n", + " kr = k.replace(\"model.{}.cv3.\".format(idx), \"model.{}.cv5.\".format(idx+16))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " elif \"model.{}.dfl.\".format(idx) in k:\n", + " kr = k.replace(\"model.{}.dfl.\".format(idx), \"model.{}.dfl2.\".format(idx+16))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + "_ = model.eval()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "960796e3", + "metadata": {}, + "outputs": [], + "source": [ + "m_ckpt = {'model': model.half(),\n", + " 'optimizer': None,\n", + " 'best_fitness': None,\n", + " 'ema': None,\n", + " 'updates': None,\n", + " 'opt': None,\n", + " 'git': None,\n", + " 'date': None,\n", + " 'epoch': -1}\n", + "torch.save(m_ckpt, \"./yolov9-c-converted.pt\")" + ] + }, + { + "cell_type": "markdown", + "id": "47c6e6ae", + "metadata": {}, + "source": [ + "## Convert YOLOv9-E" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "801a1b7c", + "metadata": {}, + "outputs": [], + "source": [ + "device = torch.device(\"cpu\")\n", + "cfg = 
\"./models/detect/gelan-e.yaml\"\n", + "model = Model(cfg, ch=3, nc=80, anchors=3)\n", + "#model = model.half()\n", + "model = model.to(device)\n", + "_ = model.eval()\n", + "ckpt = torch.load('./yolov9-e.pt', map_location='cpu')\n", + "model.names = ckpt['model'].names\n", + "model.nc = ckpt['model'].nc" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a2ef4fe6", + "metadata": {}, + "outputs": [], + "source": [ + "idx = 0\n", + "for k, v in model.state_dict().items():\n", + " if \"model.{}.\".format(idx) in k:\n", + " if idx < 29:\n", + " kr = k.replace(\"model.{}.\".format(idx), \"model.{}.\".format(idx))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " print(k, \"perfectly matched!!\")\n", + " elif idx < 42:\n", + " kr = k.replace(\"model.{}.\".format(idx), \"model.{}.\".format(idx+7))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " print(k, \"perfectly matched!!\")\n", + " elif \"model.{}.cv2.\".format(idx) in k:\n", + " kr = k.replace(\"model.{}.cv2.\".format(idx), \"model.{}.cv4.\".format(idx+7))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " print(k, \"perfectly matched!!\")\n", + " elif \"model.{}.cv3.\".format(idx) in k:\n", + " kr = k.replace(\"model.{}.cv3.\".format(idx), \"model.{}.cv5.\".format(idx+7))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " print(k, \"perfectly matched!!\")\n", + " elif \"model.{}.dfl.\".format(idx) in k:\n", + " kr = k.replace(\"model.{}.dfl.\".format(idx), \"model.{}.dfl2.\".format(idx+7))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " print(k, \"perfectly matched!!\")\n", + " else:\n", + " while True:\n", + " idx += 1\n", + " if \"model.{}.\".format(idx) in k:\n", + " break\n", + " if idx < 29:\n", + " kr = k.replace(\"model.{}.\".format(idx), \"model.{}.\".format(idx))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " print(k, \"perfectly matched!!\")\n", + " elif idx < 42:\n", + " kr = k.replace(\"model.{}.\".format(idx), \"model.{}.\".format(idx+7))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " print(k, \"perfectly matched!!\")\n", + " elif \"model.{}.cv2.\".format(idx) in k:\n", + " kr = k.replace(\"model.{}.cv2.\".format(idx), \"model.{}.cv4.\".format(idx+7))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " print(k, \"perfectly matched!!\")\n", + " elif \"model.{}.cv3.\".format(idx) in k:\n", + " kr = k.replace(\"model.{}.cv3.\".format(idx), \"model.{}.cv5.\".format(idx+7))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " print(k, \"perfectly matched!!\")\n", + " elif \"model.{}.dfl.\".format(idx) in k:\n", + " kr = k.replace(\"model.{}.dfl.\".format(idx), \"model.{}.dfl2.\".format(idx+7))\n", + " model.state_dict()[k] -= model.state_dict()[k]\n", + " model.state_dict()[k] += ckpt['model'].state_dict()[kr]\n", + " print(k, \"perfectly matched!!\")\n", + "_ = model.eval()" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "id": "27bc1869", + "metadata": {}, + "outputs": [], + "source": [ + "m_ckpt = {'model': model.half(),\n", + " 'optimizer': None,\n", + " 'best_fitness': None,\n", + " 'ema': None,\n", + " 'updates': None,\n", + " 'opt': None,\n", + " 'git': None,\n", + " 'date': None,\n", + " 'epoch': -1}\n", + "torch.save(m_ckpt, \"./yolov9-e-converted.pt\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/train.py b/train.py new file mode 100644 index 0000000000000000000000000000000000000000..59c372afebbbb32035de5819e35285cbef77d4db --- /dev/null +++ b/train.py @@ -0,0 +1,634 @@ +import argparse +import math +import os +import random +import sys +import time +from copy import deepcopy +from datetime import datetime +from pathlib import Path + +import numpy as np +import torch +import torch.distributed as dist +import torch.nn as nn +import yaml +from torch.optim import lr_scheduler +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[0] # root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +import val as validate # for end-of-epoch mAP +from models.experimental import attempt_load +from models.yolo import Model +from utils.autoanchor import check_anchors +from utils.autobatch import check_train_batch_size +from utils.callbacks import Callbacks +from utils.dataloaders import create_dataloader +from utils.downloads import attempt_download, is_url +from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_img_size, + check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds, + intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, + one_cycle, one_flat_cycle, print_args, print_mutation, strip_optimizer, yaml_save) +from utils.loggers import Loggers +from utils.loggers.comet.comet_utils import check_comet_resume +from utils.loss_tal import ComputeLoss +from utils.metrics import fitness +from utils.plots import plot_evolve +from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, + smart_optimizer, smart_resume, torch_distributed_zero_first) + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) +GIT_INFO = None + + +def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary + save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \ + Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ + opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze + callbacks.run('on_pretrain_routine_start') + + # Directories + w = save_dir / 'weights' # weights dir + (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir + last, best = w / 'last.pt', w / 'best.pt' + last_striped, best_striped = w / 'last_striped.pt', w / 'best_striped.pt' + + # Hyperparameters + if isinstance(hyp, 
str): + with open(hyp, errors='ignore') as f: + hyp = yaml.safe_load(f) # load hyps dict + LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) + hyp['anchor_t'] = 5.0 + opt.hyp = hyp.copy() # for saving hyps to checkpoints + + # Save run settings + if not evolve: + yaml_save(save_dir / 'hyp.yaml', hyp) + yaml_save(save_dir / 'opt.yaml', vars(opt)) + + # Loggers + data_dict = None + if RANK in {-1, 0}: + loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance + + # Register actions + for k in methods(loggers): + callbacks.register_action(k, callback=getattr(loggers, k)) + + # Process custom dataset artifact link + data_dict = loggers.remote_dataset + if resume: # If resuming runs from remote artifact + weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size + + # Config + plots = not evolve and not opt.noplots # create plots + cuda = device.type != 'cpu' + init_seeds(opt.seed + 1 + RANK, deterministic=True) + with torch_distributed_zero_first(LOCAL_RANK): + data_dict = data_dict or check_dataset(data) # check if None + train_path, val_path = data_dict['train'], data_dict['val'] + nc = 1 if single_cls else int(data_dict['nc']) # number of classes + names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names + #is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset + is_coco = isinstance(val_path, str) and val_path.endswith('val2017.txt') # COCO dataset + + # Model + check_suffix(weights, '.pt') # check weights + pretrained = weights.endswith('.pt') + if pretrained: + with torch_distributed_zero_first(LOCAL_RANK): + weights = attempt_download(weights) # download if not found locally + ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak + model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys + csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 + csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect + model.load_state_dict(csd, strict=False) # load + LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report + else: + model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + amp = check_amp(model) # check AMP + + # Freeze + freeze = [f'model.{x}.' 
for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze + for k, v in model.named_parameters(): + # v.requires_grad = True # train all layers TODO: uncomment this line as in master + # v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results) + if any(x in k for x in freeze): + LOGGER.info(f'freezing {k}') + v.requires_grad = False + + # Image size + gs = max(int(model.stride.max()), 32) # grid size (max stride) + imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple + + # Batch size + if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size + batch_size = check_train_batch_size(model, imgsz, amp) + loggers.on_params_update({"batch_size": batch_size}) + + # Optimizer + nbs = 64 # nominal batch size + accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing + hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay + optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay']) + + # Scheduler + if opt.cos_lr: + lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] + elif opt.flat_cos_lr: + lf = one_flat_cycle(1, hyp['lrf'], epochs) # flat cosine 1->hyp['lrf'] + elif opt.fixed_lr: + lf = lambda x: 1.0 + else: + lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear + + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) + # from utils.plots import plot_lr_scheduler; plot_lr_scheduler(optimizer, scheduler, epochs) + + # EMA + ema = ModelEMA(model) if RANK in {-1, 0} else None + + # Resume + best_fitness, start_epoch = 0.0, 0 + if pretrained: + if resume: + best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume) + del ckpt, csd + + # DP mode + if cuda and RANK == -1 and torch.cuda.device_count() > 1: + LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.') + model = torch.nn.DataParallel(model) + + # SyncBatchNorm + if opt.sync_bn and cuda and RANK != -1: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) + LOGGER.info('Using SyncBatchNorm()') + + # Trainloader + train_loader, dataset = create_dataloader(train_path, + imgsz, + batch_size // WORLD_SIZE, + gs, + single_cls, + hyp=hyp, + augment=True, + cache=None if opt.cache == 'val' else opt.cache, + rect=opt.rect, + rank=LOCAL_RANK, + workers=workers, + image_weights=opt.image_weights, + close_mosaic=opt.close_mosaic != 0, + quad=opt.quad, + prefix=colorstr('train: '), + shuffle=True, + min_items=opt.min_items) + labels = np.concatenate(dataset.labels, 0) + mlc = int(labels[:, 0].max()) # max label class + assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. 
Possible class labels are 0-{nc - 1}' + + # Process 0 + if RANK in {-1, 0}: + val_loader = create_dataloader(val_path, + imgsz, + batch_size // WORLD_SIZE * 2, + gs, + single_cls, + hyp=hyp, + cache=None if noval else opt.cache, + rect=True, + rank=-1, + workers=workers * 2, + pad=0.5, + prefix=colorstr('val: '))[0] + + if not resume: + # if not opt.noautoanchor: + # check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # run AutoAnchor + model.half().float() # pre-reduce anchor precision + + callbacks.run('on_pretrain_routine_end', labels, names) + + # DDP mode + if cuda and RANK != -1: + model = smart_DDP(model) + + # Model attributes + nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps) + #hyp['box'] *= 3 / nl # scale to layers + #hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers + #hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers + hyp['label_smoothing'] = opt.label_smoothing + model.nc = nc # attach number of classes to model + model.hyp = hyp # attach hyperparameters to model + model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights + model.names = names + + # Start training + t0 = time.time() + nb = len(train_loader) # number of batches + nw = max(round(hyp['warmup_epochs'] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations) + # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training + last_opt_step = -1 + maps = np.zeros(nc) # mAP per class + results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) + scheduler.last_epoch = start_epoch - 1 # do not move + scaler = torch.cuda.amp.GradScaler(enabled=amp) + stopper, stop = EarlyStopping(patience=opt.patience), False + compute_loss = ComputeLoss(model) # init loss class + callbacks.run('on_train_start') + LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' + f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' + f"Logging results to {colorstr('bold', save_dir)}\n" + f'Starting training for {epochs} epochs...') + for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ + callbacks.run('on_train_epoch_start') + model.train() + + # Update image weights (optional, single-GPU only) + if opt.image_weights: + cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights + iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights + dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx + if epoch == (epochs - opt.close_mosaic): + LOGGER.info("Closing dataloader mosaic") + dataset.mosaic = False + + # Update mosaic border (optional) + # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) + # dataset.mosaic_border = [b - imgsz, -b] # height, width borders + + mloss = torch.zeros(3, device=device) # mean losses + if RANK != -1: + train_loader.sampler.set_epoch(epoch) + pbar = enumerate(train_loader) + LOGGER.info(('\n' + '%11s' * 7) % ('Epoch', 'GPU_mem', 'box_loss', 'cls_loss', 'dfl_loss', 'Instances', 'Size')) + if RANK in {-1, 0}: + pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar + optimizer.zero_grad() + for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- + callbacks.run('on_train_batch_start') + ni = i + nb * epoch # number integrated batches (since train start) + imgs = 
imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0 + + # Warmup + if ni <= nw: + xi = [0, nw] # x interp + # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) + accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round()) + for j, x in enumerate(optimizer.param_groups): + # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 + x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)]) + if 'momentum' in x: + x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) + + # Multi-scale + if opt.multi_scale: + sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size + sf = sz / max(imgs.shape[2:]) # scale factor + if sf != 1: + ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) + imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) + + # Forward + with torch.cuda.amp.autocast(amp): + pred = model(imgs) # forward + loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size + if RANK != -1: + loss *= WORLD_SIZE # gradient averaged between devices in DDP mode + if opt.quad: + loss *= 4. + + # Backward + scaler.scale(loss).backward() + + # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html + if ni - last_opt_step >= accumulate: + scaler.unscale_(optimizer) # unscale gradients + torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients + scaler.step(optimizer) # optimizer.step + scaler.update() + optimizer.zero_grad() + if ema: + ema.update(model) + last_opt_step = ni + + # Log + if RANK in {-1, 0}: + mloss = (mloss * i + loss_items) / (i + 1) # update mean losses + mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) + pbar.set_description(('%11s' * 2 + '%11.4g' * 5) % + (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) + callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths, list(mloss)) + if callbacks.stop_training: + return + # end batch ------------------------------------------------------------------------------------------------ + + # Scheduler + lr = [x['lr'] for x in optimizer.param_groups] # for loggers + scheduler.step() + + if RANK in {-1, 0}: + # mAP + callbacks.run('on_train_epoch_end', epoch=epoch) + ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) + final_epoch = (epoch + 1 == epochs) or stopper.possible_stop + if not noval or final_epoch: # Calculate mAP + results, maps, _ = validate.run(data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + half=amp, + model=ema.ema, + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + plots=False, + callbacks=callbacks, + compute_loss=compute_loss) + + # Update best mAP + fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] + stop = stopper(epoch=epoch, fitness=fi) # early stop check + if fi > best_fitness: + best_fitness = fi + log_vals = list(mloss) + list(results) + lr + callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi) + + # Save model + if (not nosave) or (final_epoch and not evolve): # if save + ckpt = { + 'epoch': epoch, + 'best_fitness': best_fitness, + 'model': deepcopy(de_parallel(model)).half(), + 'ema': deepcopy(ema.ema).half(), + 'updates': ema.updates, + 'optimizer': optimizer.state_dict(), + 'opt': 
vars(opt), + 'git': GIT_INFO, # {remote, branch, commit} if a git repo + 'date': datetime.now().isoformat()} + + # Save last, best and delete + torch.save(ckpt, last) + if best_fitness == fi: + torch.save(ckpt, best) + if opt.save_period > 0 and epoch % opt.save_period == 0: + torch.save(ckpt, w / f'epoch{epoch}.pt') + del ckpt + callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) + + # EarlyStopping + if RANK != -1: # if DDP training + broadcast_list = [stop if RANK == 0 else None] + dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks + if RANK != 0: + stop = broadcast_list[0] + if stop: + break # must break all DDP ranks + + # end epoch ---------------------------------------------------------------------------------------------------- + # end training ----------------------------------------------------------------------------------------------------- + if RANK in {-1, 0}: + LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') + for f in last, best: + if f.exists(): + if f is last: + strip_optimizer(f, last_striped) # strip optimizers + else: + strip_optimizer(f, best_striped) # strip optimizers + if f is best: + LOGGER.info(f'\nValidating {f}...') + results, _, _ = validate.run( + data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + model=attempt_load(f, device).half(), + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + save_json=is_coco, + verbose=True, + plots=plots, + callbacks=callbacks, + compute_loss=compute_loss) # val best model with plots + if is_coco: + callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi) + + callbacks.run('on_train_end', last, best, epoch, results) + + torch.cuda.empty_cache() + return results + + +def parse_opt(known=False): + parser = argparse.ArgumentParser() + # parser.add_argument('--weights', type=str, default=ROOT / 'yolo.pt', help='initial weights path') + # parser.add_argument('--cfg', type=str, default='', help='model.yaml path') + parser.add_argument('--weights', type=str, default='', help='initial weights path') + parser.add_argument('--cfg', type=str, default='yolo.yaml', help='model.yaml path') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') + parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') + parser.add_argument('--epochs', type=int, default=100, help='total training epochs') + parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') + parser.add_argument('--rect', action='store_true', help='rectangular training') + parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') + parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') + parser.add_argument('--noval', action='store_true', help='only validate final epoch') + parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') + parser.add_argument('--noplots', action='store_true', help='save no plot files') + parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') + parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') + 
parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk') + parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') + parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') + parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW', 'LION'], default='SGD', help='optimizer') + parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--quad', action='store_true', help='quad dataloader') + parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') + parser.add_argument('--flat-cos-lr', action='store_true', help='flat cosine LR scheduler') + parser.add_argument('--fixed-lr', action='store_true', help='fixed LR scheduler') + parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') + parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') + parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') + parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') + parser.add_argument('--seed', type=int, default=0, help='Global training seed') + parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') + parser.add_argument('--min-items', type=int, default=0, help='Experimental') + parser.add_argument('--close-mosaic', type=int, default=0, help='Experimental') + + # Logger arguments + parser.add_argument('--entity', default=None, help='Entity') + parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='Upload data, "val" option') + parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval') + parser.add_argument('--artifact_alias', type=str, default='latest', help='Version of dataset artifact to use') + + return parser.parse_known_args()[0] if known else parser.parse_args() + + +def main(opt, callbacks=Callbacks()): + # Checks + if RANK in {-1, 0}: + print_args(vars(opt)) + + # Resume (from specified or most recent last.pt) + if opt.resume and not check_comet_resume(opt) and not opt.evolve: + last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run()) + opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml + opt_data = opt.data # original dataset + if opt_yaml.is_file(): + with open(opt_yaml, errors='ignore') as f: + d = yaml.safe_load(f) + else: + d = torch.load(last, map_location='cpu')['opt'] + opt = argparse.Namespace(**d) # replace + opt.cfg, opt.weights, opt.resume = '', str(last), True # reinstate + if is_url(opt_data): + opt.data = check_file(opt_data) # avoid HUB resume auth timeout + else: + opt.data, 
opt.cfg, opt.hyp, opt.weights, opt.project = \ + check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks + assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' + if opt.evolve: + if opt.project == str(ROOT / 'runs/train'): # if default project name, rename to runs/evolve + opt.project = str(ROOT / 'runs/evolve') + opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume + if opt.name == 'cfg': + opt.name = Path(opt.cfg).stem # use model.yaml as name + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) + + # DDP mode + device = select_device(opt.device, batch_size=opt.batch_size) + if LOCAL_RANK != -1: + msg = 'is not compatible with YOLO Multi-GPU DDP training' + assert not opt.image_weights, f'--image-weights {msg}' + assert not opt.evolve, f'--evolve {msg}' + assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size' + assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE' + assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + torch.cuda.set_device(LOCAL_RANK) + device = torch.device('cuda', LOCAL_RANK) + dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + + # Train + if not opt.evolve: + train(opt.hyp, opt, device, callbacks) + + # Evolve hyperparameters (optional) + else: + # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit) + meta = { + 'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) + 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) + 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1 + 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay + 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok) + 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum + 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr + 'box': (1, 0.02, 0.2), # box loss gain + 'cls': (1, 0.2, 4.0), # cls loss gain + 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight + 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels) + 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight + 'iou_t': (0, 0.1, 0.7), # IoU training threshold + 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold + 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore) + 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5) + 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction) + 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction) + 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction) + 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg) + 'translate': (1, 0.0, 0.9), # image translation (+/- fraction) + 'scale': (1, 0.0, 0.9), # image scale (+/- gain) + 'shear': (1, 0.0, 10.0), # image shear (+/- deg) + 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 + 'flipud': (1, 0.0, 1.0), # image flip up-down (probability) + 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability) + 'mosaic': (1, 0.0, 1.0), # image mixup (probability) + 'mixup': (1, 0.0, 1.0), # image mixup (probability) + 'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability) + + with open(opt.hyp, errors='ignore') as f: + hyp = yaml.safe_load(f) # load hyps dict + if 'anchors' not in hyp: # anchors commented in hyp.yaml + hyp['anchors'] = 
3 + if opt.noautoanchor: + del hyp['anchors'], meta['anchors'] + opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch + # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices + evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' + if opt.bucket: + os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}') # download evolve.csv if exists + + for _ in range(opt.evolve): # generations to evolve + if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate + # Select parent(s) + parent = 'single' # parent selection method: 'single' or 'weighted' + x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1) + n = min(5, len(x)) # number of previous results to consider + x = x[np.argsort(-fitness(x))][:n] # top n mutations + w = fitness(x) - fitness(x).min() + 1E-6 # weights (sum > 0) + if parent == 'single' or len(x) == 1: + # x = x[random.randint(0, n - 1)] # random selection + x = x[random.choices(range(n), weights=w)[0]] # weighted selection + elif parent == 'weighted': + x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination + + # Mutate + mp, s = 0.8, 0.2 # mutation probability, sigma + npr = np.random + npr.seed(int(time.time())) + g = np.array([meta[k][0] for k in hyp.keys()]) # gains 0-1 + ng = len(meta) + v = np.ones(ng) + while all(v == 1): # mutate until a change occurs (prevent duplicates) + v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0) + for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300) + hyp[k] = float(x[i + 7] * v[i]) # mutate + + # Constrain to limits + for k, v in meta.items(): + hyp[k] = max(hyp[k], v[1]) # lower limit + hyp[k] = min(hyp[k], v[2]) # upper limit + hyp[k] = round(hyp[k], 5) # significant digits + + # Train mutation + results = train(hyp.copy(), opt, device, callbacks) + callbacks = Callbacks() + # Write mutation results + keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'val/box_loss', + 'val/obj_loss', 'val/cls_loss') + print_mutation(keys, results, hyp.copy(), save_dir, opt.bucket) + + # Plot results + plot_evolve(evolve_csv) + LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n' + f"Results saved to {colorstr('bold', save_dir)}\n" + f'Usage example: $ python train.py --hyp {evolve_yaml}') + + +def run(**kwargs): + # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolo.pt') + opt = parse_opt(True) + for k, v in kwargs.items(): + setattr(opt, k, v) + main(opt) + return opt + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/train_dual.py b/train_dual.py new file mode 100644 index 0000000000000000000000000000000000000000..1d21ac8f554502e519f92b442a29f66f53e4a30f --- /dev/null +++ b/train_dual.py @@ -0,0 +1,644 @@ +import argparse +import math +import os +import random +import sys +import time +from copy import deepcopy +from datetime import datetime +from pathlib import Path + +import numpy as np +import torch +import torch.distributed as dist +import torch.nn as nn +import yaml +from torch.optim import lr_scheduler +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[0] # YOLO root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +import val_dual as validate # for end-of-epoch mAP +from models.experimental import attempt_load +from models.yolo 
import Model +from utils.autoanchor import check_anchors +from utils.autobatch import check_train_batch_size +from utils.callbacks import Callbacks +from utils.dataloaders import create_dataloader +from utils.downloads import attempt_download, is_url +from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info, + check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, + get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, + labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer, + yaml_save, one_flat_cycle) +from utils.loggers import Loggers +from utils.loggers.comet.comet_utils import check_comet_resume +from utils.loss_tal_dual import ComputeLoss +#from utils.loss_tal_dual import ComputeLossLH as ComputeLoss +#from utils.loss_tal_dual import ComputeLossLHCF as ComputeLoss +from utils.metrics import fitness +from utils.plots import plot_evolve +from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer, + smart_resume, torch_distributed_zero_first) + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) +GIT_INFO = None#check_git_info() + + +def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary + save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \ + Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ + opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze + callbacks.run('on_pretrain_routine_start') + + # Directories + w = save_dir / 'weights' # weights dir + (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir + last, best = w / 'last.pt', w / 'best.pt' + + # Hyperparameters + if isinstance(hyp, str): + with open(hyp, errors='ignore') as f: + hyp = yaml.safe_load(f) # load hyps dict + LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) + hyp['anchor_t'] = 5.0 + opt.hyp = hyp.copy() # for saving hyps to checkpoints + + # Save run settings + if not evolve: + yaml_save(save_dir / 'hyp.yaml', hyp) + yaml_save(save_dir / 'opt.yaml', vars(opt)) + + # Loggers + data_dict = None + if RANK in {-1, 0}: + loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance + + # Register actions + for k in methods(loggers): + callbacks.register_action(k, callback=getattr(loggers, k)) + + # Process custom dataset artifact link + data_dict = loggers.remote_dataset + if resume: # If resuming runs from remote artifact + weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size + + # Config + plots = not evolve and not opt.noplots # create plots + cuda = device.type != 'cpu' + init_seeds(opt.seed + 1 + RANK, deterministic=True) + with torch_distributed_zero_first(LOCAL_RANK): + data_dict = data_dict or check_dataset(data) # check if None + train_path, val_path = data_dict['train'], data_dict['val'] + nc = 1 if single_cls else int(data_dict['nc']) # number of classes + names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names + #is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset + is_coco = isinstance(val_path, str) and val_path.endswith('val2017.txt') # COCO 
dataset + + # Model + check_suffix(weights, '.pt') # check weights + pretrained = weights.endswith('.pt') + if pretrained: + with torch_distributed_zero_first(LOCAL_RANK): + weights = attempt_download(weights) # download if not found locally + ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak + model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys + csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 + csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect + model.load_state_dict(csd, strict=False) # load + LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report + else: + model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + amp = check_amp(model) # check AMP + + # Freeze + freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze + for k, v in model.named_parameters(): + # v.requires_grad = True # train all layers TODO: uncomment this line as in master + # v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results) + if any(x in k for x in freeze): + LOGGER.info(f'freezing {k}') + v.requires_grad = False + + # Image size + gs = max(int(model.stride.max()), 32) # grid size (max stride) + imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple + + # Batch size + if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size + batch_size = check_train_batch_size(model, imgsz, amp) + loggers.on_params_update({"batch_size": batch_size}) + + # Optimizer + nbs = 64 # nominal batch size + accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing + hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay + optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay']) + + # Scheduler + if opt.cos_lr: + lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] + elif opt.flat_cos_lr: + lf = one_flat_cycle(1, hyp['lrf'], epochs) # flat cosine 1->hyp['lrf'] + elif opt.fixed_lr: + lf = lambda x: 1.0 + else: + lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear + + # def lf(x): # saw + # return (1 - (x % 30) / 30) * (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] + # + # def lf(x): # triangle start at min + # return 2 * abs(x / 30 - math.floor(x / 30 + 1 / 2)) * (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] + # + # def lf(x): # triangle start at max + # return 2 * abs(x / 32 + .5 - math.floor(x / 32 + 1)) * (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] + + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) + # from utils.plots import plot_lr_scheduler; plot_lr_scheduler(optimizer, scheduler, epochs) + + # EMA + ema = ModelEMA(model) if RANK in {-1, 0} else None + + # Resume + best_fitness, start_epoch = 0.0, 0 + if pretrained: + if resume: + best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume) + del ckpt, csd + + # DP mode + if cuda and RANK == -1 and torch.cuda.device_count() > 1: + LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.') + model = torch.nn.DataParallel(model) + + # SyncBatchNorm + if opt.sync_bn and cuda and RANK != -1: + model = 
torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) + LOGGER.info('Using SyncBatchNorm()') + + # Trainloader + train_loader, dataset = create_dataloader(train_path, + imgsz, + batch_size // WORLD_SIZE, + gs, + single_cls, + hyp=hyp, + augment=True, + cache=None if opt.cache == 'val' else opt.cache, + rect=opt.rect, + rank=LOCAL_RANK, + workers=workers, + image_weights=opt.image_weights, + close_mosaic=opt.close_mosaic != 0, + quad=opt.quad, + prefix=colorstr('train: '), + shuffle=True, + min_items=opt.min_items) + labels = np.concatenate(dataset.labels, 0) + mlc = int(labels[:, 0].max()) # max label class + assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}' + + # Process 0 + if RANK in {-1, 0}: + val_loader = create_dataloader(val_path, + imgsz, + batch_size // WORLD_SIZE * 2, + gs, + single_cls, + hyp=hyp, + cache=None if noval else opt.cache, + rect=True, + rank=-1, + workers=workers * 2, + pad=0.5, + prefix=colorstr('val: '))[0] + + if not resume: + # if not opt.noautoanchor: + # check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # run AutoAnchor + model.half().float() # pre-reduce anchor precision + + callbacks.run('on_pretrain_routine_end', labels, names) + + # DDP mode + if cuda and RANK != -1: + model = smart_DDP(model) + + # Model attributes + nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps) + #hyp['box'] *= 3 / nl # scale to layers + #hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers + #hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers + hyp['label_smoothing'] = opt.label_smoothing + model.nc = nc # attach number of classes to model + model.hyp = hyp # attach hyperparameters to model + model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights + model.names = names + + # Start training + t0 = time.time() + nb = len(train_loader) # number of batches + nw = max(round(hyp['warmup_epochs'] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations) + # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training + last_opt_step = -1 + maps = np.zeros(nc) # mAP per class + results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) + scheduler.last_epoch = start_epoch - 1 # do not move + scaler = torch.cuda.amp.GradScaler(enabled=amp) + stopper, stop = EarlyStopping(patience=opt.patience), False + compute_loss = ComputeLoss(model) # init loss class + callbacks.run('on_train_start') + LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' + f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' + f"Logging results to {colorstr('bold', save_dir)}\n" + f'Starting training for {epochs} epochs...') + for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ + callbacks.run('on_train_epoch_start') + model.train() + + # Update image weights (optional, single-GPU only) + if opt.image_weights: + cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights + iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights + dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx + if epoch == (epochs - opt.close_mosaic): + LOGGER.info("Closing dataloader mosaic") + dataset.mosaic = False + + # Update mosaic border (optional) + # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) 
+ # dataset.mosaic_border = [b - imgsz, -b] # height, width borders + + mloss = torch.zeros(3, device=device) # mean losses + if RANK != -1: + train_loader.sampler.set_epoch(epoch) + pbar = enumerate(train_loader) + LOGGER.info(('\n' + '%11s' * 7) % ('Epoch', 'GPU_mem', 'box_loss', 'cls_loss', 'dfl_loss', 'Instances', 'Size')) + if RANK in {-1, 0}: + pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar + optimizer.zero_grad() + for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- + callbacks.run('on_train_batch_start') + ni = i + nb * epoch # number integrated batches (since train start) + imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0 + + # Warmup + if ni <= nw: + xi = [0, nw] # x interp + # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) + accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round()) + for j, x in enumerate(optimizer.param_groups): + # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 + x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)]) + if 'momentum' in x: + x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) + + # Multi-scale + if opt.multi_scale: + sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size + sf = sz / max(imgs.shape[2:]) # scale factor + if sf != 1: + ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) + imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) + + # Forward + with torch.cuda.amp.autocast(amp): + pred = model(imgs) # forward + loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size + if RANK != -1: + loss *= WORLD_SIZE # gradient averaged between devices in DDP mode + if opt.quad: + loss *= 4. 
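+                    # Gradient accumulation with AMP: the scaled loss is backpropagated every batch, and every `accumulate` batches the gradients are unscaled, clipped to max_norm=10.0 and applied by the optimizer below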
+ + # Backward + scaler.scale(loss).backward() + + # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html + if ni - last_opt_step >= accumulate: + scaler.unscale_(optimizer) # unscale gradients + torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients + scaler.step(optimizer) # optimizer.step + scaler.update() + optimizer.zero_grad() + if ema: + ema.update(model) + last_opt_step = ni + + # Log + if RANK in {-1, 0}: + mloss = (mloss * i + loss_items) / (i + 1) # update mean losses + mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) + pbar.set_description(('%11s' * 2 + '%11.4g' * 5) % + (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) + callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths, list(mloss)) + if callbacks.stop_training: + return + # end batch ------------------------------------------------------------------------------------------------ + + # Scheduler + lr = [x['lr'] for x in optimizer.param_groups] # for loggers + scheduler.step() + + if RANK in {-1, 0}: + # mAP + callbacks.run('on_train_epoch_end', epoch=epoch) + ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) + final_epoch = (epoch + 1 == epochs) or stopper.possible_stop + if not noval or final_epoch: # Calculate mAP + results, maps, _ = validate.run(data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + half=amp, + model=ema.ema, + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + plots=False, + callbacks=callbacks, + compute_loss=compute_loss) + + # Update best mAP + fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] + stop = stopper(epoch=epoch, fitness=fi) # early stop check + if fi > best_fitness: + best_fitness = fi + log_vals = list(mloss) + list(results) + lr + callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi) + + # Save model + if (not nosave) or (final_epoch and not evolve): # if save + ckpt = { + 'epoch': epoch, + 'best_fitness': best_fitness, + 'model': deepcopy(de_parallel(model)).half(), + 'ema': deepcopy(ema.ema).half(), + 'updates': ema.updates, + 'optimizer': optimizer.state_dict(), + 'opt': vars(opt), + 'git': GIT_INFO, # {remote, branch, commit} if a git repo + 'date': datetime.now().isoformat()} + + # Save last, best and delete + torch.save(ckpt, last) + if best_fitness == fi: + torch.save(ckpt, best) + if opt.save_period > 0 and epoch % opt.save_period == 0: + torch.save(ckpt, w / f'epoch{epoch}.pt') + del ckpt + callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) + + # EarlyStopping + if RANK != -1: # if DDP training + broadcast_list = [stop if RANK == 0 else None] + dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks + if RANK != 0: + stop = broadcast_list[0] + if stop: + break # must break all DDP ranks + + # end epoch ---------------------------------------------------------------------------------------------------- + # end training ----------------------------------------------------------------------------------------------------- + if RANK in {-1, 0}: + LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') + for f in last, best: + if f.exists(): + strip_optimizer(f) # strip optimizers + if f is best: + LOGGER.info(f'\nValidating {f}...') + results, _, _ = validate.run( + data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + 
imgsz=imgsz, + model=attempt_load(f, device).half(), + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + save_json=is_coco, + verbose=True, + plots=plots, + callbacks=callbacks, + compute_loss=compute_loss) # val best model with plots + if is_coco: + callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi) + + callbacks.run('on_train_end', last, best, epoch, results) + + torch.cuda.empty_cache() + return results + + +def parse_opt(known=False): + parser = argparse.ArgumentParser() + # parser.add_argument('--weights', type=str, default=ROOT / 'yolo.pt', help='initial weights path') + # parser.add_argument('--cfg', type=str, default='', help='model.yaml path') + parser.add_argument('--weights', type=str, default='', help='initial weights path') + parser.add_argument('--cfg', type=str, default='yolo.yaml', help='model.yaml path') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco.yaml', help='dataset.yaml path') + parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-high.yaml', help='hyperparameters path') + parser.add_argument('--epochs', type=int, default=100, help='total training epochs') + parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') + parser.add_argument('--rect', action='store_true', help='rectangular training') + parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') + parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') + parser.add_argument('--noval', action='store_true', help='only validate final epoch') + parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') + parser.add_argument('--noplots', action='store_true', help='save no plot files') + parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') + parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') + parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk') + parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') + parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') + parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW', 'LION'], default='SGD', help='optimizer') + parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--quad', action='store_true', help='quad dataloader') + parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') + parser.add_argument('--flat-cos-lr', action='store_true', help='flat cosine LR scheduler') + parser.add_argument('--fixed-lr', action='store_true', help='fixed LR scheduler') + parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') + parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') + parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') + parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') + parser.add_argument('--seed', type=int, default=0, help='Global training seed') + parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') + parser.add_argument('--min-items', type=int, default=0, help='Experimental') + parser.add_argument('--close-mosaic', type=int, default=0, help='Experimental') + + # Logger arguments + parser.add_argument('--entity', default=None, help='Entity') + parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='Upload data, "val" option') + parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval') + parser.add_argument('--artifact_alias', type=str, default='latest', help='Version of dataset artifact to use') + + return parser.parse_known_args()[0] if known else parser.parse_args() + + +def main(opt, callbacks=Callbacks()): + # Checks + if RANK in {-1, 0}: + print_args(vars(opt)) + #check_git_status() + #check_requirements() + + # Resume (from specified or most recent last.pt) + if opt.resume and not check_comet_resume(opt) and not opt.evolve: + last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run()) + opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml + opt_data = opt.data # original dataset + if opt_yaml.is_file(): + with open(opt_yaml, errors='ignore') as f: + d = yaml.safe_load(f) + else: + d = torch.load(last, map_location='cpu')['opt'] + opt = argparse.Namespace(**d) # replace + opt.cfg, opt.weights, opt.resume = '', str(last), True # reinstate + if is_url(opt_data): + opt.data = check_file(opt_data) # avoid HUB resume auth timeout + else: + opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \ + check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks + assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must 
be specified' + if opt.evolve: + if opt.project == str(ROOT / 'runs/train'): # if default project name, rename to runs/evolve + opt.project = str(ROOT / 'runs/evolve') + opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume + if opt.name == 'cfg': + opt.name = Path(opt.cfg).stem # use model.yaml as name + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) + + # DDP mode + device = select_device(opt.device, batch_size=opt.batch_size) + if LOCAL_RANK != -1: + msg = 'is not compatible with YOLO Multi-GPU DDP training' + assert not opt.image_weights, f'--image-weights {msg}' + assert not opt.evolve, f'--evolve {msg}' + assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size' + assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE' + assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + torch.cuda.set_device(LOCAL_RANK) + device = torch.device('cuda', LOCAL_RANK) + dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + + # Train + if not opt.evolve: + train(opt.hyp, opt, device, callbacks) + + # Evolve hyperparameters (optional) + else: + # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit) + meta = { + 'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) + 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) + 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1 + 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay + 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok) + 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum + 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr + 'box': (1, 0.02, 0.2), # box loss gain + 'cls': (1, 0.2, 4.0), # cls loss gain + 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight + 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels) + 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight + 'iou_t': (0, 0.1, 0.7), # IoU training threshold + 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold + 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore) + 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5) + 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction) + 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction) + 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction) + 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg) + 'translate': (1, 0.0, 0.9), # image translation (+/- fraction) + 'scale': (1, 0.0, 0.9), # image scale (+/- gain) + 'shear': (1, 0.0, 10.0), # image shear (+/- deg) + 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 + 'flipud': (1, 0.0, 1.0), # image flip up-down (probability) + 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability) + 'mosaic': (1, 0.0, 1.0), # image mixup (probability) + 'mixup': (1, 0.0, 1.0), # image mixup (probability) + 'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability) + + with open(opt.hyp, errors='ignore') as f: + hyp = yaml.safe_load(f) # load hyps dict + if 'anchors' not in hyp: # anchors commented in hyp.yaml + hyp['anchors'] = 3 + if opt.noautoanchor: + del hyp['anchors'], meta['anchors'] + opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch + # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable 
indices + evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' + if opt.bucket: + os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}') # download evolve.csv if exists + + for _ in range(opt.evolve): # generations to evolve + if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate + # Select parent(s) + parent = 'single' # parent selection method: 'single' or 'weighted' + x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1) + n = min(5, len(x)) # number of previous results to consider + x = x[np.argsort(-fitness(x))][:n] # top n mutations + w = fitness(x) - fitness(x).min() + 1E-6 # weights (sum > 0) + if parent == 'single' or len(x) == 1: + # x = x[random.randint(0, n - 1)] # random selection + x = x[random.choices(range(n), weights=w)[0]] # weighted selection + elif parent == 'weighted': + x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination + + # Mutate + mp, s = 0.8, 0.2 # mutation probability, sigma + npr = np.random + npr.seed(int(time.time())) + g = np.array([meta[k][0] for k in hyp.keys()]) # gains 0-1 + ng = len(meta) + v = np.ones(ng) + while all(v == 1): # mutate until a change occurs (prevent duplicates) + v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0) + for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300) + hyp[k] = float(x[i + 7] * v[i]) # mutate + + # Constrain to limits + for k, v in meta.items(): + hyp[k] = max(hyp[k], v[1]) # lower limit + hyp[k] = min(hyp[k], v[2]) # upper limit + hyp[k] = round(hyp[k], 5) # significant digits + + # Train mutation + results = train(hyp.copy(), opt, device, callbacks) + callbacks = Callbacks() + # Write mutation results + keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'val/box_loss', + 'val/obj_loss', 'val/cls_loss') + print_mutation(keys, results, hyp.copy(), save_dir, opt.bucket) + + # Plot results + plot_evolve(evolve_csv) + LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n' + f"Results saved to {colorstr('bold', save_dir)}\n" + f'Usage example: $ python train.py --hyp {evolve_yaml}') + + +def run(**kwargs): + # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolo.pt') + opt = parse_opt(True) + for k, v in kwargs.items(): + setattr(opt, k, v) + main(opt) + return opt + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/train_triple.py b/train_triple.py new file mode 100644 index 0000000000000000000000000000000000000000..4dbbc1eeec60eb1871aecefe9808af4d33260f6d --- /dev/null +++ b/train_triple.py @@ -0,0 +1,636 @@ +import argparse +import math +import os +import random +import sys +import time +from copy import deepcopy +from datetime import datetime +from pathlib import Path + +import numpy as np +import torch +import torch.distributed as dist +import torch.nn as nn +import yaml +from torch.optim import lr_scheduler +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[0] # YOLO root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +import val_triple as validate # for end-of-epoch mAP +from models.experimental import attempt_load +from models.yolo import Model +from utils.autoanchor import check_anchors +from utils.autobatch import check_train_batch_size +from utils.callbacks import Callbacks +from utils.dataloaders import create_dataloader +from utils.downloads import 
attempt_download, is_url +from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info, + check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, + get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, + labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer, + yaml_save) +from utils.loggers import Loggers +from utils.loggers.comet.comet_utils import check_comet_resume +from utils.loss_tal_triple import ComputeLoss +from utils.metrics import fitness +from utils.plots import plot_evolve +from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer, + smart_resume, torch_distributed_zero_first) + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) +GIT_INFO = None#check_git_info() + + +def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary + save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \ + Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ + opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze + callbacks.run('on_pretrain_routine_start') + + # Directories + w = save_dir / 'weights' # weights dir + (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir + last, best = w / 'last.pt', w / 'best.pt' + + # Hyperparameters + if isinstance(hyp, str): + with open(hyp, errors='ignore') as f: + hyp = yaml.safe_load(f) # load hyps dict + LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) + hyp['anchor_t'] = 5.0 + opt.hyp = hyp.copy() # for saving hyps to checkpoints + + # Save run settings + if not evolve: + yaml_save(save_dir / 'hyp.yaml', hyp) + yaml_save(save_dir / 'opt.yaml', vars(opt)) + + # Loggers + data_dict = None + if RANK in {-1, 0}: + loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance + + # Register actions + for k in methods(loggers): + callbacks.register_action(k, callback=getattr(loggers, k)) + + # Process custom dataset artifact link + data_dict = loggers.remote_dataset + if resume: # If resuming runs from remote artifact + weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size + + # Config + plots = not evolve and not opt.noplots # create plots + cuda = device.type != 'cpu' + init_seeds(opt.seed + 1 + RANK, deterministic=True) + with torch_distributed_zero_first(LOCAL_RANK): + data_dict = data_dict or check_dataset(data) # check if None + train_path, val_path = data_dict['train'], data_dict['val'] + nc = 1 if single_cls else int(data_dict['nc']) # number of classes + names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names + #is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset + is_coco = isinstance(val_path, str) and val_path.endswith('val2017.txt') # COCO dataset + + # Model + check_suffix(weights, '.pt') # check weights + pretrained = weights.endswith('.pt') + if pretrained: + with torch_distributed_zero_first(LOCAL_RANK): + weights = attempt_download(weights) # download if not found locally + ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak + model = Model(cfg or 
ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys + csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 + csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect + model.load_state_dict(csd, strict=False) # load + LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report + else: + model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + amp = check_amp(model) # check AMP + + # Freeze + freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze + for k, v in model.named_parameters(): + # v.requires_grad = True # train all layers TODO: uncomment this line as in master + # v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results) + if any(x in k for x in freeze): + LOGGER.info(f'freezing {k}') + v.requires_grad = False + + # Image size + gs = max(int(model.stride.max()), 32) # grid size (max stride) + imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple + + # Batch size + if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size + batch_size = check_train_batch_size(model, imgsz, amp) + loggers.on_params_update({"batch_size": batch_size}) + + # Optimizer + nbs = 64 # nominal batch size + accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing + hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay + optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay']) + + # Scheduler + if opt.cos_lr: + lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] + else: + lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear + + # def lf(x): # saw + # return (1 - (x % 30) / 30) * (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] + # + # def lf(x): # triangle start at min + # return 2 * abs(x / 30 - math.floor(x / 30 + 1 / 2)) * (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] + # + # def lf(x): # triangle start at max + # return 2 * abs(x / 32 + .5 - math.floor(x / 32 + 1)) * (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] + + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) + # from utils.plots import plot_lr_scheduler; plot_lr_scheduler(optimizer, scheduler, epochs) + + # EMA + ema = ModelEMA(model) if RANK in {-1, 0} else None + + # Resume + best_fitness, start_epoch = 0.0, 0 + if pretrained: + if resume: + best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume) + del ckpt, csd + + # DP mode + if cuda and RANK == -1 and torch.cuda.device_count() > 1: + LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.') + model = torch.nn.DataParallel(model) + + # SyncBatchNorm + if opt.sync_bn and cuda and RANK != -1: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) + LOGGER.info('Using SyncBatchNorm()') + + # Trainloader + train_loader, dataset = create_dataloader(train_path, + imgsz, + batch_size // WORLD_SIZE, + gs, + single_cls, + hyp=hyp, + augment=True, + cache=None if opt.cache == 'val' else opt.cache, + rect=opt.rect, + rank=LOCAL_RANK, + workers=workers, + image_weights=opt.image_weights, + close_mosaic=opt.close_mosaic != 0, + quad=opt.quad, + prefix=colorstr('train: '), + shuffle=True, + min_items=opt.min_items) 
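+    # Sanity-check the dataset labels before training: the highest class index present must be smaller than nc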
+ labels = np.concatenate(dataset.labels, 0) + mlc = int(labels[:, 0].max()) # max label class + assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}' + + # Process 0 + if RANK in {-1, 0}: + val_loader = create_dataloader(val_path, + imgsz, + batch_size // WORLD_SIZE * 2, + gs, + single_cls, + hyp=hyp, + cache=None if noval else opt.cache, + rect=True, + rank=-1, + workers=workers * 2, + pad=0.5, + prefix=colorstr('val: '))[0] + + if not resume: + # if not opt.noautoanchor: + # check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # run AutoAnchor + model.half().float() # pre-reduce anchor precision + + callbacks.run('on_pretrain_routine_end', labels, names) + + # DDP mode + if cuda and RANK != -1: + model = smart_DDP(model) + + # Model attributes + nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps) + #hyp['box'] *= 3 / nl # scale to layers + #hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers + #hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers + hyp['label_smoothing'] = opt.label_smoothing + model.nc = nc # attach number of classes to model + model.hyp = hyp # attach hyperparameters to model + model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights + model.names = names + + # Start training + t0 = time.time() + nb = len(train_loader) # number of batches + nw = max(round(hyp['warmup_epochs'] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations) + # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training + last_opt_step = -1 + maps = np.zeros(nc) # mAP per class + results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) + scheduler.last_epoch = start_epoch - 1 # do not move + scaler = torch.cuda.amp.GradScaler(enabled=amp) + stopper, stop = EarlyStopping(patience=opt.patience), False + compute_loss = ComputeLoss(model) # init loss class + callbacks.run('on_train_start') + LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' + f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' + f"Logging results to {colorstr('bold', save_dir)}\n" + f'Starting training for {epochs} epochs...') + for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ + callbacks.run('on_train_epoch_start') + model.train() + + # Update image weights (optional, single-GPU only) + if opt.image_weights: + cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights + iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights + dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx + if epoch == (epochs - opt.close_mosaic): + LOGGER.info("Closing dataloader mosaic") + dataset.mosaic = False + + # Update mosaic border (optional) + # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) + # dataset.mosaic_border = [b - imgsz, -b] # height, width borders + + mloss = torch.zeros(3, device=device) # mean losses + if RANK != -1: + train_loader.sampler.set_epoch(epoch) + pbar = enumerate(train_loader) + LOGGER.info(('\n' + '%11s' * 7) % ('Epoch', 'GPU_mem', 'box_loss', 'cls_loss', 'dfl_loss', 'Instances', 'Size')) + if RANK in {-1, 0}: + pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar + optimizer.zero_grad() + for i, (imgs, targets, paths, _) in pbar: # batch 
------------------------------------------------------------- + callbacks.run('on_train_batch_start') + ni = i + nb * epoch # number integrated batches (since train start) + imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0 + + # Warmup + if ni <= nw: + xi = [0, nw] # x interp + # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) + accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round()) + for j, x in enumerate(optimizer.param_groups): + # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 + x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)]) + if 'momentum' in x: + x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) + + # Multi-scale + if opt.multi_scale: + sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size + sf = sz / max(imgs.shape[2:]) # scale factor + if sf != 1: + ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) + imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) + + # Forward + with torch.cuda.amp.autocast(amp): + pred = model(imgs) # forward + loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size + if RANK != -1: + loss *= WORLD_SIZE # gradient averaged between devices in DDP mode + if opt.quad: + loss *= 4. + + # Backward + scaler.scale(loss).backward() + + # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html + if ni - last_opt_step >= accumulate: + scaler.unscale_(optimizer) # unscale gradients + torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients + scaler.step(optimizer) # optimizer.step + scaler.update() + optimizer.zero_grad() + if ema: + ema.update(model) + last_opt_step = ni + + # Log + if RANK in {-1, 0}: + mloss = (mloss * i + loss_items) / (i + 1) # update mean losses + mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) + pbar.set_description(('%11s' * 2 + '%11.4g' * 5) % + (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) + callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths, list(mloss)) + if callbacks.stop_training: + return + # end batch ------------------------------------------------------------------------------------------------ + + # Scheduler + lr = [x['lr'] for x in optimizer.param_groups] # for loggers + scheduler.step() + + if RANK in {-1, 0}: + # mAP + callbacks.run('on_train_epoch_end', epoch=epoch) + ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) + final_epoch = (epoch + 1 == epochs) or stopper.possible_stop + if not noval or final_epoch: # Calculate mAP + results, maps, _ = validate.run(data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + half=amp, + model=ema.ema, + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + plots=False, + callbacks=callbacks, + compute_loss=compute_loss) + + # Update best mAP + fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] + stop = stopper(epoch=epoch, fitness=fi) # early stop check + if fi > best_fitness: + best_fitness = fi + log_vals = list(mloss) + list(results) + lr + callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi) + + # Save model + if (not nosave) or (final_epoch and not evolve): # if save + ckpt = { + 'epoch': epoch, + 
'best_fitness': best_fitness, + 'model': deepcopy(de_parallel(model)).half(), + 'ema': deepcopy(ema.ema).half(), + 'updates': ema.updates, + 'optimizer': optimizer.state_dict(), + 'opt': vars(opt), + 'git': GIT_INFO, # {remote, branch, commit} if a git repo + 'date': datetime.now().isoformat()} + + # Save last, best and delete + torch.save(ckpt, last) + if best_fitness == fi: + torch.save(ckpt, best) + if opt.save_period > 0 and epoch % opt.save_period == 0: + torch.save(ckpt, w / f'epoch{epoch}.pt') + del ckpt + callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) + + # EarlyStopping + if RANK != -1: # if DDP training + broadcast_list = [stop if RANK == 0 else None] + dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks + if RANK != 0: + stop = broadcast_list[0] + if stop: + break # must break all DDP ranks + + # end epoch ---------------------------------------------------------------------------------------------------- + # end training ----------------------------------------------------------------------------------------------------- + if RANK in {-1, 0}: + LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') + for f in last, best: + if f.exists(): + strip_optimizer(f) # strip optimizers + if f is best: + LOGGER.info(f'\nValidating {f}...') + results, _, _ = validate.run( + data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + model=attempt_load(f, device).half(), + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + save_json=is_coco, + verbose=True, + plots=plots, + callbacks=callbacks, + compute_loss=compute_loss) # val best model with plots + if is_coco: + callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi) + + callbacks.run('on_train_end', last, best, epoch, results) + + torch.cuda.empty_cache() + return results + + +def parse_opt(known=False): + parser = argparse.ArgumentParser() + # parser.add_argument('--weights', type=str, default=ROOT / 'yolo.pt', help='initial weights path') + # parser.add_argument('--cfg', type=str, default='', help='model.yaml path') + parser.add_argument('--weights', type=str, default='', help='initial weights path') + parser.add_argument('--cfg', type=str, default='yolo.yaml', help='model.yaml path') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco.yaml', help='dataset.yaml path') + parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-high.yaml', help='hyperparameters path') + parser.add_argument('--epochs', type=int, default=100, help='total training epochs') + parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') + parser.add_argument('--rect', action='store_true', help='rectangular training') + parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') + parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') + parser.add_argument('--noval', action='store_true', help='only validate final epoch') + parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') + parser.add_argument('--noplots', action='store_true', help='save no plot files') + parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') + 
parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') + parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk') + parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') + parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') + parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW', 'LION'], default='SGD', help='optimizer') + parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--quad', action='store_true', help='quad dataloader') + parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') + parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') + parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') + parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') + parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') + parser.add_argument('--seed', type=int, default=0, help='Global training seed') + parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') + parser.add_argument('--min-items', type=int, default=0, help='Experimental') + parser.add_argument('--close-mosaic', type=int, default=0, help='Experimental') + + # Logger arguments + parser.add_argument('--entity', default=None, help='Entity') + parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='Upload data, "val" option') + parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval') + parser.add_argument('--artifact_alias', type=str, default='latest', help='Version of dataset artifact to use') + + return parser.parse_known_args()[0] if known else parser.parse_args() + + +def main(opt, callbacks=Callbacks()): + # Checks + if RANK in {-1, 0}: + print_args(vars(opt)) + #check_git_status() + #check_requirements() + + # Resume (from specified or most recent last.pt) + if opt.resume and not check_comet_resume(opt) and not opt.evolve: + last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run()) + opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml + opt_data = opt.data # original dataset + if opt_yaml.is_file(): + with open(opt_yaml, errors='ignore') as f: + d = yaml.safe_load(f) + else: + d = torch.load(last, map_location='cpu')['opt'] + opt = argparse.Namespace(**d) # replace + opt.cfg, opt.weights, opt.resume = '', str(last), True # reinstate + if is_url(opt_data): + opt.data = check_file(opt_data) # avoid HUB resume auth timeout + else: + opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \ + 
check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks + assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' + if opt.evolve: + if opt.project == str(ROOT / 'runs/train'): # if default project name, rename to runs/evolve + opt.project = str(ROOT / 'runs/evolve') + opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume + if opt.name == 'cfg': + opt.name = Path(opt.cfg).stem # use model.yaml as name + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) + + # DDP mode + device = select_device(opt.device, batch_size=opt.batch_size) + if LOCAL_RANK != -1: + msg = 'is not compatible with YOLO Multi-GPU DDP training' + assert not opt.image_weights, f'--image-weights {msg}' + assert not opt.evolve, f'--evolve {msg}' + assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size' + assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE' + assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + torch.cuda.set_device(LOCAL_RANK) + device = torch.device('cuda', LOCAL_RANK) + dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + + # Train + if not opt.evolve: + train(opt.hyp, opt, device, callbacks) + + # Evolve hyperparameters (optional) + else: + # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit) + meta = { + 'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) + 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) + 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1 + 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay + 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok) + 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum + 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr + 'box': (1, 0.02, 0.2), # box loss gain + 'cls': (1, 0.2, 4.0), # cls loss gain + 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight + 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels) + 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight + 'iou_t': (0, 0.1, 0.7), # IoU training threshold + 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold + 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore) + 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5) + 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction) + 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction) + 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction) + 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg) + 'translate': (1, 0.0, 0.9), # image translation (+/- fraction) + 'scale': (1, 0.0, 0.9), # image scale (+/- gain) + 'shear': (1, 0.0, 10.0), # image shear (+/- deg) + 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 + 'flipud': (1, 0.0, 1.0), # image flip up-down (probability) + 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability) + 'mosaic': (1, 0.0, 1.0), # image mixup (probability) + 'mixup': (1, 0.0, 1.0), # image mixup (probability) + 'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability) + + with open(opt.hyp, errors='ignore') as f: + hyp = yaml.safe_load(f) # load hyps dict + if 'anchors' not in hyp: # anchors commented in hyp.yaml + hyp['anchors'] = 3 + if opt.noautoanchor: + del hyp['anchors'], 
meta['anchors'] + opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch + # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices + evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' + if opt.bucket: + os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}') # download evolve.csv if exists + + for _ in range(opt.evolve): # generations to evolve + if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate + # Select parent(s) + parent = 'single' # parent selection method: 'single' or 'weighted' + x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1) + n = min(5, len(x)) # number of previous results to consider + x = x[np.argsort(-fitness(x))][:n] # top n mutations + w = fitness(x) - fitness(x).min() + 1E-6 # weights (sum > 0) + if parent == 'single' or len(x) == 1: + # x = x[random.randint(0, n - 1)] # random selection + x = x[random.choices(range(n), weights=w)[0]] # weighted selection + elif parent == 'weighted': + x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination + + # Mutate + mp, s = 0.8, 0.2 # mutation probability, sigma + npr = np.random + npr.seed(int(time.time())) + g = np.array([meta[k][0] for k in hyp.keys()]) # gains 0-1 + ng = len(meta) + v = np.ones(ng) + while all(v == 1): # mutate until a change occurs (prevent duplicates) + v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0) + for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300) + hyp[k] = float(x[i + 7] * v[i]) # mutate + + # Constrain to limits + for k, v in meta.items(): + hyp[k] = max(hyp[k], v[1]) # lower limit + hyp[k] = min(hyp[k], v[2]) # upper limit + hyp[k] = round(hyp[k], 5) # significant digits + + # Train mutation + results = train(hyp.copy(), opt, device, callbacks) + callbacks = Callbacks() + # Write mutation results + keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'val/box_loss', + 'val/obj_loss', 'val/cls_loss') + print_mutation(keys, results, hyp.copy(), save_dir, opt.bucket) + + # Plot results + plot_evolve(evolve_csv) + LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n' + f"Results saved to {colorstr('bold', save_dir)}\n" + f'Usage example: $ python train.py --hyp {evolve_yaml}') + + +def run(**kwargs): + # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolo.pt') + opt = parse_opt(True) + for k, v in kwargs.items(): + setattr(opt, k, v) + main(opt) + return opt + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/utils/__init__.py b/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3d1de89ab906487f1ef16c8de246f7da5e2f408c --- /dev/null +++ b/utils/__init__.py @@ -0,0 +1,75 @@ +import contextlib +import platform +import threading + + +def emojis(str=''): + # Return platform-dependent emoji-safe version of string + return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str + + +class TryExcept(contextlib.ContextDecorator): + # YOLOv5 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager + def __init__(self, msg=''): + self.msg = msg + + def __enter__(self): + pass + + def __exit__(self, exc_type, value, traceback): + if value: + print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}")) + return True + + +def threaded(func): + # Multi-threads a target function and returns thread. 
Usage: @threaded decorator + def wrapper(*args, **kwargs): + thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True) + thread.start() + return thread + + return wrapper + + +def join_threads(verbose=False): + # Join all daemon threads, i.e. atexit.register(lambda: join_threads()) + main_thread = threading.current_thread() + for t in threading.enumerate(): + if t is not main_thread: + if verbose: + print(f'Joining thread {t.name}') + t.join() + + +def notebook_init(verbose=True): + # Check system software and hardware + print('Checking setup...') + + import os + import shutil + + from utils.general import check_font, check_requirements, is_colab + from utils.torch_utils import select_device # imports + + check_font() + + import psutil + from IPython import display # to display images and clear console output + + if is_colab(): + shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory + + # System info + if verbose: + gb = 1 << 30 # bytes to GiB (1024 ** 3) + ram = psutil.virtual_memory().total + total, used, free = shutil.disk_usage("/") + display.clear_output() + s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)' + else: + s = '' + + select_device(newline=False) + print(emojis(f'Setup complete ✅ {s}')) + return display diff --git a/utils/__pycache__/__init__.cpython-310.pyc b/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0a8bf139bbda15d0a5a9d26eaa3e2624b7326f4 Binary files /dev/null and b/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/utils/__pycache__/__init__.cpython-311.pyc b/utils/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11201b83ba7046b564b61d95df7e4a111371916e Binary files /dev/null and b/utils/__pycache__/__init__.cpython-311.pyc differ diff --git a/utils/__pycache__/augmentations.cpython-310.pyc b/utils/__pycache__/augmentations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f6e27493bcb3fdb3e04f82e31686e921bfdffff Binary files /dev/null and b/utils/__pycache__/augmentations.cpython-310.pyc differ diff --git a/utils/__pycache__/augmentations.cpython-311.pyc b/utils/__pycache__/augmentations.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7926278882d285cb598dfb18245d6a3b2fd4725e Binary files /dev/null and b/utils/__pycache__/augmentations.cpython-311.pyc differ diff --git a/utils/__pycache__/autoanchor.cpython-310.pyc b/utils/__pycache__/autoanchor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1cc3fae7d9d87a6715d63c530132592619fab27 Binary files /dev/null and b/utils/__pycache__/autoanchor.cpython-310.pyc differ diff --git a/utils/__pycache__/autobatch.cpython-310.pyc b/utils/__pycache__/autobatch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..196a4f6c3053172c6f6e9d3490771753481fb3e8 Binary files /dev/null and b/utils/__pycache__/autobatch.cpython-310.pyc differ diff --git a/utils/__pycache__/callbacks.cpython-310.pyc b/utils/__pycache__/callbacks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1f63a31456c0425cc87f4eaa043609284b9fb66 Binary files /dev/null and b/utils/__pycache__/callbacks.cpython-310.pyc differ diff --git a/utils/__pycache__/dataloaders.cpython-310.pyc b/utils/__pycache__/dataloaders.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..baeccd0339cd866b19459095d4159d8661f2209d Binary files /dev/null and b/utils/__pycache__/dataloaders.cpython-310.pyc differ diff --git a/utils/__pycache__/dataloaders.cpython-311.pyc b/utils/__pycache__/dataloaders.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1275af07f5bbca6723d52a8d7ee5ff06f75f2ead Binary files /dev/null and b/utils/__pycache__/dataloaders.cpython-311.pyc differ diff --git a/utils/__pycache__/downloads.cpython-310.pyc b/utils/__pycache__/downloads.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23c42b60222e07818430144574210e23b7db9e6a Binary files /dev/null and b/utils/__pycache__/downloads.cpython-310.pyc differ diff --git a/utils/__pycache__/downloads.cpython-311.pyc b/utils/__pycache__/downloads.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc1f077d9482e188e199e505428a047aeb17c81b Binary files /dev/null and b/utils/__pycache__/downloads.cpython-311.pyc differ diff --git a/utils/__pycache__/general.cpython-310.pyc b/utils/__pycache__/general.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63d5ff231d8482fd77aef64fd54c469cb0402efa Binary files /dev/null and b/utils/__pycache__/general.cpython-310.pyc differ diff --git a/utils/__pycache__/general.cpython-311.pyc b/utils/__pycache__/general.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d687c7712f6af875220cdb23056606f39e55d18 Binary files /dev/null and b/utils/__pycache__/general.cpython-311.pyc differ diff --git a/utils/__pycache__/lion.cpython-310.pyc b/utils/__pycache__/lion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..612ee7931c2c58e512be3617bdd0d0448f969ef3 Binary files /dev/null and b/utils/__pycache__/lion.cpython-310.pyc differ diff --git a/utils/__pycache__/lion.cpython-311.pyc b/utils/__pycache__/lion.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef06ff7235d415841b7155f6c666eae8a50c6591 Binary files /dev/null and b/utils/__pycache__/lion.cpython-311.pyc differ diff --git a/utils/__pycache__/loss_tal_dual.cpython-310.pyc b/utils/__pycache__/loss_tal_dual.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50c99c6b7b479067b1e9abefb076d45368343865 Binary files /dev/null and b/utils/__pycache__/loss_tal_dual.cpython-310.pyc differ diff --git a/utils/__pycache__/metrics.cpython-310.pyc b/utils/__pycache__/metrics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fbb05f882e189f9b20e09cecc5a02b870a3c6ff5 Binary files /dev/null and b/utils/__pycache__/metrics.cpython-310.pyc differ diff --git a/utils/__pycache__/metrics.cpython-311.pyc b/utils/__pycache__/metrics.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f57d800d48c6f19d6172907ec57bd0911f51d60 Binary files /dev/null and b/utils/__pycache__/metrics.cpython-311.pyc differ diff --git a/utils/__pycache__/plots.cpython-310.pyc b/utils/__pycache__/plots.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b6ede8524bbbc11539a3a7d118bdb5515be893b Binary files /dev/null and b/utils/__pycache__/plots.cpython-310.pyc differ diff --git a/utils/__pycache__/plots.cpython-311.pyc b/utils/__pycache__/plots.cpython-311.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..8b350d0235bb04b87bdc57d569e5f4acc9e48efc Binary files /dev/null and b/utils/__pycache__/plots.cpython-311.pyc differ diff --git a/utils/__pycache__/torch_utils.cpython-310.pyc b/utils/__pycache__/torch_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59d426a8b5edf638f74b9cfebcd97abd147fceae Binary files /dev/null and b/utils/__pycache__/torch_utils.cpython-310.pyc differ diff --git a/utils/__pycache__/torch_utils.cpython-311.pyc b/utils/__pycache__/torch_utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08c063bef2aa164c75bb509a580a253c9ac115d8 Binary files /dev/null and b/utils/__pycache__/torch_utils.cpython-311.pyc differ diff --git a/utils/activations.py b/utils/activations.py new file mode 100644 index 0000000000000000000000000000000000000000..aeb00e6c7fc936ad596706c19a89ae7d2605f1c9 --- /dev/null +++ b/utils/activations.py @@ -0,0 +1,98 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class SiLU(nn.Module): + # SiLU activation https://arxiv.org/pdf/1606.08415.pdf + @staticmethod + def forward(x): + return x * torch.sigmoid(x) + + +class Hardswish(nn.Module): + # Hard-SiLU activation + @staticmethod + def forward(x): + # return x * F.hardsigmoid(x) # for TorchScript and CoreML + return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for TorchScript, CoreML and ONNX + + +class Mish(nn.Module): + # Mish activation https://github.com/digantamisra98/Mish + @staticmethod + def forward(x): + return x * F.softplus(x).tanh() + + +class MemoryEfficientMish(nn.Module): + # Mish activation memory-efficient + class F(torch.autograd.Function): + + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x))) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + sx = torch.sigmoid(x) + fx = F.softplus(x).tanh() + return grad_output * (fx + x * sx * (1 - fx * fx)) + + def forward(self, x): + return self.F.apply(x) + + +class FReLU(nn.Module): + # FReLU activation https://arxiv.org/abs/2007.11824 + def __init__(self, c1, k=3): # ch_in, kernel + super().__init__() + self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False) + self.bn = nn.BatchNorm2d(c1) + + def forward(self, x): + return torch.max(x, self.bn(self.conv(x))) + + +class AconC(nn.Module): + r""" ACON activation (activate or not) + AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter + according to "Activate or Not: Learning Customized Activation" . + """ + + def __init__(self, c1): + super().__init__() + self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.beta = nn.Parameter(torch.ones(1, c1, 1, 1)) + + def forward(self, x): + dpx = (self.p1 - self.p2) * x + return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x + + +class MetaAconC(nn.Module): + r""" ACON activation (activate or not) + MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network + according to "Activate or Not: Learning Customized Activation" . 
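+    Here beta is computed per channel from a global average pool followed by two 1x1 convolutions (fc1, fc2) and a sigmoid.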
+ """ + + def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r + super().__init__() + c2 = max(r, c1 // r) + self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True) + self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True) + # self.bn1 = nn.BatchNorm2d(c2) + # self.bn2 = nn.BatchNorm2d(c1) + + def forward(self, x): + y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True) + # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891 + # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable + beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed + dpx = (self.p1 - self.p2) * x + return dpx * torch.sigmoid(beta * dpx) + self.p2 * x diff --git a/utils/augmentations.py b/utils/augmentations.py new file mode 100644 index 0000000000000000000000000000000000000000..ad4c07fb69ea43a113b4fcd0bed58eb8dda13f71 --- /dev/null +++ b/utils/augmentations.py @@ -0,0 +1,395 @@ +import math +import random + +import cv2 +import numpy as np +import torch +import torchvision.transforms as T +import torchvision.transforms.functional as TF + +from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy +from utils.metrics import bbox_ioa + +IMAGENET_MEAN = 0.485, 0.456, 0.406 # RGB mean +IMAGENET_STD = 0.229, 0.224, 0.225 # RGB standard deviation + + +class Albumentations: + # YOLOv5 Albumentations class (optional, only used if package is installed) + def __init__(self, size=640): + self.transform = None + prefix = colorstr('albumentations: ') + try: + import albumentations as A + check_version(A.__version__, '1.0.3', hard=True) # version requirement + + T = [ + A.RandomResizedCrop(height=size, width=size, scale=(0.8, 1.0), ratio=(0.9, 1.11), p=0.0), + A.Blur(p=0.01), + A.MedianBlur(p=0.01), + A.ToGray(p=0.01), + A.CLAHE(p=0.01), + A.RandomBrightnessContrast(p=0.0), + A.RandomGamma(p=0.0), + A.ImageCompression(quality_lower=75, p=0.0)] # transforms + self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) + + LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p)) + except ImportError: # package not installed, skip + pass + except Exception as e: + LOGGER.info(f'{prefix}{e}') + + def __call__(self, im, labels, p=1.0): + if self.transform and random.random() < p: + new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed + im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])]) + return im, labels + + +def normalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD, inplace=False): + # Denormalize RGB images x per ImageNet stats in BCHW format, i.e. = (x - mean) / std + return TF.normalize(x, mean, std, inplace=inplace) + + +def denormalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD): + # Denormalize RGB images x per ImageNet stats in BCHW format, i.e. 
= x * std + mean + for i in range(3): + x[:, i] = x[:, i] * std[i] + mean[i] + return x + + +def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): + # HSV color-space augmentation + if hgain or sgain or vgain: + r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains + hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV)) + dtype = im.dtype # uint8 + + x = np.arange(0, 256, dtype=r.dtype) + lut_hue = ((x * r[0]) % 180).astype(dtype) + lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) + lut_val = np.clip(x * r[2], 0, 255).astype(dtype) + + im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) + cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed + + +def hist_equalize(im, clahe=True, bgr=False): + # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255 + yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) + if clahe: + c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) + yuv[:, :, 0] = c.apply(yuv[:, :, 0]) + else: + yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram + return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB + + +def replicate(im, labels): + # Replicate labels + h, w = im.shape[:2] + boxes = labels[:, 1:].astype(int) + x1, y1, x2, y2 = boxes.T + s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) + for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices + x1b, y1b, x2b, y2b = boxes[i] + bh, bw = y2b - y1b, x2b - x1b + yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y + x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] + im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax] + labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) + + return im, labels + + +def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): + # Resize and pad image while meeting stride-multiple constraints + shape = im.shape[:2] # current shape [height, width] + if isinstance(new_shape, int): + new_shape = (new_shape, new_shape) + + # Scale ratio (new / old) + r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) + if not scaleup: # only scale down, do not scale up (for better val mAP) + r = min(r, 1.0) + + # Compute padding + ratio = r, r # width, height ratios + new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) + dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding + if auto: # minimum rectangle + dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding + elif scaleFill: # stretch + dw, dh = 0.0, 0.0 + new_unpad = (new_shape[1], new_shape[0]) + ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios + + dw /= 2 # divide padding into 2 sides + dh /= 2 + + if shape[::-1] != new_unpad: # resize + im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border + return im, ratio, (dw, dh) + + +def random_perspective(im, + targets=(), + segments=(), + degrees=10, + translate=.1, + scale=.1, + shear=10, + perspective=0.0, + border=(0, 0)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10)) + # targets = [cls, xyxy] + + height = 
im.shape[0] + border[0] * 2 # shape(h,w,c) + width = im.shape[1] + border[1] * 2 + + # Center + C = np.eye(3) + C[0, 2] = -im.shape[1] / 2 # x translation (pixels) + C[1, 2] = -im.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) + P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3) + a = random.uniform(-degrees, degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - scale, 1 + scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) + T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # Visualize + # import matplotlib.pyplot as plt + # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() + # ax[0].imshow(im[:, :, ::-1]) # base + # ax[1].imshow(im2[:, :, ::-1]) # warped + + # Transform label coordinates + n = len(targets) + if n: + use_segments = any(x.any() for x in segments) + new = np.zeros((n, 4)) + if use_segments: # warp segments + segments = resample_segments(segments) # upsample + for i, segment in enumerate(segments): + xy = np.ones((len(segment), 3)) + xy[:, :2] = segment + xy = xy @ M.T # transform + xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine + + # clip + new[i] = segment2box(xy, width, height) + + else: # warp boxes + xy = np.ones((n * 4, 3)) + xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine + + # create new boxes + x = xy[:, [0, 2, 4, 6]] + y = xy[:, [1, 3, 5, 7]] + new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T + + # clip + new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) + new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) + + # filter candidates + i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) + targets = targets[i] + targets[:, 1:5] = new[i] + + return im, targets + + +def copy_paste(im, labels, segments, p=0.5): + # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) + n = len(segments) + if p and n: + h, w, c = im.shape # height, width, channels + im_new = np.zeros(im.shape, np.uint8) + + # calculate ioa first then select indexes randomly + boxes = np.stack([w - labels[:, 3], labels[:, 2], w - labels[:, 1], labels[:, 4]], axis=-1) # (n, 4) + ioa = bbox_ioa(boxes, labels[:, 1:5]) # intersection over area + indexes = np.nonzero((ioa < 0.30).all(1))[0] # (N, ) + n = len(indexes) + for j 
in random.sample(list(indexes), k=round(p * n)): + l, box, s = labels[j], boxes[j], segments[j] + labels = np.concatenate((labels, [[l[0], *box]]), 0) + segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) + cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (1, 1, 1), cv2.FILLED) + + result = cv2.flip(im, 1) # augment segments (flip left-right) + i = cv2.flip(im_new, 1).astype(bool) + im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug + + return im, labels, segments + + +def cutout(im, labels, p=0.5): + # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 + if random.random() < p: + h, w = im.shape[:2] + scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction + for s in scales: + mask_h = random.randint(1, int(h * s)) # create random masks + mask_w = random.randint(1, int(w * s)) + + # box + xmin = max(0, random.randint(0, w) - mask_w // 2) + ymin = max(0, random.randint(0, h) - mask_h // 2) + xmax = min(w, xmin + mask_w) + ymax = min(h, ymin + mask_h) + + # apply random color mask + im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] + + # return unobscured labels + if len(labels) and s > 0.03: + box = np.array([[xmin, ymin, xmax, ymax]], dtype=np.float32) + ioa = bbox_ioa(box, xywhn2xyxy(labels[:, 1:5], w, h))[0] # intersection over area + labels = labels[ioa < 0.60] # remove >60% obscured labels + + return labels + + +def mixup(im, labels, im2, labels2): + # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf + r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 + im = (im * r + im2 * (1 - r)).astype(np.uint8) + labels = np.concatenate((labels, labels2), 0) + return im, labels + + +def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) + # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio + w1, h1 = box1[2] - box1[0], box1[3] - box1[1] + w2, h2 = box2[2] - box2[0], box2[3] - box2[1] + ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio + return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates + + +def classify_albumentations( + augment=True, + size=224, + scale=(0.08, 1.0), + ratio=(0.75, 1.0 / 0.75), # 0.75, 1.33 + hflip=0.5, + vflip=0.0, + jitter=0.4, + mean=IMAGENET_MEAN, + std=IMAGENET_STD, + auto_aug=False): + # YOLOv5 classification Albumentations (optional, only used if package is installed) + prefix = colorstr('albumentations: ') + try: + import albumentations as A + from albumentations.pytorch import ToTensorV2 + check_version(A.__version__, '1.0.3', hard=True) # version requirement + if augment: # Resize and crop + T = [A.RandomResizedCrop(height=size, width=size, scale=scale, ratio=ratio)] + if auto_aug: + # TODO: implement AugMix, AutoAug & RandAug in albumentation + LOGGER.info(f'{prefix}auto augmentations are currently not supported') + else: + if hflip > 0: + T += [A.HorizontalFlip(p=hflip)] + if vflip > 0: + T += [A.VerticalFlip(p=vflip)] + if jitter > 0: + color_jitter = (float(jitter),) * 3 # repeat value for brightness, contrast, satuaration, 0 hue + T += [A.ColorJitter(*color_jitter, 0)] + else: # Use fixed crop for eval set (reproducibility) + T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)] + T += [A.Normalize(mean=mean, std=std), ToTensorV2()] # Normalize and convert to Tensor + LOGGER.info(prefix + ', 
'.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p)) + return A.Compose(T) + + except ImportError: # package not installed, skip + LOGGER.warning(f'{prefix}⚠️ not found, install with `pip install albumentations` (recommended)') + except Exception as e: + LOGGER.info(f'{prefix}{e}') + + +def classify_transforms(size=224): + # Transforms to apply if albumentations not installed + assert isinstance(size, int), f'ERROR: classify_transforms size {size} must be integer, not (list, tuple)' + # T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)]) + return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)]) + + +class LetterBox: + # YOLOv5 LetterBox class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()]) + def __init__(self, size=(640, 640), auto=False, stride=32): + super().__init__() + self.h, self.w = (size, size) if isinstance(size, int) else size + self.auto = auto # pass max size integer, automatically solve for short side using stride + self.stride = stride # used with auto + + def __call__(self, im): # im = np.array HWC + imh, imw = im.shape[:2] + r = min(self.h / imh, self.w / imw) # ratio of new/old + h, w = round(imh * r), round(imw * r) # resized image + hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else self.h, self.w + top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1) + im_out = np.full((self.h, self.w, 3), 114, dtype=im.dtype) + im_out[top:top + h, left:left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR) + return im_out + + +class CenterCrop: + # YOLOv5 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()]) + def __init__(self, size=640): + super().__init__() + self.h, self.w = (size, size) if isinstance(size, int) else size + + def __call__(self, im): # im = np.array HWC + imh, imw = im.shape[:2] + m = min(imh, imw) # min dimension + top, left = (imh - m) // 2, (imw - m) // 2 + return cv2.resize(im[top:top + m, left:left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR) + + +class ToTensor: + # YOLOv5 ToTensor class for image preprocessing, i.e. 
T.Compose([LetterBox(size), ToTensor()]) + def __init__(self, half=False): + super().__init__() + self.half = half + + def __call__(self, im): # im = np.array HWC in BGR order + im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1]) # HWC to CHW -> BGR to RGB -> contiguous + im = torch.from_numpy(im) # to torch + im = im.half() if self.half else im.float() # uint8 to fp16/32 + im /= 255.0 # 0-255 to 0.0-1.0 + return im diff --git a/utils/autoanchor.py b/utils/autoanchor.py new file mode 100644 index 0000000000000000000000000000000000000000..bd81af92c93353786ebcaced0cc2bbb419378071 --- /dev/null +++ b/utils/autoanchor.py @@ -0,0 +1,164 @@ +import random + +import numpy as np +import torch +import yaml +from tqdm import tqdm + +from utils import TryExcept +from utils.general import LOGGER, TQDM_BAR_FORMAT, colorstr + +PREFIX = colorstr('AutoAnchor: ') + + +def check_anchor_order(m): + # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary + a = m.anchors.prod(-1).mean(-1).view(-1) # mean anchor area per output layer + da = a[-1] - a[0] # delta a + ds = m.stride[-1] - m.stride[0] # delta s + if da and (da.sign() != ds.sign()): # same order + LOGGER.info(f'{PREFIX}Reversing anchor order') + m.anchors[:] = m.anchors.flip(0) + + +@TryExcept(f'{PREFIX}ERROR') +def check_anchors(dataset, model, thr=4.0, imgsz=640): + # Check anchor fit to data, recompute if necessary + m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() + shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) + scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale + wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh + + def metric(k): # compute metric + r = wh[:, None] / k[None] + x = torch.min(r, 1 / r).min(2)[0] # ratio metric + best = x.max(1)[0] # best_x + aat = (x > 1 / thr).float().sum(1).mean() # anchors above threshold + bpr = (best > 1 / thr).float().mean() # best possible recall + return bpr, aat + + stride = m.stride.to(m.anchors.device).view(-1, 1, 1) # model strides + anchors = m.anchors.clone() * stride # current anchors + bpr, aat = metric(anchors.cpu().view(-1, 2)) + s = f'\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). 
' + if bpr > 0.98: # threshold to recompute + LOGGER.info(f'{s}Current anchors are a good fit to dataset ✅') + else: + LOGGER.info(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...') + na = m.anchors.numel() // 2 # number of anchors + anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) + new_bpr = metric(anchors)[0] + if new_bpr > bpr: # replace anchors + anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) + m.anchors[:] = anchors.clone().view_as(m.anchors) + check_anchor_order(m) # must be in pixel-space (not grid-space) + m.anchors /= stride + s = f'{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)' + else: + s = f'{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)' + LOGGER.info(s) + + +def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): + """ Creates kmeans-evolved anchors from training dataset + + Arguments: + dataset: path to data.yaml, or a loaded dataset + n: number of anchors + img_size: image size used for training + thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 + gen: generations to evolve anchors using genetic algorithm + verbose: print all results + + Return: + k: kmeans evolved anchors + + Usage: + from utils.autoanchor import *; _ = kmean_anchors() + """ + from scipy.cluster.vq import kmeans + + npr = np.random + thr = 1 / thr + + def metric(k, wh): # compute metrics + r = wh[:, None] / k[None] + x = torch.min(r, 1 / r).min(2)[0] # ratio metric + # x = wh_iou(wh, torch.tensor(k)) # iou metric + return x, x.max(1)[0] # x, best_x + + def anchor_fitness(k): # mutation fitness + _, best = metric(torch.tensor(k, dtype=torch.float32), wh) + return (best * (best > thr).float()).mean() # fitness + + def print_results(k, verbose=True): + k = k[np.argsort(k.prod(1))] # sort small to large + x, best = metric(k, wh0) + bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr + s = f'{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\n' \ + f'{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' \ + f'past_thr={x[x > thr].mean():.3f}-mean: ' + for x in k: + s += '%i,%i, ' % (round(x[0]), round(x[1])) + if verbose: + LOGGER.info(s[:-2]) + return k + + if isinstance(dataset, str): # *.yaml file + with open(dataset, errors='ignore') as f: + data_dict = yaml.safe_load(f) # model dict + from utils.dataloaders import LoadImagesAndLabels + dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) + + # Get label wh + shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) + wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh + + # Filter + i = (wh0 < 3.0).any(1).sum() + if i: + LOGGER.info(f'{PREFIX}WARNING ⚠️ Extremely small objects found: {i} of {len(wh0)} labels are <3 pixels in size') + wh = wh0[(wh0 >= 2.0).any(1)].astype(np.float32) # filter > 2 pixels + # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 + + # Kmeans init + try: + LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...') + assert n <= len(wh) # apply overdetermined constraint + s = wh.std(0) # sigmas for whitening + k = kmeans(wh / s, n, iter=30)[0] * s # points + assert n == len(k) # kmeans may return fewer points than requested if wh is 
insufficient or too similar + except Exception: + LOGGER.warning(f'{PREFIX}WARNING ⚠️ switching strategies from kmeans to random init') + k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size # random init + wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0)) + k = print_results(k, verbose=False) + + # Plot + # k, d = [None] * 20, [None] * 20 + # for i in tqdm(range(1, 21)): + # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance + # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True) + # ax = ax.ravel() + # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.') + # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh + # ax[0].hist(wh[wh[:, 0]<100, 0],400) + # ax[1].hist(wh[wh[:, 1]<100, 1],400) + # fig.savefig('wh.png', dpi=200) + + # Evolve + f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma + pbar = tqdm(range(gen), bar_format=TQDM_BAR_FORMAT) # progress bar + for _ in pbar: + v = np.ones(sh) + while (v == 1).all(): # mutate until a change occurs (prevent duplicates) + v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) + kg = (k.copy() * v).clip(min=2.0) + fg = anchor_fitness(kg) + if fg > f: + f, k = fg, kg.copy() + pbar.desc = f'{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' + if verbose: + print_results(k, verbose) + + return print_results(k).astype(np.float32) diff --git a/utils/autobatch.py b/utils/autobatch.py new file mode 100644 index 0000000000000000000000000000000000000000..a5f0d519e59f7b13ef67e8934b61a2cd30770701 --- /dev/null +++ b/utils/autobatch.py @@ -0,0 +1,67 @@ +from copy import deepcopy + +import numpy as np +import torch + +from utils.general import LOGGER, colorstr +from utils.torch_utils import profile + + +def check_train_batch_size(model, imgsz=640, amp=True): + # Check YOLOv5 training batch size + with torch.cuda.amp.autocast(amp): + return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size + + +def autobatch(model, imgsz=640, fraction=0.8, batch_size=16): + # Automatically estimate best YOLOv5 batch size to use `fraction` of available CUDA memory + # Usage: + # import torch + # from utils.autobatch import autobatch + # model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False) + # print(autobatch(model)) + + # Check device + prefix = colorstr('AutoBatch: ') + LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}') + device = next(model.parameters()).device # get model device + if device.type == 'cpu': + LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}') + return batch_size + if torch.backends.cudnn.benchmark: + LOGGER.info(f'{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}') + return batch_size + + # Inspect CUDA memory + gb = 1 << 30 # bytes to GiB (1024 ** 3) + d = str(device).upper() # 'CUDA:0' + properties = torch.cuda.get_device_properties(device) # device properties + t = properties.total_memory / gb # GiB total + r = torch.cuda.memory_reserved(device) / gb # GiB reserved + a = torch.cuda.memory_allocated(device) / gb # GiB allocated + f = t - (r + a) # GiB free + LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free') + + # Profile batch sizes + batch_sizes = [1, 2, 4, 8, 16] + try: + img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes] + results = profile(img, model, n=3, device=device) + except Exception as e: 
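+        # Note: if profiling raises, `results` is never assigned, so the list comprehension
+        # below will raise NameError; the warning above is the only diagnostic in that case.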
+ LOGGER.warning(f'{prefix}{e}') + + # Fit a solution + y = [x[2] for x in results if x] # memory [2] + p = np.polyfit(batch_sizes[:len(y)], y, deg=1) # first degree polynomial fit + b = int((f * fraction - p[1]) / p[0]) # y intercept (optimal batch size) + if None in results: # some sizes failed + i = results.index(None) # first fail index + if b >= batch_sizes[i]: # y intercept above failure point + b = batch_sizes[max(i - 1, 0)] # select prior safe point + if b < 1 or b > 1024: # b outside of safe range + b = batch_size + LOGGER.warning(f'{prefix}WARNING ⚠️ CUDA anomaly detected, recommend restart environment and retry command.') + + fraction = (np.polyval(p, b) + r + a) / t # actual fraction predicted + LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅') + return b diff --git a/utils/callbacks.py b/utils/callbacks.py new file mode 100644 index 0000000000000000000000000000000000000000..893708bb7fcc10b0e51f5b51360a2c2c86afe462 --- /dev/null +++ b/utils/callbacks.py @@ -0,0 +1,71 @@ +import threading + + +class Callbacks: + """" + Handles all registered callbacks for YOLOv5 Hooks + """ + + def __init__(self): + # Define the available callbacks + self._callbacks = { + 'on_pretrain_routine_start': [], + 'on_pretrain_routine_end': [], + 'on_train_start': [], + 'on_train_epoch_start': [], + 'on_train_batch_start': [], + 'optimizer_step': [], + 'on_before_zero_grad': [], + 'on_train_batch_end': [], + 'on_train_epoch_end': [], + 'on_val_start': [], + 'on_val_batch_start': [], + 'on_val_image_end': [], + 'on_val_batch_end': [], + 'on_val_end': [], + 'on_fit_epoch_end': [], # fit = train + val + 'on_model_save': [], + 'on_train_end': [], + 'on_params_update': [], + 'teardown': [],} + self.stop_training = False # set True to interrupt training + + def register_action(self, hook, name='', callback=None): + """ + Register a new action to a callback hook + + Args: + hook: The callback hook name to register the action to + name: The name of the action for later reference + callback: The callback to fire + """ + assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" + assert callable(callback), f"callback '{callback}' is not callable" + self._callbacks[hook].append({'name': name, 'callback': callback}) + + def get_registered_actions(self, hook=None): + """" + Returns all the registered actions by callback hook + + Args: + hook: The name of the hook to check, defaults to all + """ + return self._callbacks[hook] if hook else self._callbacks + + def run(self, hook, *args, thread=False, **kwargs): + """ + Loop through the registered actions and fire all callbacks on main thread + + Args: + hook: The name of the hook to check, defaults to all + args: Arguments to receive from YOLOv5 + thread: (boolean) Run callbacks in daemon thread + kwargs: Keyword Arguments to receive from YOLOv5 + """ + + assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" + for logger in self._callbacks[hook]: + if thread: + threading.Thread(target=logger['callback'], args=args, kwargs=kwargs, daemon=True).start() + else: + logger['callback'](*args, **kwargs) diff --git a/utils/coco_utils.py b/utils/coco_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..87fa6e935a413f405f27ac8641390faf3ec10635 --- /dev/null +++ b/utils/coco_utils.py @@ -0,0 +1,108 @@ +import cv2 + +from pycocotools.coco import COCO +from pycocotools import mask as maskUtils + +# coco id: 
https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/ +all_instances_ids = [ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 27, 28, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 67, 70, + 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 84, 85, 86, 87, 88, 89, 90, +] + +all_stuff_ids = [ + 92, 93, 94, 95, 96, 97, 98, 99, 100, + 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, + 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, + 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, + 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, + 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, + 181, 182, + # other + 183, + # unlabeled + 0, +] + +# panoptic id: https://github.com/cocodataset/panopticapi/blob/master/panoptic_coco_categories.json +panoptic_stuff_ids = [ + 92, 93, 95, 100, + 107, 109, + 112, 118, 119, + 122, 125, 128, 130, + 133, 138, + 141, 144, 145, 147, 148, 149, + 151, 154, 155, 156, 159, + 161, 166, 168, + 171, 175, 176, 177, 178, 180, + 181, 184, 185, 186, 187, 188, 189, 190, + 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, + # unlabeled + 0, +] + +def getCocoIds(name = 'semantic'): + if 'instances' == name: + return all_instances_ids + elif 'stuff' == name: + return all_stuff_ids + elif 'panoptic' == name: + return all_instances_ids + panoptic_stuff_ids + else: # semantic + return all_instances_ids + all_stuff_ids + +def getMappingId(index, name = 'semantic'): + ids = getCocoIds(name = name) + return ids[index] + +def getMappingIndex(id, name = 'semantic'): + ids = getCocoIds(name = name) + return ids.index(id) + +# convert ann to rle encoded string +def annToRLE(ann, img_size): + h, w = img_size + segm = ann['segmentation'] + if list == type(segm): + # polygon -- a single object might consist of multiple parts + # we merge all parts into one mask rle code + rles = maskUtils.frPyObjects(segm, h, w) + rle = maskUtils.merge(rles) + elif list == type(segm['counts']): + # uncompressed RLE + rle = maskUtils.frPyObjects(segm, h, w) + else: + # rle + rle = ann['segmentation'] + return rle + +# decode ann to mask martix +def annToMask(ann, img_size): + rle = annToRLE(ann, img_size) + m = maskUtils.decode(rle) + return m + +# convert mask to polygans +def convert_to_polys(mask): + # opencv 3.2 + contours, hierarchy = cv2.findContours((mask).astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + # before opencv 3.2 + # contours, hierarchy = cv2.findContours((mask).astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + segmentation = [] + for contour in contours: + contour = contour.flatten().tolist() + if 4 < len(contour): + segmentation.append(contour) + + return segmentation diff --git a/utils/dataloaders.py b/utils/dataloaders.py new file mode 100644 index 0000000000000000000000000000000000000000..77604299954f7f7207370f83f6c01530f76fcbc1 --- /dev/null +++ b/utils/dataloaders.py @@ -0,0 +1,1217 @@ +import contextlib +import glob +import hashlib +import json +import math +import os +import random +import shutil +import time +from itertools import repeat +from multiprocessing.pool import Pool, ThreadPool +from pathlib import Path +from threading import Thread +from urllib.parse import urlparse + +import numpy as np +import psutil +import torch +import 
torch.nn.functional as F +import torchvision +import yaml +from PIL import ExifTags, Image, ImageOps +from torch.utils.data import DataLoader, Dataset, dataloader, distributed +from tqdm import tqdm + +from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste, + letterbox, mixup, random_perspective) +from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, check_dataset, check_requirements, + check_yaml, clean_str, cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, + xywh2xyxy, xywhn2xyxy, xyxy2xywhn) +from utils.torch_utils import torch_distributed_zero_first + +# Parameters +HELP_URL = 'See https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' +IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes +VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +PIN_MEMORY = str(os.getenv('PIN_MEMORY', True)).lower() == 'true' # global pin_memory for dataloaders + +# Get orientation exif tag +for orientation in ExifTags.TAGS.keys(): + if ExifTags.TAGS[orientation] == 'Orientation': + break + + +def get_hash(paths): + # Returns a single hash value of a list of paths (files or dirs) + size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes + h = hashlib.md5(str(size).encode()) # hash sizes + h.update(''.join(paths).encode()) # hash paths + return h.hexdigest() # return hash + + +def exif_size(img): + # Returns exif-corrected PIL size + s = img.size # (width, height) + with contextlib.suppress(Exception): + rotation = dict(img._getexif().items())[orientation] + if rotation in [6, 8]: # rotation 270 or 90 + s = (s[1], s[0]) + return s + + +def exif_transpose(image): + """ + Transpose a PIL image accordingly if it has an EXIF Orientation tag. + Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose() + + :param image: The image to transpose. + :return: An image. 
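+    The Orientation tag (0x0112) is deleted after transposing so downstream consumers do not rotate the image a second time.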
+ """ + exif = image.getexif() + orientation = exif.get(0x0112, 1) # default 1 + if orientation > 1: + method = { + 2: Image.FLIP_LEFT_RIGHT, + 3: Image.ROTATE_180, + 4: Image.FLIP_TOP_BOTTOM, + 5: Image.TRANSPOSE, + 6: Image.ROTATE_270, + 7: Image.TRANSVERSE, + 8: Image.ROTATE_90}.get(orientation) + if method is not None: + image = image.transpose(method) + del exif[0x0112] + image.info["exif"] = exif.tobytes() + return image + + +def seed_worker(worker_id): + # Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader + worker_seed = torch.initial_seed() % 2 ** 32 + np.random.seed(worker_seed) + random.seed(worker_seed) + + +def create_dataloader(path, + imgsz, + batch_size, + stride, + single_cls=False, + hyp=None, + augment=False, + cache=False, + pad=0.0, + rect=False, + rank=-1, + workers=8, + image_weights=False, + close_mosaic=False, + quad=False, + min_items=0, + prefix='', + shuffle=False): + if rect and shuffle: + LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') + shuffle = False + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = LoadImagesAndLabels( + path, + imgsz, + batch_size, + augment=augment, # augmentation + hyp=hyp, # hyperparameters + rect=rect, # rectangular batches + cache_images=cache, + single_cls=single_cls, + stride=int(stride), + pad=pad, + image_weights=image_weights, + min_items=min_items, + prefix=prefix) + + batch_size = min(batch_size, len(dataset)) + nd = torch.cuda.device_count() # number of CUDA devices + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + #loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates + loader = DataLoader if image_weights or close_mosaic else InfiniteDataLoader + generator = torch.Generator() + generator.manual_seed(6148914691236517205 + RANK) + return loader(dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=PIN_MEMORY, + collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn, + worker_init_fn=seed_worker, + generator=generator), dataset + + +class InfiniteDataLoader(dataloader.DataLoader): + """ Dataloader that reuses workers + + Uses same syntax as vanilla DataLoader + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) + self.iterator = super().__iter__() + + def __len__(self): + return len(self.batch_sampler.sampler) + + def __iter__(self): + for _ in range(len(self)): + yield next(self.iterator) + + +class _RepeatSampler: + """ Sampler that repeats forever + + Args: + sampler (Sampler) + """ + + def __init__(self, sampler): + self.sampler = sampler + + def __iter__(self): + while True: + yield from iter(self.sampler) + + +class LoadScreenshots: + # YOLOv5 screenshot dataloader, i.e. 
`python detect.py --source "screen 0 100 100 512 256"` + def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None): + # source = [screen_number left top width height] (pixels) + check_requirements('mss') + import mss + + source, *params = source.split() + self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0 + if len(params) == 1: + self.screen = int(params[0]) + elif len(params) == 4: + left, top, width, height = (int(x) for x in params) + elif len(params) == 5: + self.screen, left, top, width, height = (int(x) for x in params) + self.img_size = img_size + self.stride = stride + self.transforms = transforms + self.auto = auto + self.mode = 'stream' + self.frame = 0 + self.sct = mss.mss() + + # Parse monitor shape + monitor = self.sct.monitors[self.screen] + self.top = monitor["top"] if top is None else (monitor["top"] + top) + self.left = monitor["left"] if left is None else (monitor["left"] + left) + self.width = width or monitor["width"] + self.height = height or monitor["height"] + self.monitor = {"left": self.left, "top": self.top, "width": self.width, "height": self.height} + + def __iter__(self): + return self + + def __next__(self): + # mss screen capture: get raw pixels from the screen as np array + im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR + s = f"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: " + + if self.transforms: + im = self.transforms(im0) # transforms + else: + im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize + im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + im = np.ascontiguousarray(im) # contiguous + self.frame += 1 + return str(self.screen), im, im0, None, s # screen, img, original img, im0s, s + + +class LoadImages: + # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4` + def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): + files = [] + for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: + p = str(Path(p).resolve()) + if '*' in p: + files.extend(sorted(glob.glob(p, recursive=True))) # glob + elif os.path.isdir(p): + files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir + elif os.path.isfile(p): + files.append(p) # files + else: + raise FileNotFoundError(f'{p} does not exist') + + images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS] + videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS] + ni, nv = len(images), len(videos) + + self.img_size = img_size + self.stride = stride + self.files = images + videos + self.nf = ni + nv # number of files + self.video_flag = [False] * ni + [True] * nv + self.mode = 'image' + self.auto = auto + self.transforms = transforms # optional + self.vid_stride = vid_stride # video frame-rate stride + if any(videos): + self._new_video(videos[0]) # new video + else: + self.cap = None + assert self.nf > 0, f'No images or videos found in {p}. 
' \ + f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}' + + def __iter__(self): + self.count = 0 + return self + + def __next__(self): + if self.count == self.nf: + raise StopIteration + path = self.files[self.count] + + if self.video_flag[self.count]: + # Read video + self.mode = 'video' + for _ in range(self.vid_stride): + self.cap.grab() + ret_val, im0 = self.cap.retrieve() + while not ret_val: + self.count += 1 + self.cap.release() + if self.count == self.nf: # last video + raise StopIteration + path = self.files[self.count] + self._new_video(path) + ret_val, im0 = self.cap.read() + + self.frame += 1 + # im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False + s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' + + else: + # Read image + self.count += 1 + im0 = cv2.imread(path) # BGR + assert im0 is not None, f'Image Not Found {path}' + s = f'image {self.count}/{self.nf} {path}: ' + + if self.transforms: + im = self.transforms(im0) # transforms + else: + im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize + im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + im = np.ascontiguousarray(im) # contiguous + + return path, im, im0, self.cap, s + + def _new_video(self, path): + # Create a new video capture object + self.frame = 0 + self.cap = cv2.VideoCapture(path) + self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride) + self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees + # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493 + + def _cv2_rotate(self, im): + # Rotate a cv2 video manually + if self.orientation == 0: + return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE) + elif self.orientation == 180: + return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE) + elif self.orientation == 90: + return cv2.rotate(im, cv2.ROTATE_180) + return im + + def __len__(self): + return self.nf # number of files + + +class LoadStreams: + # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` + def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): + torch.backends.cudnn.benchmark = True # faster for fixed-size inference + self.mode = 'stream' + self.img_size = img_size + self.stride = stride + self.vid_stride = vid_stride # video frame-rate stride + sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources] + n = len(sources) + self.sources = [clean_str(x) for x in sources] # clean source names for later + self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n + for i, s in enumerate(sources): # index, source + # Start thread to read frames from video stream + st = f'{i + 1}/{n}: {s}... ' + if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video + # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc' + check_requirements(('pafy', 'youtube_dl==2020.12.2')) + import pafy + s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL + s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam + if s == 0: + assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.' + assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. Rerun command in a local environment.' 
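+            # Open the stream and probe its properties: frame count falls back to float('inf') for
+            # live streams, FPS falls back to 30 when the backend reports 0 or NaN, and the first
+            # frame is read synchronously so self.imgs[i] is populated before the reader thread starts.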
+ cap = cv2.VideoCapture(s) + assert cap.isOpened(), f'{st}Failed to open {s}' + w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan + self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback + self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback + + _, self.imgs[i] = cap.read() # guarantee first frame + self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) + LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)") + self.threads[i].start() + LOGGER.info('') # newline + + # check for common shapes + s = np.stack([letterbox(x, img_size, stride=stride, auto=auto)[0].shape for x in self.imgs]) + self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal + self.auto = auto and self.rect + self.transforms = transforms # optional + if not self.rect: + LOGGER.warning('WARNING ⚠️ Stream shapes differ. For optimal performance supply similarly-shaped streams.') + + def update(self, i, cap, stream): + # Read stream `i` frames in daemon thread + n, f = 0, self.frames[i] # frame number, frame array + while cap.isOpened() and n < f: + n += 1 + cap.grab() # .read() = .grab() followed by .retrieve() + if n % self.vid_stride == 0: + success, im = cap.retrieve() + if success: + self.imgs[i] = im + else: + LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.') + self.imgs[i] = np.zeros_like(self.imgs[i]) + cap.open(stream) # re-open stream if signal was lost + time.sleep(0.0) # wait time + + def __iter__(self): + self.count = -1 + return self + + def __next__(self): + self.count += 1 + if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit + cv2.destroyAllWindows() + raise StopIteration + + im0 = self.imgs.copy() + if self.transforms: + im = np.stack([self.transforms(x) for x in im0]) # transforms + else: + im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0]) # resize + im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW + im = np.ascontiguousarray(im) # contiguous + + return self.sources, im, im0, None, '' + + def __len__(self): + return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years + + +def img2label_paths(img_paths): + # Define label paths as a function of image paths + sa, sb = f'{os.sep}images{os.sep}', f'{os.sep}labels{os.sep}' # /images/, /labels/ substrings + return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths] + + +class LoadImagesAndLabels(Dataset): + # YOLOv5 train_loader/val_loader, loads images and labels for training and validation + cache_version = 0.6 # dataset labels *.cache version + rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4] + + def __init__(self, + path, + img_size=640, + batch_size=16, + augment=False, + hyp=None, + rect=False, + image_weights=False, + cache_images=False, + single_cls=False, + stride=32, + pad=0.0, + min_items=0, + prefix=''): + self.img_size = img_size + self.augment = augment + self.hyp = hyp + self.image_weights = image_weights + self.rect = False if image_weights else rect + self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training) + self.mosaic_border = [-img_size // 2, -img_size // 2] + self.stride = 
stride + self.path = path + self.albumentations = Albumentations(size=img_size) if augment else None + + try: + f = [] # image files + for p in path if isinstance(path, list) else [path]: + p = Path(p) # os-agnostic + if p.is_dir(): # dir + f += glob.glob(str(p / '**' / '*.*'), recursive=True) + # f = list(p.rglob('*.*')) # pathlib + elif p.is_file(): # file + with open(p) as t: + t = t.read().strip().splitlines() + parent = str(p.parent) + os.sep + f += [x.replace('./', parent, 1) if x.startswith('./') else x for x in t] # to global path + # f += [p.parent / x.lstrip(os.sep) for x in t] # to global path (pathlib) + else: + raise FileNotFoundError(f'{prefix}{p} does not exist') + self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) + # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib + assert self.im_files, f'{prefix}No images found' + except Exception as e: + raise Exception(f'{prefix}Error loading data from {path}: {e}\n{HELP_URL}') from e + + # Check cache + self.label_files = img2label_paths(self.im_files) # labels + cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') + try: + cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict + assert cache['version'] == self.cache_version # matches current version + assert cache['hash'] == get_hash(self.label_files + self.im_files) # identical hash + except Exception: + cache, exists = self.cache_labels(cache_path, prefix), False # run cache ops + + # Display cache + nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total + if exists and LOCAL_RANK in {-1, 0}: + d = f"Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt" + tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=TQDM_BAR_FORMAT) # display cache results + if cache['msgs']: + LOGGER.info('\n'.join(cache['msgs'])) # display warnings + assert nf > 0 or not augment, f'{prefix}No labels found in {cache_path}, can not start training. {HELP_URL}' + + # Read cache + [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items + labels, shapes, self.segments = zip(*cache.values()) + nl = len(np.concatenate(labels, 0)) # number of labels + assert nl > 0 or not augment, f'{prefix}All labels empty in {cache_path}, can not start training. 
{HELP_URL}' + self.labels = list(labels) + self.shapes = np.array(shapes) + self.im_files = list(cache.keys()) # update + self.label_files = img2label_paths(cache.keys()) # update + + # Filter images + if min_items: + include = np.array([len(x) >= min_items for x in self.labels]).nonzero()[0].astype(int) + LOGGER.info(f'{prefix}{n - len(include)}/{n} images filtered from dataset') + self.im_files = [self.im_files[i] for i in include] + self.label_files = [self.label_files[i] for i in include] + self.labels = [self.labels[i] for i in include] + self.segments = [self.segments[i] for i in include] + self.shapes = self.shapes[include] # wh + + # Create indices + n = len(self.shapes) # number of images + bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index + nb = bi[-1] + 1 # number of batches + self.batch = bi # batch index of image + self.n = n + self.indices = range(n) + + # Update labels + include_class = [] # filter labels to include only these classes (optional) + include_class_array = np.array(include_class).reshape(1, -1) + for i, (label, segment) in enumerate(zip(self.labels, self.segments)): + if include_class: + j = (label[:, 0:1] == include_class_array).any(1) + self.labels[i] = label[j] + if segment: + self.segments[i] = segment[j] + if single_cls: # single-class training, merge all classes into 0 + self.labels[i][:, 0] = 0 + + # Rectangular Training + if self.rect: + # Sort by aspect ratio + s = self.shapes # wh + ar = s[:, 1] / s[:, 0] # aspect ratio + irect = ar.argsort() + self.im_files = [self.im_files[i] for i in irect] + self.label_files = [self.label_files[i] for i in irect] + self.labels = [self.labels[i] for i in irect] + self.segments = [self.segments[i] for i in irect] + self.shapes = s[irect] # wh + ar = ar[irect] + + # Set training image shapes + shapes = [[1, 1]] * nb + for i in range(nb): + ari = ar[bi == i] + mini, maxi = ari.min(), ari.max() + if maxi < 1: + shapes[i] = [maxi, 1] + elif mini > 1: + shapes[i] = [1, 1 / mini] + + self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride + + # Cache images into RAM/disk for faster training + if cache_images == 'ram' and not self.check_cache_ram(prefix=prefix): + cache_images = False + self.ims = [None] * n + self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files] + if cache_images: + b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes + self.im_hw0, self.im_hw = [None] * n, [None] * n + fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image + results = ThreadPool(NUM_THREADS).imap(fcn, range(n)) + pbar = tqdm(enumerate(results), total=n, bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0) + for i, x in pbar: + if cache_images == 'disk': + b += self.npy_files[i].stat().st_size + else: # 'ram' + self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) + b += self.ims[i].nbytes + pbar.desc = f'{prefix}Caching images ({b / gb:.1f}GB {cache_images})' + pbar.close() + + def check_cache_ram(self, safety_margin=0.1, prefix=''): + # Check image caching requirements vs available memory + b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes + n = min(self.n, 30) # extrapolate from 30 random images + for _ in range(n): + im = cv2.imread(random.choice(self.im_files)) # sample image + ratio = self.img_size / max(im.shape[0], im.shape[1]) # max(h, w) # ratio + b += im.nbytes * ratio ** 2 + mem_required = b * self.n / n # GB required to cache dataset into RAM + mem = 
psutil.virtual_memory() + cache = mem_required * (1 + safety_margin) < mem.available # to cache or not to cache, that is the question + if not cache: + LOGGER.info(f"{prefix}{mem_required / gb:.1f}GB RAM required, " + f"{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, " + f"{'caching images ✅' if cache else 'not caching images ⚠️'}") + return cache + + def cache_labels(self, path=Path('./labels.cache'), prefix=''): + # Cache dataset labels, check images and read shapes + x = {} # dict + nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages + desc = f"{prefix}Scanning {path.parent / path.stem}..." + with Pool(NUM_THREADS) as pool: + pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), + desc=desc, + total=len(self.im_files), + bar_format=TQDM_BAR_FORMAT) + for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: + nm += nm_f + nf += nf_f + ne += ne_f + nc += nc_f + if im_file: + x[im_file] = [lb, shape, segments] + if msg: + msgs.append(msg) + pbar.desc = f"{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt" + + pbar.close() + if msgs: + LOGGER.info('\n'.join(msgs)) + if nf == 0: + LOGGER.warning(f'{prefix}WARNING ⚠️ No labels found in {path}. {HELP_URL}') + x['hash'] = get_hash(self.label_files + self.im_files) + x['results'] = nf, nm, ne, nc, len(self.im_files) + x['msgs'] = msgs # warnings + x['version'] = self.cache_version # cache version + try: + np.save(path, x) # save cache for next time + path.with_suffix('.cache.npy').rename(path) # remove .npy suffix + LOGGER.info(f'{prefix}New cache created: {path}') + except Exception as e: + LOGGER.warning(f'{prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable: {e}') # not writeable + return x + + def __len__(self): + return len(self.im_files) + + # def __iter__(self): + # self.count = -1 + # print('ran dataset iter') + # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) + # return self + + def __getitem__(self, index): + index = self.indices[index] # linear, shuffled, or image_weights + + hyp = self.hyp + mosaic = self.mosaic and random.random() < hyp['mosaic'] + if mosaic: + # Load mosaic + img, labels = self.load_mosaic(index) + shapes = None + + # MixUp augmentation + if random.random() < hyp['mixup']: + img, labels = mixup(img, labels, *self.load_mosaic(random.randint(0, self.n - 1))) + + else: + # Load image + img, (h0, w0), (h, w) = self.load_image(index) + + # Letterbox + shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape + img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) + shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling + + labels = self.labels[index].copy() + if labels.size: # normalized xywh to pixel xyxy format + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) + + if self.augment: + img, labels = random_perspective(img, + labels, + degrees=hyp['degrees'], + translate=hyp['translate'], + scale=hyp['scale'], + shear=hyp['shear'], + perspective=hyp['perspective']) + + nl = len(labels) # number of labels + if nl: + labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3) + + if self.augment: + # Albumentations + img, labels = self.albumentations(img, labels) + nl = len(labels) # update after albumentations + + # HSV color-space + augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], 
vgain=hyp['hsv_v']) + + # Flip up-down + if random.random() < hyp['flipud']: + img = np.flipud(img) + if nl: + labels[:, 2] = 1 - labels[:, 2] + + # Flip left-right + if random.random() < hyp['fliplr']: + img = np.fliplr(img) + if nl: + labels[:, 1] = 1 - labels[:, 1] + + # Cutouts + # labels = cutout(img, labels, p=0.5) + # nl = len(labels) # update after cutout + + labels_out = torch.zeros((nl, 6)) + if nl: + labels_out[:, 1:] = torch.from_numpy(labels) + + # Convert + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img = np.ascontiguousarray(img) + + return torch.from_numpy(img), labels_out, self.im_files[index], shapes + + def load_image(self, i): + # Loads 1 image from dataset index 'i', returns (im, original hw, resized hw) + im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i], + if im is None: # not cached in RAM + if fn.exists(): # load npy + im = np.load(fn) + else: # read image + im = cv2.imread(f) # BGR + assert im is not None, f'Image Not Found {f}' + h0, w0 = im.shape[:2] # orig hw + r = self.img_size / max(h0, w0) # ratio + if r != 1: # if sizes are not equal + interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA + im = cv2.resize(im, (int(w0 * r), int(h0 * r)), interpolation=interp) + return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized + return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized + + def cache_images_to_disk(self, i): + # Saves an image as an *.npy file for faster loading + f = self.npy_files[i] + if not f.exists(): + np.save(f.as_posix(), cv2.imread(self.im_files[i])) + + def load_mosaic(self, index): + # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic + labels4, segments4 = [], [] + s = self.img_size + yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y + indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices + random.shuffle(indices) + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = self.load_image(index) + + # place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padw, padh) for x in segments] + labels4.append(labels) + segments4.extend(segments) + + # Concat/clip labels + labels4 = np.concatenate(labels4, 0) + for x in (labels4[:, 1:], *segments4): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img4, labels4 = replicate(img4, 
labels4) # replicate + + # Augment + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste']) + img4, labels4 = random_perspective(img4, + labels4, + segments4, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img4, labels4 + + def load_mosaic9(self, index): + # YOLOv5 9-mosaic loader. Loads 1 image + 8 random images into a 9-image mosaic + labels9, segments9 = [], [] + s = self.img_size + indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices + random.shuffle(indices) + hp, wp = -1, -1 # height, width previous + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = self.load_image(index) + + # place img in img9 + if i == 0: # center + img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + h0, w0 = h, w + c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates + elif i == 1: # top + c = s, s - h, s + w, s + elif i == 2: # top right + c = s + wp, s - h, s + wp + w, s + elif i == 3: # right + c = s + w0, s, s + w0 + w, s + h + elif i == 4: # bottom right + c = s + w0, s + hp, s + w0 + w, s + hp + h + elif i == 5: # bottom + c = s + w0 - w, s + h0, s + w0, s + h0 + h + elif i == 6: # bottom left + c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h + elif i == 7: # left + c = s - w, s + h0 - h, s, s + h0 + elif i == 8: # top left + c = s - w, s + h0 - hp - h, s, s + h0 - hp + + padx, pady = c[:2] + x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padx, pady) for x in segments] + labels9.append(labels) + segments9.extend(segments) + + # Image + img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax] + hp, wp = h, w # height, width previous + + # Offset + yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y + img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] + + # Concat/clip labels + labels9 = np.concatenate(labels9, 0) + labels9[:, [1, 3]] -= xc + labels9[:, [2, 4]] -= yc + c = np.array([xc, yc]) # centers + segments9 = [x - c for x in segments9] + + for x in (labels9[:, 1:], *segments9): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img9, labels9 = replicate(img9, labels9) # replicate + + # Augment + img9, labels9, segments9 = copy_paste(img9, labels9, segments9, p=self.hyp['copy_paste']) + img9, labels9 = random_perspective(img9, + labels9, + segments9, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img9, labels9 + + @staticmethod + def collate_fn(batch): + im, label, path, shapes = zip(*batch) # transposed + for i, lb in enumerate(label): + lb[:, 0] = i # add target image index for build_targets() + return torch.stack(im, 0), torch.cat(label, 0), path, shapes + + @staticmethod + def collate_fn4(batch): + im, label, path, shapes = zip(*batch) # transposed + n = len(shapes) // 4 + im4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] + + ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]]) + wo = torch.tensor([[0.0, 0, 1, 
0, 0, 0]]) + s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]]) # scale + for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW + i *= 4 + if random.random() < 0.5: + im1 = F.interpolate(im[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear', + align_corners=False)[0].type(im[i].type()) + lb = label[i] + else: + im1 = torch.cat((torch.cat((im[i], im[i + 1]), 1), torch.cat((im[i + 2], im[i + 3]), 1)), 2) + lb = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s + im4.append(im1) + label4.append(lb) + + for i, lb in enumerate(label4): + lb[:, 0] = i # add target image index for build_targets() + + return torch.stack(im4, 0), torch.cat(label4, 0), path4, shapes4 + + +# Ancillary functions -------------------------------------------------------------------------------------------------- +def flatten_recursive(path=DATASETS_DIR / 'coco128'): + # Flatten a recursive directory by bringing all files to top level + new_path = Path(f'{str(path)}_flat') + if os.path.exists(new_path): + shutil.rmtree(new_path) # delete output folder + os.makedirs(new_path) # make new output folder + for file in tqdm(glob.glob(f'{str(Path(path))}/**/*.*', recursive=True)): + shutil.copyfile(file, new_path / Path(file).name) + + +def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.dataloaders import *; extract_boxes() + # Convert detection dataset into classification dataset, with one directory per class + path = Path(path) # images dir + shutil.rmtree(path / 'classification') if (path / 'classification').is_dir() else None # remove existing + files = list(path.rglob('*.*')) + n = len(files) # number of files + for im_file in tqdm(files, total=n): + if im_file.suffix[1:] in IMG_FORMATS: + # image + im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB + h, w = im.shape[:2] + + # labels + lb_file = Path(img2label_paths([str(im_file)])[0]) + if Path(lb_file).exists(): + with open(lb_file) as f: + lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels + + for j, x in enumerate(lb): + c = int(x[0]) # class + f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename + if not f.parent.is_dir(): + f.parent.mkdir(parents=True) + + b = x[1:] * [w, h, w, h] # box + # b[2:] = b[2:].max() # rectangle to square + b[2:] = b[2:] * 1.2 + 3 # pad + b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int) + + b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image + b[[1, 3]] = np.clip(b[[1, 3]], 0, h) + assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' + + +def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False): + """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files + Usage: from utils.dataloaders import *; autosplit() + Arguments + path: Path to images directory + weights: Train, val, test weights (list, tuple) + annotated_only: Only use images with an annotated txt file + """ + path = Path(path) # images dir + files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only + n = len(files) # number of files + random.seed(0) # for reproducibility + indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split + + txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files + for x in txt: + if (path.parent / x).exists(): + (path.parent / x).unlink() # remove existing + + print(f'Autosplitting images from 
{path}' + ', using *.txt labeled images only' * annotated_only) + for i, img in tqdm(zip(indices, files), total=n): + if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label + with open(path.parent / txt[i], 'a') as f: + f.write(f'./{img.relative_to(path.parent).as_posix()}' + '\n') # add image to txt file + + +def verify_image_label(args): + # Verify one image-label pair + im_file, lb_file, prefix = args + nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments + try: + # verify images + im = Image.open(im_file) + im.verify() # PIL verify + shape = exif_size(im) # image size + assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' + assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}' + if im.format.lower() in ('jpg', 'jpeg'): + with open(im_file, 'rb') as f: + f.seek(-2, 2) + if f.read() != b'\xff\xd9': # corrupt JPEG + ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100) + msg = f'{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved' + + # verify labels + if os.path.isfile(lb_file): + nf = 1 # label found + with open(lb_file) as f: + lb = [x.split() for x in f.read().strip().splitlines() if len(x)] + if any(len(x) > 6 for x in lb): # is segment + classes = np.array([x[0] for x in lb], dtype=np.float32) + segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...) + lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) + lb = np.array(lb, dtype=np.float32) + nl = len(lb) + if nl: + assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected' + assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}' + assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}' + _, i = np.unique(lb, axis=0, return_index=True) + if len(i) < nl: # duplicate row check + lb = lb[i] # remove duplicates + if segments: + segments = [segments[x] for x in i] + msg = f'{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed' + else: + ne = 1 # label empty + lb = np.zeros((0, 5), dtype=np.float32) + else: + nm = 1 # label missing + lb = np.zeros((0, 5), dtype=np.float32) + return im_file, lb, shape, segments, nm, nf, ne, nc, msg + except Exception as e: + nc = 1 + msg = f'{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}' + return [None, None, None, None, nm, nf, ne, nc, msg] + + +class HUBDatasetStats(): + """ Class for generating HUB dataset JSON and `-hub` dataset directory + + Arguments + path: Path to data.yaml or data.zip (with data.yaml inside data.zip) + autodownload: Attempt to download dataset if not found locally + + Usage + from utils.dataloaders import HUBDatasetStats + stats = HUBDatasetStats('coco128.yaml', autodownload=True) # usage 1 + stats = HUBDatasetStats('path/to/coco128.zip') # usage 2 + stats.get_json(save=False) + stats.process_images() + """ + + def __init__(self, path='coco128.yaml', autodownload=False): + # Initialize class + zipped, data_dir, yaml_path = self._unzip(Path(path)) + try: + with open(check_yaml(yaml_path), errors='ignore') as f: + data = yaml.safe_load(f) # data dict + if zipped: + data['path'] = data_dir + except Exception as e: + raise Exception("error/HUB/dataset_stats/yaml_load") from e + + check_dataset(data, autodownload) # download dataset if missing + self.hub_dir = Path(data['path'] + '-hub') + self.im_dir = self.hub_dir / 
'images' + self.im_dir.mkdir(parents=True, exist_ok=True) # makes /images + self.stats = {'nc': data['nc'], 'names': list(data['names'].values())} # statistics dictionary + self.data = data + + @staticmethod + def _find_yaml(dir): + # Return data.yaml file + files = list(dir.glob('*.yaml')) or list(dir.rglob('*.yaml')) # try root level first and then recursive + assert files, f'No *.yaml file found in {dir}' + if len(files) > 1: + files = [f for f in files if f.stem == dir.stem] # prefer *.yaml files that match dir name + assert files, f'Multiple *.yaml files found in {dir}, only 1 *.yaml file allowed' + assert len(files) == 1, f'Multiple *.yaml files found: {files}, only 1 *.yaml file allowed in {dir}' + return files[0] + + def _unzip(self, path): + # Unzip data.zip + if not str(path).endswith('.zip'): # path is data.yaml + return False, None, path + assert Path(path).is_file(), f'Error unzipping {path}, file not found' + unzip_file(path, path=path.parent) + dir = path.with_suffix('') # dataset directory == zip name + assert dir.is_dir(), f'Error unzipping {path}, {dir} not found. path/to/abc.zip MUST unzip to path/to/abc/' + return True, str(dir), self._find_yaml(dir) # zipped, data_dir, yaml_path + + def _hub_ops(self, f, max_dim=1920): + # HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing + f_new = self.im_dir / Path(f).name # dataset-hub image filename + try: # use PIL + im = Image.open(f) + r = max_dim / max(im.height, im.width) # ratio + if r < 1.0: # image too large + im = im.resize((int(im.width * r), int(im.height * r))) + im.save(f_new, 'JPEG', quality=50, optimize=True) # save + except Exception as e: # use OpenCV + LOGGER.info(f'WARNING ⚠️ HUB ops PIL failure {f}: {e}') + im = cv2.imread(f) + im_height, im_width = im.shape[:2] + r = max_dim / max(im_height, im_width) # ratio + if r < 1.0: # image too large + im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_AREA) + cv2.imwrite(str(f_new), im) + + def get_json(self, save=False, verbose=False): + # Return dataset JSON for Ultralytics HUB + def _round(labels): + # Update labels to integer class and 6 decimal place floats + return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels] + + for split in 'train', 'val', 'test': + if self.data.get(split) is None: + self.stats[split] = None # i.e. 
no test set + continue + dataset = LoadImagesAndLabels(self.data[split]) # load dataset + x = np.array([ + np.bincount(label[:, 0].astype(int), minlength=self.data['nc']) + for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics')]) # shape(128x80) + self.stats[split] = { + 'instance_stats': { + 'total': int(x.sum()), + 'per_class': x.sum(0).tolist()}, + 'image_stats': { + 'total': dataset.n, + 'unlabelled': int(np.all(x == 0, 1).sum()), + 'per_class': (x > 0).sum(0).tolist()}, + 'labels': [{ + str(Path(k).name): _round(v.tolist())} for k, v in zip(dataset.im_files, dataset.labels)]} + + # Save, print and return + if save: + stats_path = self.hub_dir / 'stats.json' + print(f'Saving {stats_path.resolve()}...') + with open(stats_path, 'w') as f: + json.dump(self.stats, f) # save stats.json + if verbose: + print(json.dumps(self.stats, indent=2, sort_keys=False)) + return self.stats + + def process_images(self): + # Compress images for Ultralytics HUB + for split in 'train', 'val', 'test': + if self.data.get(split) is None: + continue + dataset = LoadImagesAndLabels(self.data[split]) # load dataset + desc = f'{split} images' + for _ in tqdm(ThreadPool(NUM_THREADS).imap(self._hub_ops, dataset.im_files), total=dataset.n, desc=desc): + pass + print(f'Done. All images saved to {self.im_dir}') + return self.im_dir + + +# Classification dataloaders ------------------------------------------------------------------------------------------- +class ClassificationDataset(torchvision.datasets.ImageFolder): + """ + YOLOv5 Classification Dataset. + Arguments + root: Dataset path + transform: torchvision transforms, used by default + album_transform: Albumentations transforms, used if installed + """ + + def __init__(self, root, augment, imgsz, cache=False): + super().__init__(root=root) + self.torch_transforms = classify_transforms(imgsz) + self.album_transforms = classify_albumentations(augment, imgsz) if augment else None + self.cache_ram = cache is True or cache == 'ram' + self.cache_disk = cache == 'disk' + self.samples = [list(x) + [Path(x[0]).with_suffix('.npy'), None] for x in self.samples] # file, index, npy, im + + def __getitem__(self, i): + f, j, fn, im = self.samples[i] # filename, index, filename.with_suffix('.npy'), image + if self.cache_ram and im is None: + im = self.samples[i][3] = cv2.imread(f) + elif self.cache_disk: + if not fn.exists(): # load npy + np.save(fn.as_posix(), cv2.imread(f)) + im = np.load(fn) + else: # read image + im = cv2.imread(f) # BGR + if self.album_transforms: + sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))["image"] + else: + sample = self.torch_transforms(im) + return sample, j + + +def create_classification_dataloader(path, + imgsz=224, + batch_size=16, + augment=True, + cache=False, + rank=-1, + workers=8, + shuffle=True): + # Returns Dataloader object to be used with YOLOv5 Classifier + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = ClassificationDataset(root=path, imgsz=imgsz, augment=augment, cache=cache) + batch_size = min(batch_size, len(dataset)) + nd = torch.cuda.device_count() + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + generator = torch.Generator() + generator.manual_seed(6148914691236517205 + RANK) + return InfiniteDataLoader(dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + 
sampler=sampler, + pin_memory=PIN_MEMORY, + worker_init_fn=seed_worker, + generator=generator) # or DataLoader(persistent_workers=True) diff --git a/utils/downloads.py b/utils/downloads.py new file mode 100644 index 0000000000000000000000000000000000000000..a108313b3988a59948b6db609659358ea236ac4e --- /dev/null +++ b/utils/downloads.py @@ -0,0 +1,103 @@ +import logging +import os +import subprocess +import urllib +from pathlib import Path + +import requests +import torch + + +def is_url(url, check=True): + # Check if string is URL and check if URL exists + try: + url = str(url) + result = urllib.parse.urlparse(url) + assert all([result.scheme, result.netloc]) # check if is url + return (urllib.request.urlopen(url).getcode() == 200) if check else True # check if exists online + except (AssertionError, urllib.request.HTTPError): + return False + + +def gsutil_getsize(url=''): + # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du + s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8') + return eval(s.split(' ')[0]) if len(s) else 0 # bytes + + +def url_getsize(url='https://ultralytics.com/images/bus.jpg'): + # Return downloadable file size in bytes + response = requests.head(url, allow_redirects=True) + return int(response.headers.get('content-length', -1)) + + +def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): + # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes + from utils.general import LOGGER + + file = Path(file) + assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}" + try: # url1 + LOGGER.info(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, str(file), progress=LOGGER.level <= logging.INFO) + assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check + except Exception as e: # url2 + if file.exists(): + file.unlink() # remove partial downloads + LOGGER.info(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') + os.system(f"curl -# -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail + finally: + if not file.exists() or file.stat().st_size < min_bytes: # check + if file.exists(): + file.unlink() # remove partial downloads + LOGGER.info(f"ERROR: {assert_msg}\n{error_msg}") + LOGGER.info('') + + +def attempt_download(file, repo='ultralytics/yolov5', release='v7.0'): + # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v7.0', etc. + from utils.general import LOGGER + + def github_assets(repository, version='latest'): + # Return GitHub repo tag (i.e. 'v7.0') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...]) + if version != 'latest': + version = f'tags/{version}' # i.e. tags/v7.0 + response = requests.get(f'https://api.github.com/repos/{repository}/releases/{version}').json() # github api + return response['tag_name'], [x['name'] for x in response['assets']] # tag, assets + + file = Path(str(file).strip().replace("'", '')) + if not file.exists(): + # URL specified + name = Path(urllib.parse.unquote(str(file))).name # decode '%2F' to '/' etc. + if str(file).startswith(('http:/', 'https:/')): # download + url = str(file).replace(':/', '://') # Pathlib turns :// -> :/ + file = name.split('?')[0] # parse authentication https://url.com/file.txt?auth... 
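+            # Reuse the local file if the URL target already exists on disk, otherwise download it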
+ if Path(file).is_file(): + LOGGER.info(f'Found {url} locally at {file}') # file already exists + else: + safe_download(file=file, url=url, min_bytes=1E5) + return file + + # GitHub assets + assets = [f'yolov5{size}{suffix}.pt' for size in 'nsmlx' for suffix in ('', '6', '-cls', '-seg')] # default + try: + tag, assets = github_assets(repo, release) + except Exception: + try: + tag, assets = github_assets(repo) # latest release + except Exception: + try: + tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1] + except Exception: + tag = release + + file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) + if name in assets: + url3 = 'https://drive.google.com/drive/folders/1EFQTEUeXWSFww0luse2jB9M1QNZQGwNl' # backup gdrive mirror + safe_download( + file, + url=f'https://github.com/{repo}/releases/download/{tag}/{name}', + min_bytes=1E5, + error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag} or {url3}') + + return str(file) diff --git a/utils/general.py b/utils/general.py new file mode 100644 index 0000000000000000000000000000000000000000..efe78b29ac69975890b47e6dd47d0c13024771a4 --- /dev/null +++ b/utils/general.py @@ -0,0 +1,1135 @@ +import contextlib +import glob +import inspect +import logging +import logging.config +import math +import os +import platform +import random +import re +import signal +import sys +import time +import urllib +from copy import deepcopy +from datetime import datetime +from itertools import repeat +from multiprocessing.pool import ThreadPool +from pathlib import Path +from subprocess import check_output +from tarfile import is_tarfile +from typing import Optional +from zipfile import ZipFile, is_zipfile + +import cv2 +import IPython +import numpy as np +import pandas as pd +import pkg_resources as pkg +import torch +import torchvision +import yaml + +from utils import TryExcept, emojis +from utils.downloads import gsutil_getsize +from utils.metrics import box_iou, fitness + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLO root directory +RANK = int(os.getenv('RANK', -1)) + +# Settings +NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads +DATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets')) # global datasets directory +AUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode +VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode +TQDM_BAR_FORMAT = '{l_bar}{bar:10}| {n_fmt}/{total_fmt} {elapsed}' # tqdm bar format +FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf + +torch.set_printoptions(linewidth=320, precision=5, profile='long') +np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 +pd.options.display.max_columns = 10 +cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) +os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads +os.environ['OMP_NUM_THREADS'] = '1' if platform.system() == 'darwin' else str(NUM_THREADS) # OpenMP (PyTorch and SciPy) + + +def is_ascii(s=''): + # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7) + s = str(s) # convert list, tuple, None, etc. to str + return len(s.encode().decode('ascii', 'ignore')) == len(s) + + +def is_chinese(s='人工智能'): + # Is string composed of any Chinese characters? 
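+    # (i.e. any code point in the CJK Unified Ideographs block U+4E00-U+9FFF)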
+ return bool(re.search('[\u4e00-\u9fff]', str(s))) + + +def is_colab(): + # Is environment a Google Colab instance? + return 'google.colab' in sys.modules + + +def is_notebook(): + # Is environment a Jupyter notebook? Verified on Colab, Jupyterlab, Kaggle, Paperspace + ipython_type = str(type(IPython.get_ipython())) + return 'colab' in ipython_type or 'zmqshell' in ipython_type + + +def is_kaggle(): + # Is environment a Kaggle Notebook? + return os.environ.get('PWD') == '/kaggle/working' and os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com' + + +def is_docker() -> bool: + """Check if the process runs inside a docker container.""" + if Path("/.dockerenv").exists(): + return True + try: # check if docker is in control groups + with open("/proc/self/cgroup") as file: + return any("docker" in line for line in file) + except OSError: + return False + + +def is_writeable(dir, test=False): + # Return True if directory has write permissions, test opening a file with write permissions if test=True + if not test: + return os.access(dir, os.W_OK) # possible issues on Windows + file = Path(dir) / 'tmp.txt' + try: + with open(file, 'w'): # open file with write permissions + pass + file.unlink() # remove file + return True + except OSError: + return False + + +LOGGING_NAME = "yolov5" + + +def set_logging(name=LOGGING_NAME, verbose=True): + # sets up logging for the given name + rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings + level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR + logging.config.dictConfig({ + "version": 1, + "disable_existing_loggers": False, + "formatters": { + name: { + "format": "%(message)s"}}, + "handlers": { + name: { + "class": "logging.StreamHandler", + "formatter": name, + "level": level,}}, + "loggers": { + name: { + "level": level, + "handlers": [name], + "propagate": False,}}}) + + +set_logging(LOGGING_NAME) # run before defining LOGGER +LOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.) +if platform.system() == 'Windows': + for fn in LOGGER.info, LOGGER.warning: + setattr(LOGGER, fn.__name__, lambda x: fn(emojis(x))) # emoji safe logging + + +def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): + # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required. + env = os.getenv(env_var) + if env: + path = Path(env) # use environment variable + else: + cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs + path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir + path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable + path.mkdir(exist_ok=True) # make if required + return path + + +CONFIG_DIR = user_config_dir() # Ultralytics settings dir + + +class Profile(contextlib.ContextDecorator): + # YOLO Profile class. Usage: @Profile() decorator or 'with Profile():' context manager + def __init__(self, t=0.0): + self.t = t + self.cuda = torch.cuda.is_available() + + def __enter__(self): + self.start = self.time() + return self + + def __exit__(self, type, value, traceback): + self.dt = self.time() - self.start # delta-time + self.t += self.dt # accumulate dt + + def time(self): + if self.cuda: + torch.cuda.synchronize() + return time.time() + + +class Timeout(contextlib.ContextDecorator): + # YOLO Timeout class. 
Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager + def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True): + self.seconds = int(seconds) + self.timeout_message = timeout_msg + self.suppress = bool(suppress_timeout_errors) + + def _timeout_handler(self, signum, frame): + raise TimeoutError(self.timeout_message) + + def __enter__(self): + if platform.system() != 'Windows': # not supported on Windows + signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM + signal.alarm(self.seconds) # start countdown for SIGALRM to be raised + + def __exit__(self, exc_type, exc_val, exc_tb): + if platform.system() != 'Windows': + signal.alarm(0) # Cancel SIGALRM if it's scheduled + if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError + return True + + +class WorkingDirectory(contextlib.ContextDecorator): + # Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager + def __init__(self, new_dir): + self.dir = new_dir # new dir + self.cwd = Path.cwd().resolve() # current dir + + def __enter__(self): + os.chdir(self.dir) + + def __exit__(self, exc_type, exc_val, exc_tb): + os.chdir(self.cwd) + + +def methods(instance): + # Get class/instance methods + return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] + + +def print_args(args: Optional[dict] = None, show_file=True, show_func=False): + # Print function arguments (optional args dict) + x = inspect.currentframe().f_back # previous frame + file, _, func, _, _ = inspect.getframeinfo(x) + if args is None: # get args automatically + args, _, _, frm = inspect.getargvalues(x) + args = {k: v for k, v in frm.items() if k in args} + try: + file = Path(file).resolve().relative_to(ROOT).with_suffix('') + except ValueError: + file = Path(file).stem + s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '') + LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items())) + + +def init_seeds(seed=0, deterministic=False): + # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) # for Multi-GPU, exception safe + # torch.backends.cudnn.benchmark = True # AutoBatch problem https://github.com/ultralytics/yolov5/issues/9287 + if deterministic and check_version(torch.__version__, '1.12.0'): # https://github.com/ultralytics/yolov5/pull/8213 + torch.use_deterministic_algorithms(True) + torch.backends.cudnn.deterministic = True + os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' + os.environ['PYTHONHASHSEED'] = str(seed) + + +def intersect_dicts(da, db, exclude=()): + # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values + return {k: v for k, v in da.items() if k in db and all(x not in k for x in exclude) and v.shape == db[k].shape} + + +def get_default_args(func): + # Get func() default arguments + signature = inspect.signature(func) + return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty} + + +def get_latest_run(search_dir='.'): + # Return path to most recent 'last.pt' in /runs (i.e. 
to --resume from) + last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True) + return max(last_list, key=os.path.getctime) if last_list else '' + + +def file_age(path=__file__): + # Return days since last file update + dt = (datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)) # delta + return dt.days # + dt.seconds / 86400 # fractional days + + +def file_date(path=__file__): + # Return human-readable file modification date, i.e. '2021-3-26' + t = datetime.fromtimestamp(Path(path).stat().st_mtime) + return f'{t.year}-{t.month}-{t.day}' + + +def file_size(path): + # Return file/dir size (MB) + mb = 1 << 20 # bytes to MiB (1024 ** 2) + path = Path(path) + if path.is_file(): + return path.stat().st_size / mb + elif path.is_dir(): + return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb + else: + return 0.0 + + +def check_online(): + # Check internet connectivity + import socket + + def run_once(): + # Check once + try: + socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility + return True + except OSError: + return False + + return run_once() or run_once() # check twice to increase robustness to intermittent connectivity issues + + +def git_describe(path=ROOT): # path must be a directory + # Return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe + try: + assert (Path(path) / '.git').is_dir() + return check_output(f'git -C {path} describe --tags --long --always', shell=True).decode()[:-1] + except Exception: + return '' + + +@TryExcept() +@WorkingDirectory(ROOT) +def check_git_status(repo='WongKinYiu/yolov9', branch='main'): + # YOLO status check, recommend 'git pull' if code is out of date + url = f'https://github.com/{repo}' + msg = f', for updates see {url}' + s = colorstr('github: ') # string + assert Path('.git').exists(), s + 'skipping check (not a git repository)' + msg + assert check_online(), s + 'skipping check (offline)' + msg + + splits = re.split(pattern=r'\s', string=check_output('git remote -v', shell=True).decode()) + matches = [repo in s for s in splits] + if any(matches): + remote = splits[matches.index(True) - 1] + else: + remote = 'ultralytics' + check_output(f'git remote add {remote} {url}', shell=True) + check_output(f'git fetch {remote}', shell=True, timeout=5) # git fetch + local_branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out + n = int(check_output(f'git rev-list {local_branch}..{remote}/{branch} --count', shell=True)) # commits behind + if n > 0: + pull = 'git pull' if remote == 'origin' else f'git pull {remote} {branch}' + s += f"⚠️ YOLO is out of date by {n} commit{'s' * (n > 1)}. Use `{pull}` or `git clone {url}` to update." + else: + s += f'up to date with {url} ✅' + LOGGER.info(s) + + +@WorkingDirectory(ROOT) +def check_git_info(path='.'): + # YOLO git info check, return {remote, branch, commit} + check_requirements('gitpython') + import git + try: + repo = git.Repo(path) + remote = repo.remotes.origin.url.replace('.git', '') # i.e. 'https://github.com/WongKinYiu/yolov9' + commit = repo.head.commit.hexsha # i.e. '3134699c73af83aac2a481435550b968d5792c0d' + try: + branch = repo.active_branch.name # i.e. 'main' + except TypeError: # not on any branch + branch = None # i.e. 
'detached HEAD' state + return {'remote': remote, 'branch': branch, 'commit': commit} + except git.exc.InvalidGitRepositoryError: # path is not a git dir + return {'remote': None, 'branch': None, 'commit': None} + + +def check_python(minimum='3.7.0'): + # Check current python version vs. required python version + check_version(platform.python_version(), minimum, name='Python ', hard=True) + + +def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False): + # Check version vs. required version + current, minimum = (pkg.parse_version(x) for x in (current, minimum)) + result = (current == minimum) if pinned else (current >= minimum) # bool + s = f'WARNING ⚠️ {name}{minimum} is required by YOLO, but {name}{current} is currently installed' # string + if hard: + assert result, emojis(s) # assert min requirements met + if verbose and not result: + LOGGER.warning(s) + return result + + +@TryExcept() +def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=''): + # Check installed dependencies meet YOLO requirements (pass *.txt file or list of packages or single package str) + prefix = colorstr('red', 'bold', 'requirements:') + check_python() # check python version + if isinstance(requirements, Path): # requirements.txt file + file = requirements.resolve() + assert file.exists(), f"{prefix} {file} not found, check failed." + with file.open() as f: + requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude] + elif isinstance(requirements, str): + requirements = [requirements] + + s = '' + n = 0 + for r in requirements: + try: + pkg.require(r) + except (pkg.VersionConflict, pkg.DistributionNotFound): # exception if requirements not met + s += f'"{r}" ' + n += 1 + + if s and install and AUTOINSTALL: # check environment variable + LOGGER.info(f"{prefix} YOLO requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") + try: + # assert check_online(), "AutoUpdate skipped (offline)" + LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode()) + source = file if 'file' in locals() else requirements + s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ + f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" + LOGGER.info(s) + except Exception as e: + LOGGER.warning(f'{prefix} ❌ {e}') + + +def check_img_size(imgsz, s=32, floor=0): + # Verify image size is a multiple of stride s in each dimension + if isinstance(imgsz, int): # integer i.e. img_size=640 + new_size = max(make_divisible(imgsz, int(s)), floor) + else: # list i.e. 
img_size=[640, 480] + imgsz = list(imgsz) # convert to list if tuple + new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz] + if new_size != imgsz: + LOGGER.warning(f'WARNING ⚠️ --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') + return new_size + + +def check_imshow(warn=False): + # Check if environment supports image displays + try: + assert not is_notebook() + assert not is_docker() + cv2.imshow('test', np.zeros((1, 1, 3))) + cv2.waitKey(1) + cv2.destroyAllWindows() + cv2.waitKey(1) + return True + except Exception as e: + if warn: + LOGGER.warning(f'WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show()\n{e}') + return False + + +def check_suffix(file='yolo.pt', suffix=('.pt',), msg=''): + # Check file(s) for acceptable suffix + if file and suffix: + if isinstance(suffix, str): + suffix = [suffix] + for f in file if isinstance(file, (list, tuple)) else [file]: + s = Path(f).suffix.lower() # file suffix + if len(s): + assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}" + + +def check_yaml(file, suffix=('.yaml', '.yml')): + # Search/download YAML file (if necessary) and return path, checking suffix + return check_file(file, suffix) + + +def check_file(file, suffix=''): + # Search/download file (if necessary) and return path + check_suffix(file, suffix) # optional + file = str(file) # convert to str() + if os.path.isfile(file) or not file: # exists + return file + elif file.startswith(('http:/', 'https:/')): # download + url = file # warning: Pathlib turns :// -> :/ + file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth + if os.path.isfile(file): + LOGGER.info(f'Found {url} locally at {file}') # file already exists + else: + LOGGER.info(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, file) + assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check + return file + elif file.startswith('clearml://'): # ClearML Dataset ID + assert 'clearml' in sys.modules, "ClearML is not installed, so cannot use ClearML dataset. Try running 'pip install clearml'." 
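+        # clearml:// dataset IDs are resolved later by the ClearML integration, so return the URI unchanged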
+ return file + else: # search + files = [] + for d in 'data', 'models', 'utils': # search directories + files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file + assert len(files), f'File not found: {file}' # assert file was found + assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique + return files[0] # return file + + +def check_font(font=FONT, progress=False): + # Download font to CONFIG_DIR if necessary + font = Path(font) + file = CONFIG_DIR / font.name + if not font.exists() and not file.exists(): + url = f'https://ultralytics.com/assets/{font.name}' + LOGGER.info(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, str(file), progress=progress) + + +def check_dataset(data, autodownload=True): + # Download, check and/or unzip dataset if not found locally + + # Download (optional) + extract_dir = '' + if isinstance(data, (str, Path)) and (is_zipfile(data) or is_tarfile(data)): + download(data, dir=f'{DATASETS_DIR}/{Path(data).stem}', unzip=True, delete=False, curl=False, threads=1) + data = next((DATASETS_DIR / Path(data).stem).rglob('*.yaml')) + extract_dir, autodownload = data.parent, False + + # Read yaml (optional) + if isinstance(data, (str, Path)): + data = yaml_load(data) # dictionary + + # Checks + for k in 'train', 'val', 'names': + assert k in data, emojis(f"data.yaml '{k}:' field missing ❌") + if isinstance(data['names'], (list, tuple)): # old array format + data['names'] = dict(enumerate(data['names'])) # convert to dict + assert all(isinstance(k, int) for k in data['names'].keys()), 'data.yaml names keys must be integers, i.e. 2: car' + data['nc'] = len(data['names']) + + # Resolve paths + path = Path(extract_dir or data.get('path') or '') # optional 'path' default to '.' 
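+    # Resolve a relative dataset 'path' against the repository ROOT so train/val/test entries become absolute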
+ if not path.is_absolute(): + path = (ROOT / path).resolve() + data['path'] = path # download scripts + for k in 'train', 'val', 'test': + if data.get(k): # prepend path + if isinstance(data[k], str): + x = (path / data[k]).resolve() + if not x.exists() and data[k].startswith('../'): + x = (path / data[k][3:]).resolve() + data[k] = str(x) + else: + data[k] = [str((path / x).resolve()) for x in data[k]] + + # Parse yaml + train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download')) + if val: + val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path + if not all(x.exists() for x in val): + LOGGER.info('\nDataset not found ⚠️, missing paths %s' % [str(x) for x in val if not x.exists()]) + if not s or not autodownload: + raise Exception('Dataset not found ❌') + t = time.time() + if s.startswith('http') and s.endswith('.zip'): # URL + f = Path(s).name # filename + LOGGER.info(f'Downloading {s} to {f}...') + torch.hub.download_url_to_file(s, f) + Path(DATASETS_DIR).mkdir(parents=True, exist_ok=True) # create root + unzip_file(f, path=DATASETS_DIR) # unzip + Path(f).unlink() # remove zip + r = None # success + elif s.startswith('bash '): # bash script + LOGGER.info(f'Running {s} ...') + r = os.system(s) + else: # python script + r = exec(s, {'yaml': data}) # return None + dt = f'({round(time.time() - t, 1)}s)' + s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f"failure {dt} ❌" + LOGGER.info(f"Dataset download {s}") + check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True) # download fonts + return data # dictionary + + +def check_amp(model): + # Check PyTorch Automatic Mixed Precision (AMP) functionality. Return True on correct operation + from models.common import AutoShape, DetectMultiBackend + + def amp_allclose(model, im): + # All close FP32 vs AMP results + m = AutoShape(model, verbose=False) # model + a = m(im).xywhn[0] # FP32 inference + m.amp = True + b = m(im).xywhn[0] # AMP inference + return a.shape == b.shape and torch.allclose(a, b, atol=0.1) # close to 10% absolute tolerance + + prefix = colorstr('AMP: ') + device = next(model.parameters()).device # get model device + if device.type in ('cpu', 'mps'): + return False # AMP only used on CUDA devices + f = ROOT / 'data' / 'images' / 'bus.jpg' # image to check + im = f if f.exists() else 'https://ultralytics.com/images/bus.jpg' if check_online() else np.ones((640, 640, 3)) + try: + #assert amp_allclose(deepcopy(model), im) or amp_allclose(DetectMultiBackend('yolo.pt', device), im) + LOGGER.info(f'{prefix}checks passed ✅') + return True + except Exception: + help_url = 'https://github.com/ultralytics/yolov5/issues/7908' + LOGGER.warning(f'{prefix}checks failed ❌, disabling Automatic Mixed Precision. 
See {help_url}') + return False + + +def yaml_load(file='data.yaml'): + # Single-line safe yaml loading + with open(file, errors='ignore') as f: + return yaml.safe_load(f) + + +def yaml_save(file='data.yaml', data={}): + # Single-line safe yaml saving + with open(file, 'w') as f: + yaml.safe_dump({k: str(v) if isinstance(v, Path) else v for k, v in data.items()}, f, sort_keys=False) + + +def unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX')): + # Unzip a *.zip file to path/, excluding files containing strings in exclude list + if path is None: + path = Path(file).parent # default path + with ZipFile(file) as zipObj: + for f in zipObj.namelist(): # list all archived filenames in the zip + if all(x not in f for x in exclude): + zipObj.extract(f, path=path) + + +def url2file(url): + # Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt + url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ + return Path(urllib.parse.unquote(url)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth + + +def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3): + # Multithreaded file download and unzip function, used in data.yaml for autodownload + def download_one(url, dir): + # Download 1 file + success = True + if os.path.isfile(url): + f = Path(url) # filename + else: # does not exist + f = dir / Path(url).name + LOGGER.info(f'Downloading {url} to {f}...') + for i in range(retry + 1): + if curl: + s = 'sS' if threads > 1 else '' # silent + r = os.system( + f'curl -# -{s}L "{url}" -o "{f}" --retry 9 -C -') # curl download with retry, continue + success = r == 0 + else: + torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download + success = f.is_file() + if success: + break + elif i < retry: + LOGGER.warning(f'⚠️ Download failure, retrying {i + 1}/{retry} {url}...') + else: + LOGGER.warning(f'❌ Failed to download {url}...') + + if unzip and success and (f.suffix == '.gz' or is_zipfile(f) or is_tarfile(f)): + LOGGER.info(f'Unzipping {f}...') + if is_zipfile(f): + unzip_file(f, dir) # unzip + elif is_tarfile(f): + os.system(f'tar xf {f} --directory {f.parent}') # unzip + elif f.suffix == '.gz': + os.system(f'tar xfz {f} --directory {f.parent}') # unzip + if delete: + f.unlink() # remove zip + + dir = Path(dir) + dir.mkdir(parents=True, exist_ok=True) # make directory + if threads > 1: + pool = ThreadPool(threads) + pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multithreaded + pool.close() + pool.join() + else: + for u in [url] if isinstance(url, (str, Path)) else url: + download_one(u, dir) + + +def make_divisible(x, divisor): + # Returns nearest x divisible by divisor + if isinstance(divisor, torch.Tensor): + divisor = int(divisor.max()) # to int + return math.ceil(x / divisor) * divisor + + +def clean_str(s): + # Cleans a string by replacing special characters with underscore _ + return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s) + + +def one_cycle(y1=0.0, y2=1.0, steps=100): + # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf + return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 + + +def one_flat_cycle(y1=0.0, y2=1.0, steps=100): + # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf + #return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 + return lambda x: ((1 - math.cos((x - (steps // 2)) * math.pi / (steps // 2))) / 2) * (y2 - y1) + y1 
if (x > (steps // 2)) else y1 + + +def colorstr(*input): + # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world') + *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string + colors = { + 'black': '\033[30m', # basic colors + 'red': '\033[31m', + 'green': '\033[32m', + 'yellow': '\033[33m', + 'blue': '\033[34m', + 'magenta': '\033[35m', + 'cyan': '\033[36m', + 'white': '\033[37m', + 'bright_black': '\033[90m', # bright colors + 'bright_red': '\033[91m', + 'bright_green': '\033[92m', + 'bright_yellow': '\033[93m', + 'bright_blue': '\033[94m', + 'bright_magenta': '\033[95m', + 'bright_cyan': '\033[96m', + 'bright_white': '\033[97m', + 'end': '\033[0m', # misc + 'bold': '\033[1m', + 'underline': '\033[4m'} + return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] + + +def labels_to_class_weights(labels, nc=80): + # Get class weights (inverse frequency) from training labels + if labels[0] is None: # no labels loaded + return torch.Tensor() + + labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO + classes = labels[:, 0].astype(int) # labels = [class xywh] + weights = np.bincount(classes, minlength=nc) # occurrences per class + + # Prepend gridpoint count (for uCE training) + # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image + # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start + + weights[weights == 0] = 1 # replace empty bins with 1 + weights = 1 / weights # number of targets per class + weights /= weights.sum() # normalize + return torch.from_numpy(weights).float() + + +def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): + # Produces image weights based on class_weights and image contents + # Usage: index = random.choices(range(n), weights=image_weights, k=1) # weighted image sample + class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels]) + return (class_weights.reshape(1, nc) * class_counts).sum(1) + + +def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) + # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/ + # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n') + # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n') + # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco + # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet + return [ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] + + +def xyxy2xywh(x): + # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[..., 0] = (x[..., 0] + x[..., 2]) / 2 # x center + y[..., 1] = (x[..., 1] + x[..., 3]) / 2 # y center + y[..., 2] = x[..., 2] - x[..., 0] # width + y[..., 3] = x[..., 3] - x[..., 1] # height + return y + + +def xywh2xyxy(x): + # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[..., 0] = x[..., 0] - x[..., 2] / 2 # top left x + 
y[..., 1] = x[..., 1] - x[..., 3] / 2 # top left y + y[..., 2] = x[..., 0] + x[..., 2] / 2 # bottom right x + y[..., 3] = x[..., 1] + x[..., 3] / 2 # bottom right y + return y + + +def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): + # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw # top left x + y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh # top left y + y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw # bottom right x + y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh # bottom right y + return y + + +def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): + # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right + if clip: + clip_boxes(x, (h - eps, w - eps)) # warning: inplace clip + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w # x center + y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h # y center + y[..., 2] = (x[..., 2] - x[..., 0]) / w # width + y[..., 3] = (x[..., 3] - x[..., 1]) / h # height + return y + + +def xyn2xy(x, w=640, h=640, padw=0, padh=0): + # Convert normalized segments into pixel segments, shape (n,2) + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[..., 0] = w * x[..., 0] + padw # top left x + y[..., 1] = h * x[..., 1] + padh # top left y + return y + + +def segment2box(segment, width=640, height=640): + # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy) + x, y = segment.T # segment xy + inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) + x, y, = x[inside], y[inside] + return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy + + +def segments2boxes(segments): + # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) 
to (cls, xywh) + boxes = [] + for s in segments: + x, y = s.T # segment xy + boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy + return xyxy2xywh(np.array(boxes)) # cls, xywh + + +def resample_segments(segments, n=1000): + # Up-sample an (n,2) segment + for i, s in enumerate(segments): + s = np.concatenate((s, s[0:1, :]), axis=0) + x = np.linspace(0, len(s) - 1, n) + xp = np.arange(len(s)) + segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy + return segments + + +def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None): + # Rescale boxes (xyxy) from img1_shape to img0_shape + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new + pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + + boxes[:, [0, 2]] -= pad[0] # x padding + boxes[:, [1, 3]] -= pad[1] # y padding + boxes[:, :4] /= gain + clip_boxes(boxes, img0_shape) + return boxes + + +def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=False): + # Rescale coords (xyxy) from img1_shape to img0_shape + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new + pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + + segments[:, 0] -= pad[0] # x padding + segments[:, 1] -= pad[1] # y padding + segments /= gain + clip_segments(segments, img0_shape) + if normalize: + segments[:, 0] /= img0_shape[1] # width + segments[:, 1] /= img0_shape[0] # height + return segments + + +def clip_boxes(boxes, shape): + # Clip boxes (xyxy) to image shape (height, width) + if isinstance(boxes, torch.Tensor): # faster individually + boxes[:, 0].clamp_(0, shape[1]) # x1 + boxes[:, 1].clamp_(0, shape[0]) # y1 + boxes[:, 2].clamp_(0, shape[1]) # x2 + boxes[:, 3].clamp_(0, shape[0]) # y2 + else: # np.array (faster grouped) + boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2 + boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 + + +def clip_segments(segments, shape): + # Clip segments (xy1,xy2,...) 
to image shape (height, width) + if isinstance(segments, torch.Tensor): # faster individually + segments[:, 0].clamp_(0, shape[1]) # x + segments[:, 1].clamp_(0, shape[0]) # y + else: # np.array (faster grouped) + segments[:, 0] = segments[:, 0].clip(0, shape[1]) # x + segments[:, 1] = segments[:, 1].clip(0, shape[0]) # y + + +def non_max_suppression( + prediction, + conf_thres=0.25, + iou_thres=0.45, + classes=None, + agnostic=False, + multi_label=False, + labels=(), + max_det=300, + nm=0, # number of masks +): + """Non-Maximum Suppression (NMS) on inference results to reject overlapping detections + + Returns: + list of detections, on (n,6) tensor per image [xyxy, conf, cls] + """ + + if isinstance(prediction, (list, tuple)): # YOLO model in validation model, output = (inference_out, loss_out) + prediction = prediction[0] # select only inference output + + device = prediction.device + mps = 'mps' in device.type # Apple MPS + if mps: # MPS not fully supported yet, convert tensors to CPU before NMS + prediction = prediction.cpu() + bs = prediction.shape[0] # batch size + nc = prediction.shape[1] - nm - 4 # number of classes + mi = 4 + nc # mask start index + xc = prediction[:, 4:mi].amax(1) > conf_thres # candidates + + # Checks + assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' + assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' + + # Settings + # min_wh = 2 # (pixels) minimum box width and height + max_wh = 7680 # (pixels) maximum box width and height + max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() + time_limit = 2.5 + 0.05 * bs # seconds to quit after + redundant = True # require redundant detections + multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) + merge = False # use merge-NMS + + t = time.time() + output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs + for xi, x in enumerate(prediction): # image index, image inference + # Apply constraints + # x[((x[:, 2:4] < min_wh) | (x[:, 2:4] > max_wh)).any(1), 4] = 0 # width-height + x = x.T[xc[xi]] # confidence + + # Cat apriori labels if autolabelling + if labels and len(labels[xi]): + lb = labels[xi] + v = torch.zeros((len(lb), nc + nm + 5), device=x.device) + v[:, :4] = lb[:, 1:5] # box + v[range(len(lb)), lb[:, 0].long() + 4] = 1.0 # cls + x = torch.cat((x, v), 0) + + # If none remain process next image + if not x.shape[0]: + continue + + # Detections matrix nx6 (xyxy, conf, cls) + box, cls, mask = x.split((4, nc, nm), 1) + box = xywh2xyxy(box) # center_x, center_y, width, height) to (x1, y1, x2, y2) + if multi_label: + i, j = (cls > conf_thres).nonzero(as_tuple=False).T + x = torch.cat((box[i], x[i, 4 + j, None], j[:, None].float(), mask[i]), 1) + else: # best class only + conf, j = cls.max(1, keepdim=True) + x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres] + + # Filter by class + if classes is not None: + x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] + + # Apply finite constraint + # if not torch.isfinite(x).all(): + # x = x[torch.isfinite(x).all(1)] + + # Check shape + n = x.shape[0] # number of boxes + if not n: # no boxes + continue + elif n > max_nms: # excess boxes + x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence + else: + x = x[x[:, 4].argsort(descending=True)] # sort by confidence + + # Batched NMS + c = x[:, 5:6] * (0 if agnostic else max_wh) # classes + boxes, scores = x[:, :4] + c, x[:, 4] # boxes 
(offset by class), scores + i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS + if i.shape[0] > max_det: # limit detections + i = i[:max_det] + if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) + # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) + iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix + weights = iou * scores[None] # box weights + x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes + if redundant: + i = i[iou.sum(1) > 1] # require redundancy + + output[xi] = x[i] + if mps: + output[xi] = output[xi].to(device) + if (time.time() - t) > time_limit: + LOGGER.warning(f'WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded') + break # time limit exceeded + + return output + + +def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer() + # Strip optimizer from 'f' to finalize training, optionally save as 's' + x = torch.load(f, map_location=torch.device('cpu')) + if x.get('ema'): + x['model'] = x['ema'] # replace model with ema + for k in 'optimizer', 'best_fitness', 'ema', 'updates': # keys + x[k] = None + x['epoch'] = -1 + x['model'].half() # to FP16 + for p in x['model'].parameters(): + p.requires_grad = False + torch.save(x, s or f) + mb = os.path.getsize(s or f) / 1E6 # filesize + LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB") + + +def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')): + evolve_csv = save_dir / 'evolve.csv' + evolve_yaml = save_dir / 'hyp_evolve.yaml' + keys = tuple(keys) + tuple(hyp.keys()) # [results + hyps] + keys = tuple(x.strip() for x in keys) + vals = results + tuple(hyp.values()) + n = len(keys) + + # Download (optional) + if bucket: + url = f'gs://{bucket}/evolve.csv' + if gsutil_getsize(url) > (evolve_csv.stat().st_size if evolve_csv.exists() else 0): + os.system(f'gsutil cp {url} {save_dir}') # download evolve.csv if larger than local + + # Log to evolve.csv + s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n') # add header + with open(evolve_csv, 'a') as f: + f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\n') + + # Save yaml + with open(evolve_yaml, 'w') as f: + data = pd.read_csv(evolve_csv) + data = data.rename(columns=lambda x: x.strip()) # strip keys + i = np.argmax(fitness(data.values[:, :4])) # + generations = len(data) + f.write('# YOLO Hyperparameter Evolution Results\n' + f'# Best generation: {i}\n' + + f'# Last generation: {generations - 1}\n' + '# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) + + '\n' + '# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n') + yaml.safe_dump(data.loc[i][7:].to_dict(), f, sort_keys=False) + + # Print to screen + LOGGER.info(prefix + f'{generations} generations finished, current result:\n' + prefix + + ', '.join(f'{x.strip():>20s}' for x in keys) + '\n' + prefix + ', '.join(f'{x:20.5g}' + for x in vals) + '\n\n') + + if bucket: + os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}') # upload + + +def apply_classifier(x, model, img, im0): + # Apply a second stage classifier to YOLO outputs + # Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval() + im0 = [im0] if isinstance(im0, np.ndarray) else im0 + for i, d in enumerate(x): # per image + if d is not None and len(d): + d = d.clone() + + # Reshape and pad cutouts + b = xyxy2xywh(d[:, :4]) # boxes + b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square + b[:, 2:] 
= b[:, 2:] * 1.3 + 30 # pad + d[:, :4] = xywh2xyxy(b).long() + + # Rescale boxes from img_size to im0 size + scale_boxes(img.shape[2:], d[:, :4], im0[i].shape) + + # Classes + pred_cls1 = d[:, 5].long() + ims = [] + for a in d: + cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])] + im = cv2.resize(cutout, (224, 224)) # BGR + + im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + ims.append(im) + + pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction + x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections + + return x + + +def increment_path(path, exist_ok=False, sep='', mkdir=False): + # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. + path = Path(path) # os-agnostic + if path.exists() and not exist_ok: + path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '') + + # Method 1 + for n in range(2, 9999): + p = f'{path}{sep}{n}{suffix}' # increment path + if not os.path.exists(p): # + break + path = Path(p) + + # Method 2 (deprecated) + # dirs = glob.glob(f"{path}{sep}*") # similar paths + # matches = [re.search(rf"{path.stem}{sep}(\d+)", d) for d in dirs] + # i = [int(m.groups()[0]) for m in matches if m] # indices + # n = max(i) + 1 if i else 2 # increment number + # path = Path(f"{path}{sep}{n}{suffix}") # increment path + + if mkdir: + path.mkdir(parents=True, exist_ok=True) # make directory + + return path + + +# OpenCV Chinese-friendly functions ------------------------------------------------------------------------------------ +imshow_ = cv2.imshow # copy to avoid recursion errors + + +def imread(path, flags=cv2.IMREAD_COLOR): + return cv2.imdecode(np.fromfile(path, np.uint8), flags) + + +def imwrite(path, im): + try: + cv2.imencode(Path(path).suffix, im)[1].tofile(path) + return True + except Exception: + return False + + +def imshow(path, im): + imshow_(path.encode('unicode_escape').decode(), im) + + +cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow # redefine + +# Variables ------------------------------------------------------------------------------------------------------------ diff --git a/utils/lion.py b/utils/lion.py new file mode 100644 index 0000000000000000000000000000000000000000..63651cff24e3d00e7e15a2cff2a81d1da46b8c5e --- /dev/null +++ b/utils/lion.py @@ -0,0 +1,67 @@ +"""PyTorch implementation of the Lion optimizer.""" +import torch +from torch.optim.optimizer import Optimizer + + +class Lion(Optimizer): + r"""Implements Lion algorithm.""" + + def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0.0): + """Initialize the hyperparameters. 
+ Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-4) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.99)) + weight_decay (float, optional): weight decay coefficient (default: 0) + """ + + if not 0.0 <= lr: + raise ValueError('Invalid learning rate: {}'.format(lr)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1])) + defaults = dict(lr=lr, betas=betas, weight_decay=weight_decay) + super().__init__(params, defaults) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + Args: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + Returns: + the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + + # Perform stepweight decay + p.data.mul_(1 - group['lr'] * group['weight_decay']) + + grad = p.grad + state = self.state[p] + # State initialization + if len(state) == 0: + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p) + + exp_avg = state['exp_avg'] + beta1, beta2 = group['betas'] + + # Weight update + update = exp_avg * beta1 + grad * (1 - beta1) + p.add_(torch.sign(update), alpha=-group['lr']) + # Decay the momentum running average coefficient + exp_avg.mul_(beta2).add_(grad, alpha=1 - beta2) + + return loss \ No newline at end of file diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8fc8377ab60987a0903de62ef0b20a946289ea8f --- /dev/null +++ b/utils/loggers/__init__.py @@ -0,0 +1,399 @@ +import os +import warnings +from pathlib import Path + +import pkg_resources as pkg +import torch +from torch.utils.tensorboard import SummaryWriter + +from utils.general import LOGGER, colorstr, cv2 +from utils.loggers.clearml.clearml_utils import ClearmlLogger +from utils.loggers.wandb.wandb_utils import WandbLogger +from utils.plots import plot_images, plot_labels, plot_results +from utils.torch_utils import de_parallel + +LOGGERS = ('csv', 'tb', 'wandb', 'clearml', 'comet') # *.csv, TensorBoard, Weights & Biases, ClearML +RANK = int(os.getenv('RANK', -1)) + +try: + import wandb + + assert hasattr(wandb, '__version__') # verify package import not local dir + if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in {0, -1}: + try: + wandb_login_success = wandb.login(timeout=30) + except wandb.errors.UsageError: # known non-TTY terminal issue + wandb_login_success = False + if not wandb_login_success: + wandb = None +except (ImportError, AssertionError): + wandb = None + +try: + import clearml + + assert hasattr(clearml, '__version__') # verify package import not local dir +except (ImportError, AssertionError): + clearml = None + +try: + if RANK not in [0, -1]: + comet_ml = None + else: + import comet_ml + + assert hasattr(comet_ml, '__version__') # verify package import not local dir + from utils.loggers.comet import CometLogger + +except (ModuleNotFoundError, ImportError, AssertionError): + comet_ml = None + + +class Loggers(): + # YOLO Loggers class + def __init__(self, 
save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS): + self.save_dir = save_dir + self.weights = weights + self.opt = opt + self.hyp = hyp + self.plots = not opt.noplots # plot results + self.logger = logger # for printing results to console + self.include = include + self.keys = [ + 'train/box_loss', + 'train/cls_loss', + 'train/dfl_loss', # train loss + 'metrics/precision', + 'metrics/recall', + 'metrics/mAP_0.5', + 'metrics/mAP_0.5:0.95', # metrics + 'val/box_loss', + 'val/cls_loss', + 'val/dfl_loss', # val loss + 'x/lr0', + 'x/lr1', + 'x/lr2'] # params + self.best_keys = ['best/epoch', 'best/precision', 'best/recall', 'best/mAP_0.5', 'best/mAP_0.5:0.95'] + for k in LOGGERS: + setattr(self, k, None) # init empty logger dictionary + self.csv = True # always log to csv + + # Messages + # if not wandb: + # prefix = colorstr('Weights & Biases: ') + # s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLO 🚀 runs in Weights & Biases" + # self.logger.info(s) + if not clearml: + prefix = colorstr('ClearML: ') + s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLO 🚀 in ClearML" + self.logger.info(s) + if not comet_ml: + prefix = colorstr('Comet: ') + s = f"{prefix}run 'pip install comet_ml' to automatically track and visualize YOLO 🚀 runs in Comet" + self.logger.info(s) + # TensorBoard + s = self.save_dir + if 'tb' in self.include and not self.opt.evolve: + prefix = colorstr('TensorBoard: ') + self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/") + self.tb = SummaryWriter(str(s)) + + # W&B + if wandb and 'wandb' in self.include: + wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://') + run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None + self.opt.hyp = self.hyp # add hyperparameters + self.wandb = WandbLogger(self.opt, run_id) + # temp warn. because nested artifacts not supported after 0.12.10 + # if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.11'): + # s = "YOLO temporarily requires wandb version 0.12.10 or below. Some features may not work as expected." 
+ # self.logger.warning(s) + else: + self.wandb = None + + # ClearML + if clearml and 'clearml' in self.include: + self.clearml = ClearmlLogger(self.opt, self.hyp) + else: + self.clearml = None + + # Comet + if comet_ml and 'comet' in self.include: + if isinstance(self.opt.resume, str) and self.opt.resume.startswith("comet://"): + run_id = self.opt.resume.split("/")[-1] + self.comet_logger = CometLogger(self.opt, self.hyp, run_id=run_id) + + else: + self.comet_logger = CometLogger(self.opt, self.hyp) + + else: + self.comet_logger = None + + @property + def remote_dataset(self): + # Get data_dict if custom dataset artifact link is provided + data_dict = None + if self.clearml: + data_dict = self.clearml.data_dict + if self.wandb: + data_dict = self.wandb.data_dict + if self.comet_logger: + data_dict = self.comet_logger.data_dict + + return data_dict + + def on_train_start(self): + if self.comet_logger: + self.comet_logger.on_train_start() + + def on_pretrain_routine_start(self): + if self.comet_logger: + self.comet_logger.on_pretrain_routine_start() + + def on_pretrain_routine_end(self, labels, names): + # Callback runs on pre-train routine end + if self.plots: + plot_labels(labels, names, self.save_dir) + paths = self.save_dir.glob('*labels*.jpg') # training labels + if self.wandb: + self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) + # if self.clearml: + # pass # ClearML saves these images automatically using hooks + if self.comet_logger: + self.comet_logger.on_pretrain_routine_end(paths) + + def on_train_batch_end(self, model, ni, imgs, targets, paths, vals): + log_dict = dict(zip(self.keys[0:3], vals)) + # Callback runs on train batch end + # ni: number integrated batches (since train start) + if self.plots: + if ni < 3: + f = self.save_dir / f'train_batch{ni}.jpg' # filename + plot_images(imgs, targets, paths, f) + if ni == 0 and self.tb and not self.opt.sync_bn: + log_tensorboard_graph(self.tb, model, imgsz=(self.opt.imgsz, self.opt.imgsz)) + if ni == 10 and (self.wandb or self.clearml): + files = sorted(self.save_dir.glob('train*.jpg')) + if self.wandb: + self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]}) + if self.clearml: + self.clearml.log_debug_samples(files, title='Mosaics') + + if self.comet_logger: + self.comet_logger.on_train_batch_end(log_dict, step=ni) + + def on_train_epoch_end(self, epoch): + # Callback runs on train epoch end + if self.wandb: + self.wandb.current_epoch = epoch + 1 + + if self.comet_logger: + self.comet_logger.on_train_epoch_end(epoch) + + def on_val_start(self): + if self.comet_logger: + self.comet_logger.on_val_start() + + def on_val_image_end(self, pred, predn, path, names, im): + # Callback runs on val image end + if self.wandb: + self.wandb.val_one_image(pred, predn, path, names, im) + if self.clearml: + self.clearml.log_image_with_boxes(path, pred, names, im) + + def on_val_batch_end(self, batch_i, im, targets, paths, shapes, out): + if self.comet_logger: + self.comet_logger.on_val_batch_end(batch_i, im, targets, paths, shapes, out) + + def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix): + # Callback runs on val end + if self.wandb or self.clearml: + files = sorted(self.save_dir.glob('val*.jpg')) + if self.wandb: + self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) + if self.clearml: + self.clearml.log_debug_samples(files, title='Validation') + + if self.comet_logger: + self.comet_logger.on_val_end(nt, tp, fp, 
p, r, f1, ap, ap50, ap_class, confusion_matrix) + + def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): + # Callback runs at the end of each fit (train+val) epoch + x = dict(zip(self.keys, vals)) + if self.csv: + file = self.save_dir / 'results.csv' + n = len(x) + 1 # number of cols + s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n') # add header + with open(file, 'a') as f: + f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n') + + if self.tb: + for k, v in x.items(): + self.tb.add_scalar(k, v, epoch) + elif self.clearml: # log to ClearML if TensorBoard not used + for k, v in x.items(): + title, series = k.split('/') + self.clearml.task.get_logger().report_scalar(title, series, v, epoch) + + if self.wandb: + if best_fitness == fi: + best_results = [epoch] + vals[3:7] + for i, name in enumerate(self.best_keys): + self.wandb.wandb_run.summary[name] = best_results[i] # log best results in the summary + self.wandb.log(x) + self.wandb.end_epoch(best_result=best_fitness == fi) + + if self.clearml: + self.clearml.current_epoch_logged_images = set() # reset epoch image limit + self.clearml.current_epoch += 1 + + if self.comet_logger: + self.comet_logger.on_fit_epoch_end(x, epoch=epoch) + + def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): + # Callback runs on model save event + if (epoch + 1) % self.opt.save_period == 0 and not final_epoch and self.opt.save_period != -1: + if self.wandb: + self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi) + if self.clearml: + self.clearml.task.update_output_model(model_path=str(last), + model_name='Latest Model', + auto_delete_file=False) + + if self.comet_logger: + self.comet_logger.on_model_save(last, epoch, final_epoch, best_fitness, fi) + + def on_train_end(self, last, best, epoch, results): + # Callback runs on training end, i.e. saving best model + if self.plots: + plot_results(file=self.save_dir / 'results.csv') # save results.png + files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] + files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter + self.logger.info(f"Results saved to {colorstr('bold', self.save_dir)}") + + if self.tb and not self.clearml: # These images are already captured by ClearML by now, we don't want doubles + for f in files: + self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') + + if self.wandb: + self.wandb.log(dict(zip(self.keys[3:10], results))) + self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) + # Calling wandb.log. 
TODO: Refactor this into WandbLogger.log_model + if not self.opt.evolve: + wandb.log_artifact(str(best if best.exists() else last), + type='model', + name=f'run_{self.wandb.wandb_run.id}_model', + aliases=['latest', 'best', 'stripped']) + self.wandb.finish_run() + + if self.clearml and not self.opt.evolve: + self.clearml.task.update_output_model(model_path=str(best if best.exists() else last), + name='Best Model', + auto_delete_file=False) + + if self.comet_logger: + final_results = dict(zip(self.keys[3:10], results)) + self.comet_logger.on_train_end(files, self.save_dir, last, best, epoch, final_results) + + def on_params_update(self, params: dict): + # Update hyperparams or configs of the experiment + if self.wandb: + self.wandb.wandb_run.config.update(params, allow_val_change=True) + if self.comet_logger: + self.comet_logger.on_params_update(params) + + +class GenericLogger: + """ + YOLO General purpose logger for non-task specific logging + Usage: from utils.loggers import GenericLogger; logger = GenericLogger(...) + Arguments + opt: Run arguments + console_logger: Console logger + include: loggers to include + """ + + def __init__(self, opt, console_logger, include=('tb', 'wandb')): + # init default loggers + self.save_dir = Path(opt.save_dir) + self.include = include + self.console_logger = console_logger + self.csv = self.save_dir / 'results.csv' # CSV logger + if 'tb' in self.include: + prefix = colorstr('TensorBoard: ') + self.console_logger.info( + f"{prefix}Start with 'tensorboard --logdir {self.save_dir.parent}', view at http://localhost:6006/") + self.tb = SummaryWriter(str(self.save_dir)) + + if wandb and 'wandb' in self.include: + self.wandb = wandb.init(project=web_project_name(str(opt.project)), + name=None if opt.name == "exp" else opt.name, + config=opt) + else: + self.wandb = None + + def log_metrics(self, metrics, epoch): + # Log metrics dictionary to all loggers + if self.csv: + keys, vals = list(metrics.keys()), list(metrics.values()) + n = len(metrics) + 1 # number of cols + s = '' if self.csv.exists() else (('%23s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n') # header + with open(self.csv, 'a') as f: + f.write(s + ('%23.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n') + + if self.tb: + for k, v in metrics.items(): + self.tb.add_scalar(k, v, epoch) + + if self.wandb: + self.wandb.log(metrics, step=epoch) + + def log_images(self, files, name='Images', epoch=0): + # Log images to all loggers + files = [Path(f) for f in (files if isinstance(files, (tuple, list)) else [files])] # to Path + files = [f for f in files if f.exists()] # filter by exists + + if self.tb: + for f in files: + self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') + + if self.wandb: + self.wandb.log({name: [wandb.Image(str(f), caption=f.name) for f in files]}, step=epoch) + + def log_graph(self, model, imgsz=(640, 640)): + # Log model graph to all loggers + if self.tb: + log_tensorboard_graph(self.tb, model, imgsz) + + def log_model(self, model_path, epoch=0, metadata={}): + # Log model to all loggers + if self.wandb: + art = wandb.Artifact(name=f"run_{wandb.run.id}_model", type="model", metadata=metadata) + art.add_file(str(model_path)) + wandb.log_artifact(art) + + def update_params(self, params): + # Update the paramters logged + if self.wandb: + wandb.run.config.update(params, allow_val_change=True) + + +def log_tensorboard_graph(tb, model, imgsz=(640, 640)): + # Log model graph to TensorBoard + try: + p = next(model.parameters()) # for device, type + 
imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz # expand + im = torch.zeros((1, 3, *imgsz)).to(p.device).type_as(p) # input image (WARNING: must be zeros, not empty) + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress jit trace warning + tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), []) + except Exception as e: + LOGGER.warning(f'WARNING ⚠️ TensorBoard graph visualization failure {e}') + + +def web_project_name(project): + # Convert local project name to web project name + if not project.startswith('runs/train'): + return project + suffix = '-Classify' if project.endswith('-cls') else '-Segment' if project.endswith('-seg') else '' + return f'YOLO{suffix}' diff --git a/utils/loggers/__pycache__/__init__.cpython-310.pyc b/utils/loggers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c54e80ad7b507dfa5a14d21bc02489fb27991df Binary files /dev/null and b/utils/loggers/__pycache__/__init__.cpython-310.pyc differ diff --git a/utils/loggers/clearml/__init__.py b/utils/loggers/clearml/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..84952a8167bc2975913a6def6b4f027d566552a9 --- /dev/null +++ b/utils/loggers/clearml/__init__.py @@ -0,0 +1 @@ +# init \ No newline at end of file diff --git a/utils/loggers/clearml/__pycache__/__init__.cpython-310.pyc b/utils/loggers/clearml/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a378c641e3398980bfaf8f67da844d50af709dce Binary files /dev/null and b/utils/loggers/clearml/__pycache__/__init__.cpython-310.pyc differ diff --git a/utils/loggers/clearml/__pycache__/clearml_utils.cpython-310.pyc b/utils/loggers/clearml/__pycache__/clearml_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ca037e52d839ac591f245deeb564bc92f73fbc0 Binary files /dev/null and b/utils/loggers/clearml/__pycache__/clearml_utils.cpython-310.pyc differ diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..fe5f597a87a635b15dbfe5d7ed5a6c285ebff6bd --- /dev/null +++ b/utils/loggers/clearml/clearml_utils.py @@ -0,0 +1,157 @@ +"""Main Logger class for ClearML experiment tracking.""" +import glob +import re +from pathlib import Path + +import numpy as np +import yaml + +from utils.plots import Annotator, colors + +try: + import clearml + from clearml import Dataset, Task + + assert hasattr(clearml, '__version__') # verify package import not local dir +except (ImportError, AssertionError): + clearml = None + + +def construct_dataset(clearml_info_string): + """Load in a clearml dataset and fill the internal data_dict with its contents. 
+ """ + dataset_id = clearml_info_string.replace('clearml://', '') + dataset = Dataset.get(dataset_id=dataset_id) + dataset_root_path = Path(dataset.get_local_copy()) + + # We'll search for the yaml file definition in the dataset + yaml_filenames = list(glob.glob(str(dataset_root_path / "*.yaml")) + glob.glob(str(dataset_root_path / "*.yml"))) + if len(yaml_filenames) > 1: + raise ValueError('More than one yaml file was found in the dataset root, cannot determine which one contains ' + 'the dataset definition this way.') + elif len(yaml_filenames) == 0: + raise ValueError('No yaml definition found in dataset root path, check that there is a correct yaml file ' + 'inside the dataset root path.') + with open(yaml_filenames[0]) as f: + dataset_definition = yaml.safe_load(f) + + assert set(dataset_definition.keys()).issuperset( + {'train', 'test', 'val', 'nc', 'names'} + ), "The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')" + + data_dict = dict() + data_dict['train'] = str( + (dataset_root_path / dataset_definition['train']).resolve()) if dataset_definition['train'] else None + data_dict['test'] = str( + (dataset_root_path / dataset_definition['test']).resolve()) if dataset_definition['test'] else None + data_dict['val'] = str( + (dataset_root_path / dataset_definition['val']).resolve()) if dataset_definition['val'] else None + data_dict['nc'] = dataset_definition['nc'] + data_dict['names'] = dataset_definition['names'] + + return data_dict + + +class ClearmlLogger: + """Log training runs, datasets, models, and predictions to ClearML. + + This logger sends information to ClearML at app.clear.ml or to your own hosted server. By default, + this information includes hyperparameters, system configuration and metrics, model metrics, code information and + basic data metrics and analyses. + + By providing additional command line arguments to train.py, datasets, + models and predictions can also be logged. + """ + + def __init__(self, opt, hyp): + """ + - Initialize ClearML Task, this object will capture the experiment + - Upload dataset version to ClearML Data if opt.upload_dataset is True + + arguments: + opt (namespace) -- Commandline arguments for this run + hyp (dict) -- Hyperparameters for this run + + """ + self.current_epoch = 0 + # Keep tracked of amount of logged images to enforce a limit + self.current_epoch_logged_images = set() + # Maximum number of images to log to clearML per epoch + self.max_imgs_to_log_per_epoch = 16 + # Get the interval of epochs when bounding box images should be logged + self.bbox_interval = opt.bbox_interval + self.clearml = clearml + self.task = None + self.data_dict = None + if self.clearml: + self.task = Task.init( + project_name=opt.project if opt.project != 'runs/train' else 'YOLOv5', + task_name=opt.name if opt.name != 'exp' else 'Training', + tags=['YOLOv5'], + output_uri=True, + auto_connect_frameworks={'pytorch': False} + # We disconnect pytorch auto-detection, because we added manual model save points in the code + ) + # ClearML's hooks will already grab all general parameters + # Only the hyperparameters coming from the yaml config file + # will have to be added manually! 
+            self.task.connect(hyp, name='Hyperparameters')
+
+            # Get ClearML Dataset Version if requested
+            if opt.data.startswith('clearml://'):
+                # data_dict should have the following keys:
+                # names, nc (number of classes), test, train, val (all three relative paths to ../datasets)
+                self.data_dict = construct_dataset(opt.data)
+                # Set data to data_dict because wandb will crash without this information and opt is the best way
+                # to give it to them
+                opt.data = self.data_dict
+
+    def log_debug_samples(self, files, title='Debug Samples'):
+        """
+        Log files (images) as debug samples in the ClearML task.
+
+        arguments:
+        files (List(PosixPath)) a list of file paths in PosixPath format
+        title (str) A title that groups together images with the same values
+        """
+        for f in files:
+            if f.exists():
+                it = re.search(r'_batch(\d+)', f.name)
+                iteration = int(it.groups()[0]) if it else 0
+                self.task.get_logger().report_image(title=title,
+                                                    series=f.name.replace(it.group(), ''),
+                                                    local_path=str(f),
+                                                    iteration=iteration)
+
+    def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_threshold=0.25):
+        """
+        Draw the bounding boxes on a single image and report the result as a ClearML debug sample.
+
+        arguments:
+        image_path (PosixPath) the path to the original image file
+        boxes (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]
+        class_names (dict): dict containing mapping of class int to class name
+        image (Tensor): A torch tensor containing the actual image data
+        """
+        if len(self.current_epoch_logged_images) < self.max_imgs_to_log_per_epoch and self.current_epoch >= 0:
+            # Log every bbox_interval times and deduplicate for any intermittent extra eval runs
+            if self.current_epoch % self.bbox_interval == 0 and image_path not in self.current_epoch_logged_images:
+                im = np.ascontiguousarray(np.moveaxis(image.mul(255).clamp(0, 255).byte().cpu().numpy(), 0, 2))
+                annotator = Annotator(im=im, pil=True)
+                for i, (conf, class_nr, box) in enumerate(zip(boxes[:, 4], boxes[:, 5], boxes[:, :4])):
+                    color = colors(i)
+
+                    class_name = class_names[int(class_nr)]
+                    confidence_percentage = round(float(conf) * 100, 2)
+                    label = f"{class_name}: {confidence_percentage}%"
+
+                    if conf > conf_threshold:
+                        annotator.rectangle(box.cpu().numpy(), outline=color)
+                        annotator.box_label(box.cpu().numpy(), label=label, color=color)
+
+                annotated_image = annotator.result()
+                self.task.get_logger().report_image(title='Bounding Boxes',
+                                                    series=image_path.name,
+                                                    iteration=self.current_epoch,
+                                                    image=annotated_image)
+                self.current_epoch_logged_images.add(image_path)
diff --git a/utils/loggers/clearml/hpo.py b/utils/loggers/clearml/hpo.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee518b0fbfc89ee811b51bbf85341eee4f685be1
--- /dev/null
+++ b/utils/loggers/clearml/hpo.py
@@ -0,0 +1,84 @@
+from clearml import Task
+# Connecting ClearML with the current process,
+# from here on everything is logged automatically
+from clearml.automation import HyperParameterOptimizer, UniformParameterRange
+from clearml.automation.optuna import OptimizerOptuna
+
+task = Task.init(project_name='Hyper-Parameter Optimization',
+                 task_name='YOLOv5',
+                 task_type=Task.TaskTypes.optimizer,
+                 reuse_last_task_id=False)
+
+# Example use case:
+optimizer = HyperParameterOptimizer(
+    # This is the experiment we want to optimize
+    base_task_id='',
+    # here we define the hyper-parameters to optimize
+    # Notice: The parameter name should exactly match what you see in the UI: /
+    # For example, here we see in the base experiment a section named "General"
+    # under it a parameter named "batch_size", this becomes "General/batch_size"
+    # If you have `argparse` for example, then arguments will appear under the "Args" section,
+    # and you should instead pass "Args/batch_size"
+    hyper_parameters=[
+        UniformParameterRange('Hyperparameters/lr0', min_value=1e-5, max_value=1e-1),
+        UniformParameterRange('Hyperparameters/lrf', min_value=0.01, max_value=1.0),
+        UniformParameterRange('Hyperparameters/momentum', min_value=0.6, max_value=0.98),
+        UniformParameterRange('Hyperparameters/weight_decay', min_value=0.0, max_value=0.001),
+        UniformParameterRange('Hyperparameters/warmup_epochs', min_value=0.0, max_value=5.0),
+        UniformParameterRange('Hyperparameters/warmup_momentum', min_value=0.0, max_value=0.95),
+        UniformParameterRange('Hyperparameters/warmup_bias_lr', min_value=0.0, max_value=0.2),
+        UniformParameterRange('Hyperparameters/box', min_value=0.02, max_value=0.2),
+        UniformParameterRange('Hyperparameters/cls', min_value=0.2, max_value=4.0),
+        UniformParameterRange('Hyperparameters/cls_pw', min_value=0.5, max_value=2.0),
+        UniformParameterRange('Hyperparameters/obj', min_value=0.2, max_value=4.0),
+        UniformParameterRange('Hyperparameters/obj_pw', min_value=0.5, max_value=2.0),
+        UniformParameterRange('Hyperparameters/iou_t', min_value=0.1, max_value=0.7),
+        UniformParameterRange('Hyperparameters/anchor_t', min_value=2.0, max_value=8.0),
+        UniformParameterRange('Hyperparameters/fl_gamma', min_value=0.0, max_value=4.0),
+        UniformParameterRange('Hyperparameters/hsv_h', min_value=0.0, max_value=0.1),
+        UniformParameterRange('Hyperparameters/hsv_s', min_value=0.0, max_value=0.9),
+        UniformParameterRange('Hyperparameters/hsv_v', min_value=0.0, max_value=0.9),
+        UniformParameterRange('Hyperparameters/degrees', min_value=0.0, max_value=45.0),
+        UniformParameterRange('Hyperparameters/translate', min_value=0.0, max_value=0.9),
+        UniformParameterRange('Hyperparameters/scale', min_value=0.0, max_value=0.9),
+        UniformParameterRange('Hyperparameters/shear', min_value=0.0, max_value=10.0),
+        UniformParameterRange('Hyperparameters/perspective', min_value=0.0, max_value=0.001),
+        UniformParameterRange('Hyperparameters/flipud', min_value=0.0, max_value=1.0),
+        UniformParameterRange('Hyperparameters/fliplr', min_value=0.0, max_value=1.0),
+        UniformParameterRange('Hyperparameters/mosaic', min_value=0.0, max_value=1.0),
+        UniformParameterRange('Hyperparameters/mixup', min_value=0.0, max_value=1.0),
+        UniformParameterRange('Hyperparameters/copy_paste', min_value=0.0, max_value=1.0)],
+    # this is the objective metric we want to maximize/minimize
+    objective_metric_title='metrics',
+    objective_metric_series='mAP_0.5',
+    # now we decide if we want to maximize it or minimize it (accuracy we maximize)
+    objective_metric_sign='max',
+    # let us limit the number of concurrent experiments,
+    # this in turn will make sure we don't bombard the scheduler with experiments.
+ # if we have an auto-scaler connected, this, by proxy, will limit the number of machine + max_number_of_concurrent_tasks=1, + # this is the optimizer class (actually doing the optimization) + # Currently, we can choose from GridSearch, RandomSearch or OptimizerBOHB (Bayesian optimization Hyper-Band) + optimizer_class=OptimizerOptuna, + # If specified only the top K performing Tasks will be kept, the others will be automatically archived + save_top_k_tasks_only=5, # 5, + compute_time_limit=None, + total_max_jobs=20, + min_iteration_per_job=None, + max_iteration_per_job=None, +) + +# report every 10 seconds, this is way too often, but we are testing here +optimizer.set_report_period(10 / 60) +# You can also use the line below instead to run all the optimizer tasks locally, without using queues or agent +# an_optimizer.start_locally(job_complete_callback=job_complete_callback) +# set the time limit for the optimization process (2 hours) +optimizer.set_time_limit(in_minutes=120.0) +# Start the optimization process in the local environment +optimizer.start_locally() +# wait until process is done (notice we are controlling the optimization process in the background) +optimizer.wait() +# make sure background optimization stopped +optimizer.stop() + +print('We are done, good bye') diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b0318f88d6a63a6ba37fd2bf7ec4869084a45966 --- /dev/null +++ b/utils/loggers/comet/__init__.py @@ -0,0 +1,508 @@ +import glob +import json +import logging +import os +import sys +from pathlib import Path + +logger = logging.getLogger(__name__) + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[3] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH + +try: + import comet_ml + + # Project Configuration + config = comet_ml.config.get_config() + COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5") +except (ModuleNotFoundError, ImportError): + comet_ml = None + COMET_PROJECT_NAME = None + +import PIL +import torch +import torchvision.transforms as T +import yaml + +from utils.dataloaders import img2label_paths +from utils.general import check_dataset, scale_boxes, xywh2xyxy +from utils.metrics import box_iou + +COMET_PREFIX = "comet://" + +COMET_MODE = os.getenv("COMET_MODE", "online") + +# Model Saving Settings +COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5") + +# Dataset Artifact Settings +COMET_UPLOAD_DATASET = os.getenv("COMET_UPLOAD_DATASET", "false").lower() == "true" + +# Evaluation Settings +COMET_LOG_CONFUSION_MATRIX = os.getenv("COMET_LOG_CONFUSION_MATRIX", "true").lower() == "true" +COMET_LOG_PREDICTIONS = os.getenv("COMET_LOG_PREDICTIONS", "true").lower() == "true" +COMET_MAX_IMAGE_UPLOADS = int(os.getenv("COMET_MAX_IMAGE_UPLOADS", 100)) + +# Confusion Matrix Settings +CONF_THRES = float(os.getenv("CONF_THRES", 0.001)) +IOU_THRES = float(os.getenv("IOU_THRES", 0.6)) + +# Batch Logging Settings +COMET_LOG_BATCH_METRICS = os.getenv("COMET_LOG_BATCH_METRICS", "false").lower() == "true" +COMET_BATCH_LOGGING_INTERVAL = os.getenv("COMET_BATCH_LOGGING_INTERVAL", 1) +COMET_PREDICTION_LOGGING_INTERVAL = os.getenv("COMET_PREDICTION_LOGGING_INTERVAL", 1) +COMET_LOG_PER_CLASS_METRICS = os.getenv("COMET_LOG_PER_CLASS_METRICS", "false").lower() == "true" + +RANK = int(os.getenv("RANK", -1)) + +to_pil = T.ToPILImage() + + +class CometLogger: + """Log metrics, 
parameters, source code, models and much more + with Comet + """ + + def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwargs) -> None: + self.job_type = job_type + self.opt = opt + self.hyp = hyp + + # Comet Flags + self.comet_mode = COMET_MODE + + self.save_model = opt.save_period > -1 + self.model_name = COMET_MODEL_NAME + + # Batch Logging Settings + self.log_batch_metrics = COMET_LOG_BATCH_METRICS + self.comet_log_batch_interval = COMET_BATCH_LOGGING_INTERVAL + + # Dataset Artifact Settings + self.upload_dataset = self.opt.upload_dataset if self.opt.upload_dataset else COMET_UPLOAD_DATASET + self.resume = self.opt.resume + + # Default parameters to pass to Experiment objects + self.default_experiment_kwargs = { + "log_code": False, + "log_env_gpu": True, + "log_env_cpu": True, + "project_name": COMET_PROJECT_NAME,} + self.default_experiment_kwargs.update(experiment_kwargs) + self.experiment = self._get_experiment(self.comet_mode, run_id) + + self.data_dict = self.check_dataset(self.opt.data) + self.class_names = self.data_dict["names"] + self.num_classes = self.data_dict["nc"] + + self.logged_images_count = 0 + self.max_images = COMET_MAX_IMAGE_UPLOADS + + if run_id is None: + self.experiment.log_other("Created from", "YOLOv5") + if not isinstance(self.experiment, comet_ml.OfflineExperiment): + workspace, project_name, experiment_id = self.experiment.url.split("/")[-3:] + self.experiment.log_other( + "Run Path", + f"{workspace}/{project_name}/{experiment_id}", + ) + self.log_parameters(vars(opt)) + self.log_parameters(self.opt.hyp) + self.log_asset_data( + self.opt.hyp, + name="hyperparameters.json", + metadata={"type": "hyp-config-file"}, + ) + self.log_asset( + f"{self.opt.save_dir}/opt.yaml", + metadata={"type": "opt-config-file"}, + ) + + self.comet_log_confusion_matrix = COMET_LOG_CONFUSION_MATRIX + + if hasattr(self.opt, "conf_thres"): + self.conf_thres = self.opt.conf_thres + else: + self.conf_thres = CONF_THRES + if hasattr(self.opt, "iou_thres"): + self.iou_thres = self.opt.iou_thres + else: + self.iou_thres = IOU_THRES + + self.log_parameters({"val_iou_threshold": self.iou_thres, "val_conf_threshold": self.conf_thres}) + + self.comet_log_predictions = COMET_LOG_PREDICTIONS + if self.opt.bbox_interval == -1: + self.comet_log_prediction_interval = 1 if self.opt.epochs < 10 else self.opt.epochs // 10 + else: + self.comet_log_prediction_interval = self.opt.bbox_interval + + if self.comet_log_predictions: + self.metadata_dict = {} + self.logged_image_names = [] + + self.comet_log_per_class_metrics = COMET_LOG_PER_CLASS_METRICS + + self.experiment.log_others({ + "comet_mode": COMET_MODE, + "comet_max_image_uploads": COMET_MAX_IMAGE_UPLOADS, + "comet_log_per_class_metrics": COMET_LOG_PER_CLASS_METRICS, + "comet_log_batch_metrics": COMET_LOG_BATCH_METRICS, + "comet_log_confusion_matrix": COMET_LOG_CONFUSION_MATRIX, + "comet_model_name": COMET_MODEL_NAME,}) + + # Check if running the Experiment with the Comet Optimizer + if hasattr(self.opt, "comet_optimizer_id"): + self.experiment.log_other("optimizer_id", self.opt.comet_optimizer_id) + self.experiment.log_other("optimizer_objective", self.opt.comet_optimizer_objective) + self.experiment.log_other("optimizer_metric", self.opt.comet_optimizer_metric) + self.experiment.log_other("optimizer_parameters", json.dumps(self.hyp)) + + def _get_experiment(self, mode, experiment_id=None): + if mode == "offline": + if experiment_id is not None: + return comet_ml.ExistingOfflineExperiment( + 
previous_experiment=experiment_id, + **self.default_experiment_kwargs, + ) + + return comet_ml.OfflineExperiment(**self.default_experiment_kwargs,) + + else: + try: + if experiment_id is not None: + return comet_ml.ExistingExperiment( + previous_experiment=experiment_id, + **self.default_experiment_kwargs, + ) + + return comet_ml.Experiment(**self.default_experiment_kwargs) + + except ValueError: + logger.warning("COMET WARNING: " + "Comet credentials have not been set. " + "Comet will default to offline logging. " + "Please set your credentials to enable online logging.") + return self._get_experiment("offline", experiment_id) + + return + + def log_metrics(self, log_dict, **kwargs): + self.experiment.log_metrics(log_dict, **kwargs) + + def log_parameters(self, log_dict, **kwargs): + self.experiment.log_parameters(log_dict, **kwargs) + + def log_asset(self, asset_path, **kwargs): + self.experiment.log_asset(asset_path, **kwargs) + + def log_asset_data(self, asset, **kwargs): + self.experiment.log_asset_data(asset, **kwargs) + + def log_image(self, img, **kwargs): + self.experiment.log_image(img, **kwargs) + + def log_model(self, path, opt, epoch, fitness_score, best_model=False): + if not self.save_model: + return + + model_metadata = { + "fitness_score": fitness_score[-1], + "epochs_trained": epoch + 1, + "save_period": opt.save_period, + "total_epochs": opt.epochs,} + + model_files = glob.glob(f"{path}/*.pt") + for model_path in model_files: + name = Path(model_path).name + + self.experiment.log_model( + self.model_name, + file_or_folder=model_path, + file_name=name, + metadata=model_metadata, + overwrite=True, + ) + + def check_dataset(self, data_file): + with open(data_file) as f: + data_config = yaml.safe_load(f) + + if data_config['path'].startswith(COMET_PREFIX): + path = data_config['path'].replace(COMET_PREFIX, "") + data_dict = self.download_dataset_artifact(path) + + return data_dict + + self.log_asset(self.opt.data, metadata={"type": "data-config-file"}) + + return check_dataset(data_file) + + def log_predictions(self, image, labelsn, path, shape, predn): + if self.logged_images_count >= self.max_images: + return + detections = predn[predn[:, 4] > self.conf_thres] + iou = box_iou(labelsn[:, 1:], detections[:, :4]) + mask, _ = torch.where(iou > self.iou_thres) + if len(mask) == 0: + return + + filtered_detections = detections[mask] + filtered_labels = labelsn[mask] + + image_id = path.split("/")[-1].split(".")[0] + image_name = f"{image_id}_curr_epoch_{self.experiment.curr_epoch}" + if image_name not in self.logged_image_names: + native_scale_image = PIL.Image.open(path) + self.log_image(native_scale_image, name=image_name) + self.logged_image_names.append(image_name) + + metadata = [] + for cls, *xyxy in filtered_labels.tolist(): + metadata.append({ + "label": f"{self.class_names[int(cls)]}-gt", + "score": 100, + "box": { + "x": xyxy[0], + "y": xyxy[1], + "x2": xyxy[2], + "y2": xyxy[3]},}) + for *xyxy, conf, cls in filtered_detections.tolist(): + metadata.append({ + "label": f"{self.class_names[int(cls)]}", + "score": conf * 100, + "box": { + "x": xyxy[0], + "y": xyxy[1], + "x2": xyxy[2], + "y2": xyxy[3]},}) + + self.metadata_dict[image_name] = metadata + self.logged_images_count += 1 + + return + + def preprocess_prediction(self, image, labels, shape, pred): + nl, _ = labels.shape[0], pred.shape[0] + + # Predictions + if self.opt.single_cls: + pred[:, 5] = 0 + + predn = pred.clone() + scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1]) + + labelsn = None + if nl: + 
tbox = xywh2xyxy(labels[:, 1:5]) # target boxes + scale_boxes(image.shape[1:], tbox, shape[0], shape[1]) # native-space labels + labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels + scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1]) # native-space pred + + return predn, labelsn + + def add_assets_to_artifact(self, artifact, path, asset_path, split): + img_paths = sorted(glob.glob(f"{asset_path}/*")) + label_paths = img2label_paths(img_paths) + + for image_file, label_file in zip(img_paths, label_paths): + image_logical_path, label_logical_path = map(lambda x: os.path.relpath(x, path), [image_file, label_file]) + + try: + artifact.add(image_file, logical_path=image_logical_path, metadata={"split": split}) + artifact.add(label_file, logical_path=label_logical_path, metadata={"split": split}) + except ValueError as e: + logger.error('COMET ERROR: Error adding file to Artifact. Skipping file.') + logger.error(f"COMET ERROR: {e}") + continue + + return artifact + + def upload_dataset_artifact(self): + dataset_name = self.data_dict.get("dataset_name", "yolov5-dataset") + path = str((ROOT / Path(self.data_dict["path"])).resolve()) + + metadata = self.data_dict.copy() + for key in ["train", "val", "test"]: + split_path = metadata.get(key) + if split_path is not None: + metadata[key] = split_path.replace(path, "") + + artifact = comet_ml.Artifact(name=dataset_name, artifact_type="dataset", metadata=metadata) + for key in metadata.keys(): + if key in ["train", "val", "test"]: + if isinstance(self.upload_dataset, str) and (key != self.upload_dataset): + continue + + asset_path = self.data_dict.get(key) + if asset_path is not None: + artifact = self.add_assets_to_artifact(artifact, path, asset_path, key) + + self.experiment.log_artifact(artifact) + + return + + def download_dataset_artifact(self, artifact_path): + logged_artifact = self.experiment.get_artifact(artifact_path) + artifact_save_dir = str(Path(self.opt.save_dir) / logged_artifact.name) + logged_artifact.download(artifact_save_dir) + + metadata = logged_artifact.metadata + data_dict = metadata.copy() + data_dict["path"] = artifact_save_dir + + metadata_names = metadata.get("names") + if type(metadata_names) == dict: + data_dict["names"] = {int(k): v for k, v in metadata.get("names").items()} + elif type(metadata_names) == list: + data_dict["names"] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)} + else: + raise "Invalid 'names' field in dataset yaml file. 
Please use a list or dictionary" + + data_dict = self.update_data_paths(data_dict) + return data_dict + + def update_data_paths(self, data_dict): + path = data_dict.get("path", "") + + for split in ["train", "val", "test"]: + if data_dict.get(split): + split_path = data_dict.get(split) + data_dict[split] = (f"{path}/{split_path}" if isinstance(split, str) else [ + f"{path}/{x}" for x in split_path]) + + return data_dict + + def on_pretrain_routine_end(self, paths): + if self.opt.resume: + return + + for path in paths: + self.log_asset(str(path)) + + if self.upload_dataset: + if not self.resume: + self.upload_dataset_artifact() + + return + + def on_train_start(self): + self.log_parameters(self.hyp) + + def on_train_epoch_start(self): + return + + def on_train_epoch_end(self, epoch): + self.experiment.curr_epoch = epoch + + return + + def on_train_batch_start(self): + return + + def on_train_batch_end(self, log_dict, step): + self.experiment.curr_step = step + if self.log_batch_metrics and (step % self.comet_log_batch_interval == 0): + self.log_metrics(log_dict, step=step) + + return + + def on_train_end(self, files, save_dir, last, best, epoch, results): + if self.comet_log_predictions: + curr_epoch = self.experiment.curr_epoch + self.experiment.log_asset_data(self.metadata_dict, "image-metadata.json", epoch=curr_epoch) + + for f in files: + self.log_asset(f, metadata={"epoch": epoch}) + self.log_asset(f"{save_dir}/results.csv", metadata={"epoch": epoch}) + + if not self.opt.evolve: + model_path = str(best if best.exists() else last) + name = Path(model_path).name + if self.save_model: + self.experiment.log_model( + self.model_name, + file_or_folder=model_path, + file_name=name, + overwrite=True, + ) + + # Check if running Experiment with Comet Optimizer + if hasattr(self.opt, 'comet_optimizer_id'): + metric = results.get(self.opt.comet_optimizer_metric) + self.experiment.log_other('optimizer_metric_value', metric) + + self.finish_run() + + def on_val_start(self): + return + + def on_val_batch_start(self): + return + + def on_val_batch_end(self, batch_i, images, targets, paths, shapes, outputs): + if not (self.comet_log_predictions and ((batch_i + 1) % self.comet_log_prediction_interval == 0)): + return + + for si, pred in enumerate(outputs): + if len(pred) == 0: + continue + + image = images[si] + labels = targets[targets[:, 0] == si, 1:] + shape = shapes[si] + path = paths[si] + predn, labelsn = self.preprocess_prediction(image, labels, shape, pred) + if labelsn is not None: + self.log_predictions(image, labelsn, path, shape, predn) + + return + + def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix): + if self.comet_log_per_class_metrics: + if self.num_classes > 1: + for i, c in enumerate(ap_class): + class_name = self.class_names[c] + self.experiment.log_metrics( + { + 'mAP@.5': ap50[i], + 'mAP@.5:.95': ap[i], + 'precision': p[i], + 'recall': r[i], + 'f1': f1[i], + 'true_positives': tp[i], + 'false_positives': fp[i], + 'support': nt[c]}, + prefix=class_name) + + if self.comet_log_confusion_matrix: + epoch = self.experiment.curr_epoch + class_names = list(self.class_names.values()) + class_names.append("background") + num_classes = len(class_names) + + self.experiment.log_confusion_matrix( + matrix=confusion_matrix.matrix, + max_categories=num_classes, + labels=class_names, + epoch=epoch, + column_label='Actual Category', + row_label='Predicted Category', + file_name=f"confusion-matrix-epoch-{epoch}.json", + ) + + def on_fit_epoch_end(self, result, epoch): + 
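+ # Log the aggregated per-epoch results dict (as assembled by the calling
+ # training loop) to Comet, keyed to the current epoch.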
self.log_metrics(result, epoch=epoch) + + def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): + if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1: + self.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi) + + def on_params_update(self, params): + self.log_parameters(params) + + def finish_run(self): + self.experiment.end() diff --git a/utils/loggers/comet/__pycache__/__init__.cpython-310.pyc b/utils/loggers/comet/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..edc1369fb4657b6d1d270b7e857c5913373ceb4a Binary files /dev/null and b/utils/loggers/comet/__pycache__/__init__.cpython-310.pyc differ diff --git a/utils/loggers/comet/__pycache__/comet_utils.cpython-310.pyc b/utils/loggers/comet/__pycache__/comet_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf6b492a3accd1122177ef7e54da49b2ef3c93cd Binary files /dev/null and b/utils/loggers/comet/__pycache__/comet_utils.cpython-310.pyc differ diff --git a/utils/loggers/comet/comet_utils.py b/utils/loggers/comet/comet_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3cbd45156b576d09024fd11ea9dce83d4a6e5143 --- /dev/null +++ b/utils/loggers/comet/comet_utils.py @@ -0,0 +1,150 @@ +import logging +import os +from urllib.parse import urlparse + +try: + import comet_ml +except (ModuleNotFoundError, ImportError): + comet_ml = None + +import yaml + +logger = logging.getLogger(__name__) + +COMET_PREFIX = "comet://" +COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5") +COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv("COMET_DEFAULT_CHECKPOINT_FILENAME", "last.pt") + + +def download_model_checkpoint(opt, experiment): + model_dir = f"{opt.project}/{experiment.name}" + os.makedirs(model_dir, exist_ok=True) + + model_name = COMET_MODEL_NAME + model_asset_list = experiment.get_model_asset_list(model_name) + + if len(model_asset_list) == 0: + logger.error(f"COMET ERROR: No checkpoints found for model name : {model_name}") + return + + model_asset_list = sorted( + model_asset_list, + key=lambda x: x["step"], + reverse=True, + ) + logged_checkpoint_map = {asset["fileName"]: asset["assetId"] for asset in model_asset_list} + + resource_url = urlparse(opt.weights) + checkpoint_filename = resource_url.query + + if checkpoint_filename: + asset_id = logged_checkpoint_map.get(checkpoint_filename) + else: + asset_id = logged_checkpoint_map.get(COMET_DEFAULT_CHECKPOINT_FILENAME) + checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME + + if asset_id is None: + logger.error(f"COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment") + return + + try: + logger.info(f"COMET INFO: Downloading checkpoint {checkpoint_filename}") + asset_filename = checkpoint_filename + + model_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) + model_download_path = f"{model_dir}/{asset_filename}" + with open(model_download_path, "wb") as f: + f.write(model_binary) + + opt.weights = model_download_path + + except Exception as e: + logger.warning("COMET WARNING: Unable to download checkpoint from Comet") + logger.exception(e) + + +def set_opt_parameters(opt, experiment): + """Update the opts Namespace with parameters + from Comet's ExistingExperiment when resuming a run + + Args: + opt (argparse.Namespace): Namespace of command line options + experiment (comet_ml.APIExperiment): Comet API Experiment object + """ + asset_list = 
experiment.get_asset_list() + resume_string = opt.resume + + for asset in asset_list: + if asset["fileName"] == "opt.yaml": + asset_id = asset["assetId"] + asset_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) + opt_dict = yaml.safe_load(asset_binary) + for key, value in opt_dict.items(): + setattr(opt, key, value) + opt.resume = resume_string + + # Save hyperparameters to YAML file + # Necessary to pass checks in training script + save_dir = f"{opt.project}/{experiment.name}" + os.makedirs(save_dir, exist_ok=True) + + hyp_yaml_path = f"{save_dir}/hyp.yaml" + with open(hyp_yaml_path, "w") as f: + yaml.dump(opt.hyp, f) + opt.hyp = hyp_yaml_path + + +def check_comet_weights(opt): + """Downloads model weights from Comet and updates the + weights path to point to saved weights location + + Args: + opt (argparse.Namespace): Command Line arguments passed + to YOLOv5 training script + + Returns: + None/bool: Return True if weights are successfully downloaded + else return None + """ + if comet_ml is None: + return + + if isinstance(opt.weights, str): + if opt.weights.startswith(COMET_PREFIX): + api = comet_ml.API() + resource = urlparse(opt.weights) + experiment_path = f"{resource.netloc}{resource.path}" + experiment = api.get(experiment_path) + download_model_checkpoint(opt, experiment) + return True + + return None + + +def check_comet_resume(opt): + """Restores run parameters to its original state based on the model checkpoint + and logged Experiment parameters. + + Args: + opt (argparse.Namespace): Command Line arguments passed + to YOLOv5 training script + + Returns: + None/bool: Return True if the run is restored successfully + else return None + """ + if comet_ml is None: + return + + if isinstance(opt.resume, str): + if opt.resume.startswith(COMET_PREFIX): + api = comet_ml.API() + resource = urlparse(opt.resume) + experiment_path = f"{resource.netloc}{resource.path}" + experiment = api.get(experiment_path) + set_opt_parameters(opt, experiment) + download_model_checkpoint(opt, experiment) + + return True + + return None diff --git a/utils/loggers/comet/hpo.py b/utils/loggers/comet/hpo.py new file mode 100644 index 0000000000000000000000000000000000000000..7dd5c92e8de170222b3cd3eae858f4f3cfddaff6 --- /dev/null +++ b/utils/loggers/comet/hpo.py @@ -0,0 +1,118 @@ +import argparse +import json +import logging +import os +import sys +from pathlib import Path + +import comet_ml + +logger = logging.getLogger(__name__) + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[3] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH + +from train import train +from utils.callbacks import Callbacks +from utils.general import increment_path +from utils.torch_utils import select_device + +# Project Configuration +config = comet_ml.config.get_config() +COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5") + + +def get_args(known=False): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path') + parser.add_argument('--cfg', type=str, default='', help='model.yaml path') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') + parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') + parser.add_argument('--epochs', type=int, default=300, help='total training epochs') + 
parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') + parser.add_argument('--rect', action='store_true', help='rectangular training') + parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') + parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') + parser.add_argument('--noval', action='store_true', help='only validate final epoch') + parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') + parser.add_argument('--noplots', action='store_true', help='save no plot files') + parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') + parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') + parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') + parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') + parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') + parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer') + parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--quad', action='store_true', help='quad dataloader') + parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') + parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') + parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') + parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') + parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') + parser.add_argument('--seed', type=int, default=0, help='Global training seed') + parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') + + # Weights & Biases arguments + parser.add_argument('--entity', default=None, help='W&B: Entity') + parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option') + parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') + parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') + + # Comet Arguments + parser.add_argument("--comet_optimizer_config", type=str, help="Comet: Path to a Comet Optimizer Config File.") + parser.add_argument("--comet_optimizer_id", type=str, 
help="Comet: ID of the Comet Optimizer sweep.") + parser.add_argument("--comet_optimizer_objective", type=str, help="Comet: Set to 'minimize' or 'maximize'.") + parser.add_argument("--comet_optimizer_metric", type=str, help="Comet: Metric to Optimize.") + parser.add_argument("--comet_optimizer_workers", + type=int, + default=1, + help="Comet: Number of Parallel Workers to use with the Comet Optimizer.") + + return parser.parse_known_args()[0] if known else parser.parse_args() + + +def run(parameters, opt): + hyp_dict = {k: v for k, v in parameters.items() if k not in ["epochs", "batch_size"]} + + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) + opt.batch_size = parameters.get("batch_size") + opt.epochs = parameters.get("epochs") + + device = select_device(opt.device, batch_size=opt.batch_size) + train(hyp_dict, opt, device, callbacks=Callbacks()) + + +if __name__ == "__main__": + opt = get_args(known=True) + + opt.weights = str(opt.weights) + opt.cfg = str(opt.cfg) + opt.data = str(opt.data) + opt.project = str(opt.project) + + optimizer_id = os.getenv("COMET_OPTIMIZER_ID") + if optimizer_id is None: + with open(opt.comet_optimizer_config) as f: + optimizer_config = json.load(f) + optimizer = comet_ml.Optimizer(optimizer_config) + else: + optimizer = comet_ml.Optimizer(optimizer_id) + + opt.comet_optimizer_id = optimizer.id + status = optimizer.status() + + opt.comet_optimizer_objective = status["spec"]["objective"] + opt.comet_optimizer_metric = status["spec"]["metric"] + + logger.info("COMET INFO: Starting Hyperparameter Sweep") + for parameter in optimizer.get_parameters(): + run(parameter["parameters"], opt) diff --git a/utils/loggers/comet/optimizer_config.json b/utils/loggers/comet/optimizer_config.json new file mode 100644 index 0000000000000000000000000000000000000000..83ddddab6f2084b4bdf84dca1e61696de200d1b8 --- /dev/null +++ b/utils/loggers/comet/optimizer_config.json @@ -0,0 +1,209 @@ +{ + "algorithm": "random", + "parameters": { + "anchor_t": { + "type": "discrete", + "values": [ + 2, + 8 + ] + }, + "batch_size": { + "type": "discrete", + "values": [ + 16, + 32, + 64 + ] + }, + "box": { + "type": "discrete", + "values": [ + 0.02, + 0.2 + ] + }, + "cls": { + "type": "discrete", + "values": [ + 0.2 + ] + }, + "cls_pw": { + "type": "discrete", + "values": [ + 0.5 + ] + }, + "copy_paste": { + "type": "discrete", + "values": [ + 1 + ] + }, + "degrees": { + "type": "discrete", + "values": [ + 0, + 45 + ] + }, + "epochs": { + "type": "discrete", + "values": [ + 5 + ] + }, + "fl_gamma": { + "type": "discrete", + "values": [ + 0 + ] + }, + "fliplr": { + "type": "discrete", + "values": [ + 0 + ] + }, + "flipud": { + "type": "discrete", + "values": [ + 0 + ] + }, + "hsv_h": { + "type": "discrete", + "values": [ + 0 + ] + }, + "hsv_s": { + "type": "discrete", + "values": [ + 0 + ] + }, + "hsv_v": { + "type": "discrete", + "values": [ + 0 + ] + }, + "iou_t": { + "type": "discrete", + "values": [ + 0.7 + ] + }, + "lr0": { + "type": "discrete", + "values": [ + 1e-05, + 0.1 + ] + }, + "lrf": { + "type": "discrete", + "values": [ + 0.01, + 1 + ] + }, + "mixup": { + "type": "discrete", + "values": [ + 1 + ] + }, + "momentum": { + "type": "discrete", + "values": [ + 0.6 + ] + }, + "mosaic": { + "type": "discrete", + "values": [ + 0 + ] + }, + "obj": { + "type": "discrete", + "values": [ + 0.2 + ] + }, + "obj_pw": { + "type": "discrete", + "values": [ + 0.5 + ] + }, + "optimizer": { + "type": "categorical", + "values": [ + "SGD", + "Adam", + 
"AdamW" + ] + }, + "perspective": { + "type": "discrete", + "values": [ + 0 + ] + }, + "scale": { + "type": "discrete", + "values": [ + 0 + ] + }, + "shear": { + "type": "discrete", + "values": [ + 0 + ] + }, + "translate": { + "type": "discrete", + "values": [ + 0 + ] + }, + "warmup_bias_lr": { + "type": "discrete", + "values": [ + 0, + 0.2 + ] + }, + "warmup_epochs": { + "type": "discrete", + "values": [ + 5 + ] + }, + "warmup_momentum": { + "type": "discrete", + "values": [ + 0, + 0.95 + ] + }, + "weight_decay": { + "type": "discrete", + "values": [ + 0, + 0.001 + ] + } + }, + "spec": { + "maxCombo": 0, + "metric": "metrics/mAP_0.5", + "objective": "maximize" + }, + "trials": 1 +} diff --git a/utils/loggers/wandb/__init__.py b/utils/loggers/wandb/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..84952a8167bc2975913a6def6b4f027d566552a9 --- /dev/null +++ b/utils/loggers/wandb/__init__.py @@ -0,0 +1 @@ +# init \ No newline at end of file diff --git a/utils/loggers/wandb/__pycache__/__init__.cpython-310.pyc b/utils/loggers/wandb/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e699a72b24b0a5736630b06fb7f7a9bb95621341 Binary files /dev/null and b/utils/loggers/wandb/__pycache__/__init__.cpython-310.pyc differ diff --git a/utils/loggers/wandb/__pycache__/wandb_utils.cpython-310.pyc b/utils/loggers/wandb/__pycache__/wandb_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..123e66daba817993263c2b3bf2763e1f72b904a8 Binary files /dev/null and b/utils/loggers/wandb/__pycache__/wandb_utils.cpython-310.pyc differ diff --git a/utils/loggers/wandb/log_dataset.py b/utils/loggers/wandb/log_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..06e81fb693072c99703e5c52b169892b7fd9a8cc --- /dev/null +++ b/utils/loggers/wandb/log_dataset.py @@ -0,0 +1,27 @@ +import argparse + +from wandb_utils import WandbLogger + +from utils.general import LOGGER + +WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' + + +def create_dataset_artifact(opt): + logger = WandbLogger(opt, None, job_type='Dataset Creation') # TODO: return value unused + if not logger.wandb: + LOGGER.info("install wandb using `pip install wandb` to log the dataset") + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') + parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') + parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') + parser.add_argument('--entity', default=None, help='W&B entity') + parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run') + + opt = parser.parse_args() + opt.resume = False # Explicitly disallow resume check for dataset upload job + + create_dataset_artifact(opt) diff --git a/utils/loggers/wandb/sweep.py b/utils/loggers/wandb/sweep.py new file mode 100644 index 0000000000000000000000000000000000000000..d49ea6f2778b2e87d0f535c2b3595ccceebab459 --- /dev/null +++ b/utils/loggers/wandb/sweep.py @@ -0,0 +1,41 @@ +import sys +from pathlib import Path + +import wandb + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[3] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH + +from train import parse_opt, train +from utils.callbacks import Callbacks +from utils.general import increment_path +from utils.torch_utils 
import select_device + + +def sweep(): + wandb.init() + # Get hyp dict from sweep agent. Copy because train() modifies parameters which confused wandb. + hyp_dict = vars(wandb.config).get("_items").copy() + + # Workaround: get necessary opt args + opt = parse_opt(known=True) + opt.batch_size = hyp_dict.get("batch_size") + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) + opt.epochs = hyp_dict.get("epochs") + opt.nosave = True + opt.data = hyp_dict.get("data") + opt.weights = str(opt.weights) + opt.cfg = str(opt.cfg) + opt.data = str(opt.data) + opt.hyp = str(opt.hyp) + opt.project = str(opt.project) + device = select_device(opt.device, batch_size=opt.batch_size) + + # train + train(hyp_dict, opt, device, callbacks=Callbacks()) + + +if __name__ == "__main__": + sweep() diff --git a/utils/loggers/wandb/sweep.yaml b/utils/loggers/wandb/sweep.yaml new file mode 100644 index 0000000000000000000000000000000000000000..688b1ea0285f42e779d301ba910bf4e9fe50305c --- /dev/null +++ b/utils/loggers/wandb/sweep.yaml @@ -0,0 +1,143 @@ +# Hyperparameters for training +# To set range- +# Provide min and max values as: +# parameter: +# +# min: scalar +# max: scalar +# OR +# +# Set a specific list of search space- +# parameter: +# values: [scalar1, scalar2, scalar3...] +# +# You can use grid, bayesian and hyperopt search strategy +# For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration + +program: utils/loggers/wandb/sweep.py +method: random +metric: + name: metrics/mAP_0.5 + goal: maximize + +parameters: + # hyperparameters: set either min, max range or values list + data: + value: "data/coco128.yaml" + batch_size: + values: [64] + epochs: + values: [10] + + lr0: + distribution: uniform + min: 1e-5 + max: 1e-1 + lrf: + distribution: uniform + min: 0.01 + max: 1.0 + momentum: + distribution: uniform + min: 0.6 + max: 0.98 + weight_decay: + distribution: uniform + min: 0.0 + max: 0.001 + warmup_epochs: + distribution: uniform + min: 0.0 + max: 5.0 + warmup_momentum: + distribution: uniform + min: 0.0 + max: 0.95 + warmup_bias_lr: + distribution: uniform + min: 0.0 + max: 0.2 + box: + distribution: uniform + min: 0.02 + max: 0.2 + cls: + distribution: uniform + min: 0.2 + max: 4.0 + cls_pw: + distribution: uniform + min: 0.5 + max: 2.0 + obj: + distribution: uniform + min: 0.2 + max: 4.0 + obj_pw: + distribution: uniform + min: 0.5 + max: 2.0 + iou_t: + distribution: uniform + min: 0.1 + max: 0.7 + anchor_t: + distribution: uniform + min: 2.0 + max: 8.0 + fl_gamma: + distribution: uniform + min: 0.0 + max: 4.0 + hsv_h: + distribution: uniform + min: 0.0 + max: 0.1 + hsv_s: + distribution: uniform + min: 0.0 + max: 0.9 + hsv_v: + distribution: uniform + min: 0.0 + max: 0.9 + degrees: + distribution: uniform + min: 0.0 + max: 45.0 + translate: + distribution: uniform + min: 0.0 + max: 0.9 + scale: + distribution: uniform + min: 0.0 + max: 0.9 + shear: + distribution: uniform + min: 0.0 + max: 10.0 + perspective: + distribution: uniform + min: 0.0 + max: 0.001 + flipud: + distribution: uniform + min: 0.0 + max: 1.0 + fliplr: + distribution: uniform + min: 0.0 + max: 1.0 + mosaic: + distribution: uniform + min: 0.0 + max: 1.0 + mixup: + distribution: uniform + min: 0.0 + max: 1.0 + copy_paste: + distribution: uniform + min: 0.0 + max: 1.0 diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py new file mode 100644 index 
0000000000000000000000000000000000000000..238f4edbf2a0ddf34c024fbb6775c71dd19e18aa --- /dev/null +++ b/utils/loggers/wandb/wandb_utils.py @@ -0,0 +1,589 @@ +"""Utilities and tools for tracking runs with Weights & Biases.""" + +import logging +import os +import sys +from contextlib import contextmanager +from pathlib import Path +from typing import Dict + +import yaml +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[3] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH + +from utils.dataloaders import LoadImagesAndLabels, img2label_paths +from utils.general import LOGGER, check_dataset, check_file + +try: + import wandb + + assert hasattr(wandb, '__version__') # verify package import not local dir +except (ImportError, AssertionError): + wandb = None + +RANK = int(os.getenv('RANK', -1)) +WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' + + +def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX): + return from_string[len(prefix):] + + +def check_wandb_config_file(data_config_file): + wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated data.yaml path + if Path(wandb_config).is_file(): + return wandb_config + return data_config_file + + +def check_wandb_dataset(data_file): + is_trainset_wandb_artifact = False + is_valset_wandb_artifact = False + if isinstance(data_file, dict): + # In that case another dataset manager has already processed it and we don't have to + return data_file + if check_file(data_file) and data_file.endswith('.yaml'): + with open(data_file, errors='ignore') as f: + data_dict = yaml.safe_load(f) + is_trainset_wandb_artifact = isinstance(data_dict['train'], + str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX) + is_valset_wandb_artifact = isinstance(data_dict['val'], + str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX) + if is_trainset_wandb_artifact or is_valset_wandb_artifact: + return data_dict + else: + return check_dataset(data_file) + + +def get_run_info(run_path): + run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX)) + run_id = run_path.stem + project = run_path.parent.stem + entity = run_path.parent.parent.stem + model_artifact_name = 'run_' + run_id + '_model' + return entity, project, run_id, model_artifact_name + + +def check_wandb_resume(opt): + process_wandb_config_ddp_mode(opt) if RANK not in [-1, 0] else None + if isinstance(opt.resume, str): + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + if RANK not in [-1, 0]: # For resuming DDP runs + entity, project, run_id, model_artifact_name = get_run_info(opt.resume) + api = wandb.Api() + artifact = api.artifact(entity + '/' + project + '/' + model_artifact_name + ':latest') + modeldir = artifact.download() + opt.weights = str(Path(modeldir) / "last.pt") + return True + return None + + +def process_wandb_config_ddp_mode(opt): + with open(check_file(opt.data), errors='ignore') as f: + data_dict = yaml.safe_load(f) # data dict + train_dir, val_dir = None, None + if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): + api = wandb.Api() + train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias) + train_dir = train_artifact.download() + train_path = Path(train_dir) / 'data/images/' + data_dict['train'] = str(train_path) + + if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX): + api = wandb.Api() + val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + 
opt.artifact_alias) + val_dir = val_artifact.download() + val_path = Path(val_dir) / 'data/images/' + data_dict['val'] = str(val_path) + if train_dir or val_dir: + ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml') + with open(ddp_data_path, 'w') as f: + yaml.safe_dump(data_dict, f) + opt.data = ddp_data_path + + +class WandbLogger(): + """Log training runs, datasets, models, and predictions to Weights & Biases. + + This logger sends information to W&B at wandb.ai. By default, this information + includes hyperparameters, system configuration and metrics, model metrics, + and basic data metrics and analyses. + + By providing additional command line arguments to train.py, datasets, + models and predictions can also be logged. + + For more on how this logger is used, see the Weights & Biases documentation: + https://docs.wandb.com/guides/integrations/yolov5 + """ + + def __init__(self, opt, run_id=None, job_type='Training'): + """ + - Initialize WandbLogger instance + - Upload dataset if opt.upload_dataset is True + - Setup training processes if job_type is 'Training' + + arguments: + opt (namespace) -- Commandline arguments for this run + run_id (str) -- Run ID of W&B run to be resumed + job_type (str) -- To set the job_type for this run + + """ + # Temporary-fix + if opt.upload_dataset: + opt.upload_dataset = False + # LOGGER.info("Uploading Dataset functionality is not being supported temporarily due to a bug.") + + # Pre-training routine -- + self.job_type = job_type + self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run + self.val_artifact, self.train_artifact = None, None + self.train_artifact_path, self.val_artifact_path = None, None + self.result_artifact = None + self.val_table, self.result_table = None, None + self.bbox_media_panel_images = [] + self.val_table_path_map = None + self.max_imgs_to_log = 16 + self.wandb_artifact_data_dict = None + self.data_dict = None + # It's more elegant to stick to 1 wandb.init call, + # but useful config data is overwritten in the WandbLogger's wandb.init call + if isinstance(opt.resume, str): # checks resume from artifact + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + entity, project, run_id, model_artifact_name = get_run_info(opt.resume) + model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name + assert wandb, 'install wandb to resume wandb runs' + # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config + self.wandb_run = wandb.init(id=run_id, + project=project, + entity=entity, + resume='allow', + allow_val_change=True) + opt.resume = model_artifact_name + elif self.wandb: + self.wandb_run = wandb.init(config=opt, + resume="allow", + project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, + entity=opt.entity, + name=opt.name if opt.name != 'exp' else None, + job_type=job_type, + id=run_id, + allow_val_change=True) if not wandb.run else wandb.run + if self.wandb_run: + if self.job_type == 'Training': + if opt.upload_dataset: + if not opt.resume: + self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt) + + if isinstance(opt.data, dict): + # This means another dataset manager has already processed the dataset info (e.g. 
ClearML) + # and they will have stored the already processed dict in opt.data + self.data_dict = opt.data + elif opt.resume: + # resume from artifact + if isinstance(opt.resume, str) and opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + self.data_dict = dict(self.wandb_run.config.data_dict) + else: # local resume + self.data_dict = check_wandb_dataset(opt.data) + else: + self.data_dict = check_wandb_dataset(opt.data) + self.wandb_artifact_data_dict = self.wandb_artifact_data_dict or self.data_dict + + # write data_dict to config. useful for resuming from artifacts. Do this only when not resuming. + self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict}, allow_val_change=True) + self.setup_training(opt) + + if self.job_type == 'Dataset Creation': + self.wandb_run.config.update({"upload_dataset": True}) + self.data_dict = self.check_and_upload_dataset(opt) + + def check_and_upload_dataset(self, opt): + """ + Check if the dataset format is compatible and upload it as W&B artifact + + arguments: + opt (namespace)-- Commandline arguments for current run + + returns: + Updated dataset info dictionary where local dataset paths are replaced by WAND_ARFACT_PREFIX links. + """ + assert wandb, 'Install wandb to upload dataset' + config_path = self.log_dataset_artifact(opt.data, opt.single_cls, + 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) + with open(config_path, errors='ignore') as f: + wandb_data_dict = yaml.safe_load(f) + return wandb_data_dict + + def setup_training(self, opt): + """ + Setup the necessary processes for training YOLO models: + - Attempt to download model checkpoint and dataset artifacts if opt.resume stats with WANDB_ARTIFACT_PREFIX + - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded + - Setup log_dict, initialize bbox_interval + + arguments: + opt (namespace) -- commandline arguments for this run + + """ + self.log_dict, self.current_epoch = {}, 0 + self.bbox_interval = opt.bbox_interval + if isinstance(opt.resume, str): + modeldir, _ = self.download_model_artifact(opt) + if modeldir: + self.weights = Path(modeldir) / "last.pt" + config = self.wandb_run.config + opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp, opt.imgsz = str( + self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs,\ + config.hyp, config.imgsz + data_dict = self.data_dict + if self.val_artifact is None: # If --upload_dataset is set, use the existing artifact, don't download + self.train_artifact_path, self.train_artifact = self.download_dataset_artifact( + data_dict.get('train'), opt.artifact_alias) + self.val_artifact_path, self.val_artifact = self.download_dataset_artifact( + data_dict.get('val'), opt.artifact_alias) + + if self.train_artifact_path is not None: + train_path = Path(self.train_artifact_path) / 'data/images/' + data_dict['train'] = str(train_path) + if self.val_artifact_path is not None: + val_path = Path(self.val_artifact_path) / 'data/images/' + data_dict['val'] = str(val_path) + + if self.val_artifact is not None: + self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") + columns = ["epoch", "id", "ground truth", "prediction"] + columns.extend(self.data_dict['names']) + self.result_table = wandb.Table(columns) + self.val_table = self.val_artifact.get("val") + if self.val_table_path_map is None: + self.map_val_table_path() + if opt.bbox_interval == -1: + self.bbox_interval = 
opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 + if opt.evolve or opt.noplots: + self.bbox_interval = opt.bbox_interval = opt.epochs + 1 # disable bbox_interval + train_from_artifact = self.train_artifact_path is not None and self.val_artifact_path is not None + # Update the the data_dict to point to local artifacts dir + if train_from_artifact: + self.data_dict = data_dict + + def download_dataset_artifact(self, path, alias): + """ + download the model checkpoint artifact if the path starts with WANDB_ARTIFACT_PREFIX + + arguments: + path -- path of the dataset to be used for training + alias (str)-- alias of the artifact to be download/used for training + + returns: + (str, wandb.Artifact) -- path of the downladed dataset and it's corresponding artifact object if dataset + is found otherwise returns (None, None) + """ + if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): + artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) + dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\", "/")) + assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" + datadir = dataset_artifact.download() + return datadir, dataset_artifact + return None, None + + def download_model_artifact(self, opt): + """ + download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX + + arguments: + opt (namespace) -- Commandline arguments for this run + """ + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest") + assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' + modeldir = model_artifact.download() + # epochs_trained = model_artifact.metadata.get('epochs_trained') + total_epochs = model_artifact.metadata.get('total_epochs') + is_finished = total_epochs is None + assert not is_finished, 'training is finished, can only resume incomplete runs.' + return modeldir, model_artifact + return None, None + + def log_model(self, path, opt, epoch, fitness_score, best_model=False): + """ + Log the model checkpoint as W&B artifact + + arguments: + path (Path) -- Path of directory containing the checkpoints + opt (namespace) -- Command line arguments for this run + epoch (int) -- Current epoch number + fitness_score (float) -- fitness score for current epoch + best_model (boolean) -- Boolean representing if the current checkpoint is the best yet. + """ + model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', + type='model', + metadata={ + 'original_url': str(path), + 'epochs_trained': epoch + 1, + 'save period': opt.save_period, + 'project': opt.project, + 'total_epochs': opt.epochs, + 'fitness_score': fitness_score}) + model_artifact.add_file(str(path / 'last.pt'), name='last.pt') + wandb.log_artifact(model_artifact, + aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) + LOGGER.info(f"Saving model artifact on epoch {epoch + 1}") + + def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): + """ + Log the dataset as W&B artifact and return the new data file with W&B links + + arguments: + data_file (str) -- the .yaml file with information about the dataset like - path, classes etc. + single_class (boolean) -- train multi-class data as single-class + project (str) -- project name. 
Used to construct the artifact path + overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new + file with _wandb postfix. Eg -> data_wandb.yaml + + returns: + the new .yaml file with artifact links. it can be used to start training directly from artifacts + """ + upload_dataset = self.wandb_run.config.upload_dataset + log_val_only = isinstance(upload_dataset, str) and upload_dataset == 'val' + self.data_dict = check_dataset(data_file) # parse and check + data = dict(self.data_dict) + nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) + names = {k: v for k, v in enumerate(names)} # to index dictionary + + # log train set + if not log_val_only: + self.train_artifact = self.create_dataset_table(LoadImagesAndLabels(data['train'], rect=True, batch_size=1), + names, + name='train') if data.get('train') else None + if data.get('train'): + data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train') + + self.val_artifact = self.create_dataset_table( + LoadImagesAndLabels(data['val'], rect=True, batch_size=1), names, name='val') if data.get('val') else None + if data.get('val'): + data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val') + + path = Path(data_file) + # create a _wandb.yaml file with artifacts links if both train and test set are logged + if not log_val_only: + path = (path.stem if overwrite_config else path.stem + '_wandb') + '.yaml' # updated data.yaml path + path = ROOT / 'data' / path + data.pop('download', None) + data.pop('path', None) + with open(path, 'w') as f: + yaml.safe_dump(data, f) + LOGGER.info(f"Created dataset config file {path}") + + if self.job_type == 'Training': # builds correct artifact pipeline graph + if not log_val_only: + self.wandb_run.log_artifact( + self.train_artifact) # calling use_artifact downloads the dataset. NOT NEEDED! + self.wandb_run.use_artifact(self.val_artifact) + self.val_artifact.wait() + self.val_table = self.val_artifact.get('val') + self.map_val_table_path() + else: + self.wandb_run.log_artifact(self.train_artifact) + self.wandb_run.log_artifact(self.val_artifact) + return path + + def map_val_table_path(self): + """ + Map the validation dataset Table like name of file -> it's id in the W&B Table. + Useful for - referencing artifacts for evaluation. + """ + self.val_table_path_map = {} + LOGGER.info("Mapping dataset") + for i, data in enumerate(tqdm(self.val_table.data)): + self.val_table_path_map[data[3]] = data[0] + + def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[int, str], name: str = 'dataset'): + """ + Create and return W&B artifact containing W&B Table of the dataset. 
+ + arguments: + dataset -- instance of LoadImagesAndLabels class used to iterate over the data to build Table + class_to_id -- hash map that maps class ids to labels + name -- name of the artifact + + returns: + dataset artifact to be logged or used + """ + # TODO: Explore multiprocessing to slpit this loop parallely| This is essential for speeding up the the logging + artifact = wandb.Artifact(name=name, type="dataset") + img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None + img_files = tqdm(dataset.im_files) if not img_files else img_files + for img_file in img_files: + if Path(img_file).is_dir(): + artifact.add_dir(img_file, name='data/images') + labels_path = 'labels'.join(dataset.path.rsplit('images', 1)) + artifact.add_dir(labels_path, name='data/labels') + else: + artifact.add_file(img_file, name='data/images/' + Path(img_file).name) + label_file = Path(img2label_paths([img_file])[0]) + artifact.add_file(str(label_file), name='data/labels/' + + label_file.name) if label_file.exists() else None + table = wandb.Table(columns=["id", "train_image", "Classes", "name"]) + class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()]) + for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)): + box_data, img_classes = [], {} + for cls, *xywh in labels[:, 1:].tolist(): + cls = int(cls) + box_data.append({ + "position": { + "middle": [xywh[0], xywh[1]], + "width": xywh[2], + "height": xywh[3]}, + "class_id": cls, + "box_caption": "%s" % (class_to_id[cls])}) + img_classes[cls] = class_to_id[cls] + boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space + table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), list(img_classes.values()), + Path(paths).name) + artifact.add(table, name) + return artifact + + def log_training_progress(self, predn, path, names): + """ + Build evaluation Table. Uses reference from validation dataset table. + + arguments: + predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class] + path (str): local path of the current evaluation image + names (dict(int, str)): hash map that maps class ids to labels + """ + class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) + box_data = [] + avg_conf_per_class = [0] * len(self.data_dict['names']) + pred_class_count = {} + for *xyxy, conf, cls in predn.tolist(): + if conf >= 0.25: + cls = int(cls) + box_data.append({ + "position": { + "minX": xyxy[0], + "minY": xyxy[1], + "maxX": xyxy[2], + "maxY": xyxy[3]}, + "class_id": cls, + "box_caption": f"{names[cls]} {conf:.3f}", + "scores": { + "class_score": conf}, + "domain": "pixel"}) + avg_conf_per_class[cls] += conf + + if cls in pred_class_count: + pred_class_count[cls] += 1 + else: + pred_class_count[cls] = 1 + + for pred_class in pred_class_count.keys(): + avg_conf_per_class[pred_class] = avg_conf_per_class[pred_class] / pred_class_count[pred_class] + + boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space + id = self.val_table_path_map[Path(path).name] + self.result_table.add_data(self.current_epoch, id, self.val_table.data[id][1], + wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set), + *avg_conf_per_class) + + def val_one_image(self, pred, predn, path, names, im): + """ + Log validation data for one image. 
updates the result Table if validation dataset is uploaded and log bbox media panel + + arguments: + pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class] + predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class] + path (str): local path of the current evaluation image + """ + if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact + self.log_training_progress(predn, path, names) + + if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0: + if self.current_epoch % self.bbox_interval == 0: + box_data = [{ + "position": { + "minX": xyxy[0], + "minY": xyxy[1], + "maxX": xyxy[2], + "maxY": xyxy[3]}, + "class_id": int(cls), + "box_caption": f"{names[int(cls)]} {conf:.3f}", + "scores": { + "class_score": conf}, + "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] + boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space + self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name)) + + def log(self, log_dict): + """ + save the metrics to the logging dictionary + + arguments: + log_dict (Dict) -- metrics/media to be logged in current step + """ + if self.wandb_run: + for key, value in log_dict.items(): + self.log_dict[key] = value + + def end_epoch(self, best_result=False): + """ + commit the log_dict, model artifacts and Tables to W&B and flush the log_dict. + + arguments: + best_result (boolean): Boolean representing if the result of this evaluation is best or not + """ + if self.wandb_run: + with all_logging_disabled(): + if self.bbox_media_panel_images: + self.log_dict["BoundingBoxDebugger"] = self.bbox_media_panel_images + try: + wandb.log(self.log_dict) + except BaseException as e: + LOGGER.info( + f"An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}" + ) + self.wandb_run.finish() + self.wandb_run = None + + self.log_dict = {} + self.bbox_media_panel_images = [] + if self.result_artifact: + self.result_artifact.add(self.result_table, 'result') + wandb.log_artifact(self.result_artifact, + aliases=[ + 'latest', 'last', 'epoch ' + str(self.current_epoch), + ('best' if best_result else '')]) + + wandb.log({"evaluation": self.result_table}) + columns = ["epoch", "id", "ground truth", "prediction"] + columns.extend(self.data_dict['names']) + self.result_table = wandb.Table(columns) + self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") + + def finish_run(self): + """ + Log metrics if any and finish the current W&B run + """ + if self.wandb_run: + if self.log_dict: + with all_logging_disabled(): + wandb.log(self.log_dict) + wandb.run.finish() + + +@contextmanager +def all_logging_disabled(highest_level=logging.CRITICAL): + """ source - https://gist.github.com/simon-weber/7853144 + A context manager that will prevent any logging messages triggered during the body from being processed. + :param highest_level: the maximum logging level in use. + This would only need to be changed if a custom level greater than CRITICAL is defined. 
+ """ + previous_level = logging.root.manager.disable + logging.disable(highest_level) + try: + yield + finally: + logging.disable(previous_level) diff --git a/utils/loss.py b/utils/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..0ec21f8ae7950656084b0529343922ed74d4e4df --- /dev/null +++ b/utils/loss.py @@ -0,0 +1,363 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from utils.metrics import bbox_iou +from utils.torch_utils import de_parallel + + +def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 + # return positive, negative label smoothing BCE targets + return 1.0 - 0.5 * eps, 0.5 * eps + + +class BCEBlurWithLogitsLoss(nn.Module): + # BCEwithLogitLoss() with reduced missing label effects. + def __init__(self, alpha=0.05): + super().__init__() + self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss() + self.alpha = alpha + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + pred = torch.sigmoid(pred) # prob from logits + dx = pred - true # reduce only missing label effects + # dx = (pred - true).abs() # reduce missing label and false label effects + alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4)) + loss *= alpha_factor + return loss.mean() + + +class FocalLoss(nn.Module): + # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super().__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = 'none' # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + # p_t = torch.exp(-loss) + # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability + + # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py + pred_prob = torch.sigmoid(pred) # prob from logits + p_t = true * pred_prob + (1 - true) * (1 - pred_prob) + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = (1.0 - p_t) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == 'mean': + return loss.mean() + elif self.reduction == 'sum': + return loss.sum() + else: # 'none' + return loss + + +class QFocalLoss(nn.Module): + # Wraps Quality focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super().__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = 'none' # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + + pred_prob = torch.sigmoid(pred) # prob from logits + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = torch.abs(true - pred_prob) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == 'mean': + return loss.mean() + elif self.reduction == 'sum': + return loss.sum() + else: # 'none' + return loss + + +class ComputeLoss: + sort_obj_iou = False + + # Compute losses + def __init__(self, model, autobalance=False): + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) + BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets + + # Focal loss + g = h['fl_gamma'] # focal loss gamma + if g > 0: + BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) + + m = de_parallel(model).model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 + self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index + self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance + self.nc = m.nc # number of classes + self.nl = m.nl # number of layers + self.anchors = m.anchors + self.device = device + + def __call__(self, p, targets): # predictions, targets + bs = p[0].shape[0] # batch size + loss = torch.zeros(3, device=self.device) # [box, obj, cls] losses + tcls, tbox, indices = self.build_targets(p, targets) # targets + + # Losses + for i, pi in enumerate(p): # layer index, layer predictions + b, gj, gi = indices[i] # image, anchor, gridy, gridx + tobj = torch.zeros((pi.shape[0], pi.shape[2], pi.shape[3]), dtype=pi.dtype, device=self.device) # tgt obj + + n_labels = b.shape[0] # number of labels + if n_labels: + # pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1) # faster, requires torch 1.8.0 + pxy, pwh, _, pcls = pi[b, :, gj, gi].split((2, 2, 1, self.nc), 1) # target-subset of predictions + + # Regression + # pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i] + # pwh = (0.0 + (pwh - 1.09861).sigmoid() * 4) * anchors[i] + # pwh = (0.33333 + (pwh - 1.09861).sigmoid() * 2.66667) * anchors[i] + # pwh = (0.25 + (pwh - 1.38629).sigmoid() * 3.75) * anchors[i] + # pwh = (0.20 + (pwh - 1.60944).sigmoid() * 4.8) * anchors[i] + # pwh = (0.16667 + (pwh - 1.79175).sigmoid() * 5.83333) * anchors[i] + pxy = pxy.sigmoid() * 1.6 - 0.3 + pwh = (0.2 + pwh.sigmoid() * 4.8) * self.anchors[i] + pbox = torch.cat((pxy, pwh), 1) # predicted box + iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze() # iou(prediction, target) + loss[0] += (1.0 - iou).mean() # box loss + + # Objectness + iou = iou.detach().clamp(0).type(tobj.dtype) + if self.sort_obj_iou: + j = iou.argsort() + b, gj, gi, iou = b[j], gj[j], gi[j], iou[j] + if self.gr < 1: + iou = (1.0 - self.gr) + self.gr * iou + tobj[b, gj, gi] = iou 
# iou ratio + + # Classification + if self.nc > 1: # cls loss (only if multiple classes) + t = torch.full_like(pcls, self.cn, device=self.device) # targets + t[range(n_labels), tcls[i]] = self.cp + loss[2] += self.BCEcls(pcls, t) # cls loss + + obji = self.BCEobj(pi[:, 4], tobj) + loss[1] += obji * self.balance[i] # obj loss + if self.autobalance: + self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() + + if self.autobalance: + self.balance = [x / self.balance[self.ssi] for x in self.balance] + loss[0] *= self.hyp['box'] + loss[1] *= self.hyp['obj'] + loss[2] *= self.hyp['cls'] + return loss.sum() * bs, loss.detach() # [box, obj, cls] losses + + def build_targets(self, p, targets): + # Build targets for compute_loss(), input targets(image,class,x,y,w,h) + nt = targets.shape[0] # number of anchors, targets + tcls, tbox, indices = [], [], [] + gain = torch.ones(6, device=self.device) # normalized to gridspace gain + + g = 0.3 # bias + off = torch.tensor( + [ + [0, 0], + [1, 0], + [0, 1], + [-1, 0], + [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], + device=self.device).float() * g # offsets + + for i in range(self.nl): + shape = p[i].shape + gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain # shape(3,n,7) + if nt: + # Matches + r = t[..., 4:6] / self.anchors[i] # wh ratio + j = torch.max(r, 1 / r).max(1)[0] < self.hyp['anchor_t'] # compare + # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + t = t[j] # filter + + # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1 < g) & (gxy > 1)).T + l, m = ((gxi % 1 < g) & (gxi > 1)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + bc, gxy, gwh = t.chunk(3, 1) # (image, class), grid xy, grid wh + b, c = bc.long().T # image, class + gij = (gxy - offsets).long() + gi, gj = gij.T # grid indices + + # Append + indices.append((b, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1))) # image, grid_y, grid_x indices + tbox.append(torch.cat((gxy - gij, gwh), 1)) # box + tcls.append(c) # class + + return tcls, tbox, indices + + +class ComputeLoss_NEW: + sort_obj_iou = False + + # Compute losses + def __init__(self, model, autobalance=False): + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) + BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets + + # Focal loss + g = h['fl_gamma'] # focal loss gamma + if g > 0: + BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) + + m = de_parallel(model).model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 + self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index + self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance + self.nc = m.nc # number of classes + self.nl = m.nl # number of layers + self.anchors = m.anchors + self.device = device + self.BCE_base = nn.BCEWithLogitsLoss(reduction='none') + + 
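+ # NOTE: unlike ComputeLoss above, which assigns each target to a fixed set of
+ # neighbouring grid cells, ComputeLoss_NEW scores every candidate cell from every
+ # layer and keeps only the n_assign cells with the lowest combined box+cls loss
+ # per label (see __call__ below). Illustrative sketch of that selection step,
+ # using hypothetical names that are not part of the original file:
+ #   sum_loss = box_loss + cls_loss                       # (n_labels, n_candidates)
+ #   keep = torch.zeros_like(sum_loss, dtype=torch.bool)  # selection mask
+ #   for col in torch.argsort(sum_loss, dim=1).T[:n_assign]:
+ #       keep[range(n_labels), col] = True                # mark the cheapest cells
+ # Only the marked cells contribute to the box/cls terms; the objectness target is
+ # the detached IoU at those cells.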
def __call__(self, p, targets): # predictions, targets + tcls, tbox, indices = self.build_targets(p, targets) # targets + bs = p[0].shape[0] # batch size + n_labels = targets.shape[0] # number of labels + loss = torch.zeros(3, device=self.device) # [box, obj, cls] losses + + # Compute all losses + all_loss = [] + for i, pi in enumerate(p): # layer index, layer predictions + b, gj, gi = indices[i] # image, anchor, gridy, gridx + if n_labels: + pxy, pwh, pobj, pcls = pi[b, :, gj, gi].split((2, 2, 1, self.nc), 2) # target-subset of predictions + + # Regression + pbox = torch.cat((pxy.sigmoid() * 1.6 - 0.3, (0.2 + pwh.sigmoid() * 4.8) * self.anchors[i]), 2) + iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze() # iou(predicted_box, target_box) + obj_target = iou.detach().clamp(0).type(pi.dtype) # objectness targets + + all_loss.append([(1.0 - iou) * self.hyp['box'], + self.BCE_base(pobj.squeeze(), torch.ones_like(obj_target)) * self.hyp['obj'], + self.BCE_base(pcls, F.one_hot(tcls[i], self.nc).float()).mean(2) * self.hyp['cls'], + obj_target, + tbox[i][..., 2] > 0.0]) # valid + + # Lowest 3 losses per label + n_assign = 4 # top n matches + cat_loss = [torch.cat(x, 1) for x in zip(*all_loss)] + ij = torch.zeros_like(cat_loss[0]).bool() # top 3 mask + sum_loss = cat_loss[0] + cat_loss[2] + for col in torch.argsort(sum_loss, dim=1).T[:n_assign]: + # ij[range(n_labels), col] = True + ij[range(n_labels), col] = cat_loss[4][range(n_labels), col] + loss[0] = cat_loss[0][ij].mean() * self.nl # box loss + loss[2] = cat_loss[2][ij].mean() * self.nl # cls loss + + # Obj loss + for i, (h, pi) in enumerate(zip(ij.chunk(self.nl, 1), p)): # layer index, layer predictions + b, gj, gi = indices[i] # image, anchor, gridy, gridx + tobj = torch.zeros((pi.shape[0], pi.shape[2], pi.shape[3]), dtype=pi.dtype, device=self.device) # obj + if n_labels: # if any labels + tobj[b[h], gj[h], gi[h]] = all_loss[i][3][h] + loss[1] += self.BCEobj(pi[:, 4], tobj) * (self.balance[i] * self.hyp['obj']) + + return loss.sum() * bs, loss.detach() # [box, obj, cls] losses + + def build_targets(self, p, targets): + # Build targets for compute_loss(), input targets(image,class,x,y,w,h) + nt = targets.shape[0] # number of anchors, targets + tcls, tbox, indices = [], [], [] + gain = torch.ones(6, device=self.device) # normalized to gridspace gain + + g = 0.3 # bias + off = torch.tensor( + [ + [0, 0], + [1, 0], + [0, 1], + [-1, 0], + [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], + device=self.device).float() # offsets + + for i in range(self.nl): + shape = p[i].shape + gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain # shape(3,n,7) + if nt: + # # Matches + r = t[..., 4:6] / self.anchors[i] # wh ratio + a = torch.max(r, 1 / r).max(1)[0] < self.hyp['anchor_t'] # compare + # a = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + # t = t[a] # filter + + # # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1 < g) & (gxy > 1)).T + l, m = ((gxi % 1 < g) & (gxi > 1)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) & a + t = t.repeat((5, 1, 1)) + offsets = torch.zeros_like(gxy)[None] + off[:, None] + t[..., 4:6][~j] = 0.0 # move unsuitable targets far away + else: + t = targets[0] + offsets = 0 + + # Define + bc, gxy, gwh = t.chunk(3, 2) # (image, class), grid xy, grid wh + b, c = bc.long().transpose(0, 2).contiguous() # image, class + gij = (gxy - offsets).long() + gi, 
gj = gij.transpose(0, 2).contiguous() # grid indices + + # Append + indices.append((b, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1))) # image, grid_y, grid_x indices + tbox.append(torch.cat((gxy - gij, gwh), 2).permute(1, 0, 2).contiguous()) # box + tcls.append(c) # class + + # # Unique + # n1 = torch.cat((b.view(-1, 1), tbox[i].view(-1, 4)), 1).shape[0] + # n2 = tbox[i].view(-1, 4).unique(dim=0).shape[0] + # print(f'targets-unique {n1}-{n2} diff={n1-n2}') + + return tcls, tbox, indices diff --git a/utils/loss_tal.py b/utils/loss_tal.py new file mode 100644 index 0000000000000000000000000000000000000000..9f20c787b1c4ec801de946fc1cfea95c8adfd27d --- /dev/null +++ b/utils/loss_tal.py @@ -0,0 +1,215 @@ +import os + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from utils.general import xywh2xyxy +from utils.metrics import bbox_iou +from utils.tal.anchor_generator import dist2bbox, make_anchors, bbox2dist +from utils.tal.assigner import TaskAlignedAssigner +from utils.torch_utils import de_parallel + + +def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 + # return positive, negative label smoothing BCE targets + return 1.0 - 0.5 * eps, 0.5 * eps + + +class VarifocalLoss(nn.Module): + # Varifocal loss by Zhang et al. https://arxiv.org/abs/2008.13367 + def __init__(self): + super().__init__() + + def forward(self, pred_score, gt_score, label, alpha=0.75, gamma=2.0): + weight = alpha * pred_score.sigmoid().pow(gamma) * (1 - label) + gt_score * label + with torch.cuda.amp.autocast(enabled=False): + loss = (F.binary_cross_entropy_with_logits(pred_score.float(), gt_score.float(), + reduction="none") * weight).sum() + return loss + + +class FocalLoss(nn.Module): + # Wraps focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super().__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = "none" # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + # p_t = torch.exp(-loss) + # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability + + # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py + pred_prob = torch.sigmoid(pred) # prob from logits + p_t = true * pred_prob + (1 - true) * (1 - pred_prob) + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = (1.0 - p_t) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == "mean": + return loss.mean() + elif self.reduction == "sum": + return loss.sum() + else: # 'none' + return loss + + +class BboxLoss(nn.Module): + def __init__(self, reg_max, use_dfl=False): + super().__init__() + self.reg_max = reg_max + self.use_dfl = use_dfl + + def forward(self, pred_dist, pred_bboxes, anchor_points, target_bboxes, target_scores, target_scores_sum, fg_mask): + # iou loss + bbox_mask = fg_mask.unsqueeze(-1).repeat([1, 1, 4]) # (b, h*w, 4) + pred_bboxes_pos = torch.masked_select(pred_bboxes, bbox_mask).view(-1, 4) + target_bboxes_pos = torch.masked_select(target_bboxes, bbox_mask).view(-1, 4) + bbox_weight = torch.masked_select(target_scores.sum(-1), fg_mask).unsqueeze(-1) + + iou = bbox_iou(pred_bboxes_pos, target_bboxes_pos, xywh=False, CIoU=True) + loss_iou = 1.0 - iou + + loss_iou *= bbox_weight + loss_iou = loss_iou.sum() / target_scores_sum + + # dfl loss + if self.use_dfl: + dist_mask = fg_mask.unsqueeze(-1).repeat([1, 1, (self.reg_max + 1) * 4]) + pred_dist_pos = torch.masked_select(pred_dist, dist_mask).view(-1, 4, self.reg_max + 1) + target_ltrb = bbox2dist(anchor_points, target_bboxes, self.reg_max) + target_ltrb_pos = torch.masked_select(target_ltrb, bbox_mask).view(-1, 4) + loss_dfl = self._df_loss(pred_dist_pos, target_ltrb_pos) * bbox_weight + loss_dfl = loss_dfl.sum() / target_scores_sum + else: + loss_dfl = torch.tensor(0.0).to(pred_dist.device) + + return loss_iou, loss_dfl, iou + + def _df_loss(self, pred_dist, target): + target_left = target.to(torch.long) + target_right = target_left + 1 + weight_left = target_right.to(torch.float) - target + weight_right = 1 - weight_left + loss_left = F.cross_entropy(pred_dist.view(-1, self.reg_max + 1), target_left.view(-1), reduction="none").view( + target_left.shape) * weight_left + loss_right = F.cross_entropy(pred_dist.view(-1, self.reg_max + 1), target_right.view(-1), + reduction="none").view(target_left.shape) * weight_right + return (loss_left + loss_right).mean(-1, keepdim=True) + + +class ComputeLoss: + # Compute losses + def __init__(self, model, use_dfl=True): + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h["cls_pw"]], device=device), reduction='none') + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get("label_smoothing", 0.0)) # positive, negative BCE targets + + # Focal loss + g = h["fl_gamma"] # focal loss gamma + if g > 0: + BCEcls = FocalLoss(BCEcls, g) + + m = 
de_parallel(model).model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 + self.BCEcls = BCEcls + self.hyp = h + self.stride = m.stride # model strides + self.nc = m.nc # number of classes + self.nl = m.nl # number of layers + self.no = m.no + self.reg_max = m.reg_max + self.device = device + + self.assigner = TaskAlignedAssigner(topk=int(os.getenv('YOLOM', 10)), + num_classes=self.nc, + alpha=float(os.getenv('YOLOA', 0.5)), + beta=float(os.getenv('YOLOB', 6.0))) + self.bbox_loss = BboxLoss(m.reg_max - 1, use_dfl=use_dfl).to(device) + self.proj = torch.arange(m.reg_max).float().to(device) # / 120.0 + self.use_dfl = use_dfl + + def preprocess(self, targets, batch_size, scale_tensor): + if targets.shape[0] == 0: + out = torch.zeros(batch_size, 0, 5, device=self.device) + else: + i = targets[:, 0] # image index + _, counts = i.unique(return_counts=True) + out = torch.zeros(batch_size, counts.max(), 5, device=self.device) + for j in range(batch_size): + matches = i == j + n = matches.sum() + if n: + out[j, :n] = targets[matches, 1:] + out[..., 1:5] = xywh2xyxy(out[..., 1:5].mul_(scale_tensor)) + return out + + def bbox_decode(self, anchor_points, pred_dist): + if self.use_dfl: + b, a, c = pred_dist.shape # batch, anchors, channels + pred_dist = pred_dist.view(b, a, 4, c // 4).softmax(3).matmul(self.proj.type(pred_dist.dtype)) + # pred_dist = pred_dist.view(b, a, c // 4, 4).transpose(2,3).softmax(3).matmul(self.proj.type(pred_dist.dtype)) + # pred_dist = (pred_dist.view(b, a, c // 4, 4).softmax(2) * self.proj.type(pred_dist.dtype).view(1, 1, -1, 1)).sum(2) + return dist2bbox(pred_dist, anchor_points, xywh=False) + + def __call__(self, p, targets, img=None, epoch=0): + loss = torch.zeros(3, device=self.device) # box, cls, dfl + feats = p[1] if isinstance(p, tuple) else p + pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split( + (self.reg_max * 4, self.nc), 1) + pred_scores = pred_scores.permute(0, 2, 1).contiguous() + pred_distri = pred_distri.permute(0, 2, 1).contiguous() + + dtype = pred_scores.dtype + batch_size, grid_size = pred_scores.shape[:2] + imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0] # image size (h,w) + anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5) + + # targets + targets = self.preprocess(targets, batch_size, scale_tensor=imgsz[[1, 0, 1, 0]]) + gt_labels, gt_bboxes = targets.split((1, 4), 2) # cls, xyxy + mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0) + + # pboxes + pred_bboxes = self.bbox_decode(anchor_points, pred_distri) # xyxy, (b, h*w, 4) + + target_labels, target_bboxes, target_scores, fg_mask = self.assigner( + pred_scores.detach().sigmoid(), + (pred_bboxes.detach() * stride_tensor).type(gt_bboxes.dtype), + anchor_points * stride_tensor, + gt_labels, + gt_bboxes, + mask_gt) + + target_bboxes /= stride_tensor + target_scores_sum = max(target_scores.sum(), 1) + + # cls loss + # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum # VFL way + loss[1] = self.BCEcls(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum # BCE + + # bbox loss + if fg_mask.sum(): + loss[0], loss[2], iou = self.bbox_loss(pred_distri, + pred_bboxes, + anchor_points, + target_bboxes, + target_scores, + target_scores_sum, + fg_mask) + + loss[0] *= 7.5 # box gain + loss[1] *= 0.5 # cls gain + loss[2] *= 1.5 # dfl gain + + return loss.sum() * batch_size, loss.detach() # 
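In `bbox_decode` above, each box side is predicted as a discrete distribution over `reg_max` bins; the decoded distance is the expectation of that distribution (softmax weights multiplied by the bin indices stored in `self.proj`), which `dist2bbox` then converts into xyxy coordinates. The snippet below is a hedged, stand-alone illustration of that expectation step only; the toy logits and `reg_max = 16` are assumptions made for the example, not values read from the repository.

```python
import torch

reg_max = 16
proj = torch.arange(reg_max, dtype=torch.float32)  # bin indices 0..15, as in self.proj

# one anchor, one box side: raw logits over the 16 distance bins (toy values)
logits = torch.zeros(reg_max)
logits[4], logits[5] = 8.0, 8.0                    # probability mass split between bins 4 and 5

dist = logits.softmax(dim=0) @ proj                # expected distance in stride units
print(round(dist.item(), 2))                       # ~4.51, i.e. between the two peaked bins
```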
loss(box, cls, dfl) diff --git a/utils/loss_tal_dual.py b/utils/loss_tal_dual.py new file mode 100644 index 0000000000000000000000000000000000000000..259e7888d17c70e97efc31db66dfa8a2bd1faef7 --- /dev/null +++ b/utils/loss_tal_dual.py @@ -0,0 +1,385 @@ +import os + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from utils.general import xywh2xyxy +from utils.metrics import bbox_iou +from utils.tal.anchor_generator import dist2bbox, make_anchors, bbox2dist +from utils.tal.assigner import TaskAlignedAssigner +from utils.torch_utils import de_parallel + + +def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 + # return positive, negative label smoothing BCE targets + return 1.0 - 0.5 * eps, 0.5 * eps + + +class VarifocalLoss(nn.Module): + # Varifocal loss by Zhang et al. https://arxiv.org/abs/2008.13367 + def __init__(self): + super().__init__() + + def forward(self, pred_score, gt_score, label, alpha=0.75, gamma=2.0): + weight = alpha * pred_score.sigmoid().pow(gamma) * (1 - label) + gt_score * label + with torch.cuda.amp.autocast(enabled=False): + loss = (F.binary_cross_entropy_with_logits(pred_score.float(), gt_score.float(), + reduction="none") * weight).sum() + return loss + + +class FocalLoss(nn.Module): + # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super().__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = "none" # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + # p_t = torch.exp(-loss) + # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability + + # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py + pred_prob = torch.sigmoid(pred) # prob from logits + p_t = true * pred_prob + (1 - true) * (1 - pred_prob) + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = (1.0 - p_t) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == "mean": + return loss.mean() + elif self.reduction == "sum": + return loss.sum() + else: # 'none' + return loss + + +class BboxLoss(nn.Module): + def __init__(self, reg_max, use_dfl=False): + super().__init__() + self.reg_max = reg_max + self.use_dfl = use_dfl + + def forward(self, pred_dist, pred_bboxes, anchor_points, target_bboxes, target_scores, target_scores_sum, fg_mask): + # iou loss + bbox_mask = fg_mask.unsqueeze(-1).repeat([1, 1, 4]) # (b, h*w, 4) + pred_bboxes_pos = torch.masked_select(pred_bboxes, bbox_mask).view(-1, 4) + target_bboxes_pos = torch.masked_select(target_bboxes, bbox_mask).view(-1, 4) + bbox_weight = torch.masked_select(target_scores.sum(-1), fg_mask).unsqueeze(-1) + + iou = bbox_iou(pred_bboxes_pos, target_bboxes_pos, xywh=False, CIoU=True) + loss_iou = 1.0 - iou + + loss_iou *= bbox_weight + loss_iou = loss_iou.sum() / target_scores_sum + + # dfl loss + if self.use_dfl: + dist_mask = fg_mask.unsqueeze(-1).repeat([1, 1, (self.reg_max + 1) * 4]) + pred_dist_pos = torch.masked_select(pred_dist, dist_mask).view(-1, 4, self.reg_max + 1) + target_ltrb = bbox2dist(anchor_points, target_bboxes, self.reg_max) + target_ltrb_pos = torch.masked_select(target_ltrb, bbox_mask).view(-1, 4) + loss_dfl = 
self._df_loss(pred_dist_pos, target_ltrb_pos) * bbox_weight + loss_dfl = loss_dfl.sum() / target_scores_sum + else: + loss_dfl = torch.tensor(0.0).to(pred_dist.device) + + return loss_iou, loss_dfl, iou + + def _df_loss(self, pred_dist, target): + target_left = target.to(torch.long) + target_right = target_left + 1 + weight_left = target_right.to(torch.float) - target + weight_right = 1 - weight_left + loss_left = F.cross_entropy(pred_dist.view(-1, self.reg_max + 1), target_left.view(-1), reduction="none").view( + target_left.shape) * weight_left + loss_right = F.cross_entropy(pred_dist.view(-1, self.reg_max + 1), target_right.view(-1), + reduction="none").view(target_left.shape) * weight_right + return (loss_left + loss_right).mean(-1, keepdim=True) + + +class ComputeLoss: + # Compute losses + def __init__(self, model, use_dfl=True): + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h["cls_pw"]], device=device), reduction='none') + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get("label_smoothing", 0.0)) # positive, negative BCE targets + + # Focal loss + g = h["fl_gamma"] # focal loss gamma + if g > 0: + BCEcls = FocalLoss(BCEcls, g) + + m = de_parallel(model).model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 + self.BCEcls = BCEcls + self.hyp = h + self.stride = m.stride # model strides + self.nc = m.nc # number of classes + self.nl = m.nl # number of layers + self.no = m.no + self.reg_max = m.reg_max + self.device = device + + self.assigner = TaskAlignedAssigner(topk=int(os.getenv('YOLOM', 10)), + num_classes=self.nc, + alpha=float(os.getenv('YOLOA', 0.5)), + beta=float(os.getenv('YOLOB', 6.0))) + self.assigner2 = TaskAlignedAssigner(topk=int(os.getenv('YOLOM', 10)), + num_classes=self.nc, + alpha=float(os.getenv('YOLOA', 0.5)), + beta=float(os.getenv('YOLOB', 6.0))) + self.bbox_loss = BboxLoss(m.reg_max - 1, use_dfl=use_dfl).to(device) + self.bbox_loss2 = BboxLoss(m.reg_max - 1, use_dfl=use_dfl).to(device) + self.proj = torch.arange(m.reg_max).float().to(device) # / 120.0 + self.use_dfl = use_dfl + + def preprocess(self, targets, batch_size, scale_tensor): + if targets.shape[0] == 0: + out = torch.zeros(batch_size, 0, 5, device=self.device) + else: + i = targets[:, 0] # image index + _, counts = i.unique(return_counts=True) + out = torch.zeros(batch_size, counts.max(), 5, device=self.device) + for j in range(batch_size): + matches = i == j + n = matches.sum() + if n: + out[j, :n] = targets[matches, 1:] + out[..., 1:5] = xywh2xyxy(out[..., 1:5].mul_(scale_tensor)) + return out + + def bbox_decode(self, anchor_points, pred_dist): + if self.use_dfl: + b, a, c = pred_dist.shape # batch, anchors, channels + pred_dist = pred_dist.view(b, a, 4, c // 4).softmax(3).matmul(self.proj.type(pred_dist.dtype)) + # pred_dist = pred_dist.view(b, a, c // 4, 4).transpose(2,3).softmax(3).matmul(self.proj.type(pred_dist.dtype)) + # pred_dist = (pred_dist.view(b, a, c // 4, 4).softmax(2) * self.proj.type(pred_dist.dtype).view(1, 1, -1, 1)).sum(2) + return dist2bbox(pred_dist, anchor_points, xywh=False) + + def __call__(self, p, targets, img=None, epoch=0): + loss = torch.zeros(3, device=self.device) # box, cls, dfl + feats = p[1][0] if isinstance(p, tuple) else p[0] + feats2 = p[1][1] if isinstance(p, tuple) else p[1] + + pred_distri, pred_scores = 
torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split( + (self.reg_max * 4, self.nc), 1) + pred_scores = pred_scores.permute(0, 2, 1).contiguous() + pred_distri = pred_distri.permute(0, 2, 1).contiguous() + + pred_distri2, pred_scores2 = torch.cat([xi.view(feats2[0].shape[0], self.no, -1) for xi in feats2], 2).split( + (self.reg_max * 4, self.nc), 1) + pred_scores2 = pred_scores2.permute(0, 2, 1).contiguous() + pred_distri2 = pred_distri2.permute(0, 2, 1).contiguous() + + dtype = pred_scores.dtype + batch_size, grid_size = pred_scores.shape[:2] + imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0] # image size (h,w) + anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5) + + # targets + targets = self.preprocess(targets, batch_size, scale_tensor=imgsz[[1, 0, 1, 0]]) + gt_labels, gt_bboxes = targets.split((1, 4), 2) # cls, xyxy + mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0) + + # pboxes + pred_bboxes = self.bbox_decode(anchor_points, pred_distri) # xyxy, (b, h*w, 4) + pred_bboxes2 = self.bbox_decode(anchor_points, pred_distri2) # xyxy, (b, h*w, 4) + + target_labels, target_bboxes, target_scores, fg_mask = self.assigner( + pred_scores.detach().sigmoid(), + (pred_bboxes.detach() * stride_tensor).type(gt_bboxes.dtype), + anchor_points * stride_tensor, + gt_labels, + gt_bboxes, + mask_gt) + target_labels2, target_bboxes2, target_scores2, fg_mask2 = self.assigner2( + pred_scores2.detach().sigmoid(), + (pred_bboxes2.detach() * stride_tensor).type(gt_bboxes.dtype), + anchor_points * stride_tensor, + gt_labels, + gt_bboxes, + mask_gt) + + target_bboxes /= stride_tensor + target_scores_sum = max(target_scores.sum(), 1) + target_bboxes2 /= stride_tensor + target_scores_sum2 = max(target_scores2.sum(), 1) + + # cls loss + # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum # VFL way + loss[1] = self.BCEcls(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum # BCE + loss[1] *= 0.25 + loss[1] += self.BCEcls(pred_scores2, target_scores2.to(dtype)).sum() / target_scores_sum2 # BCE + + # bbox loss + if fg_mask.sum(): + loss[0], loss[2], iou = self.bbox_loss(pred_distri, + pred_bboxes, + anchor_points, + target_bboxes, + target_scores, + target_scores_sum, + fg_mask) + loss[0] *= 0.25 + loss[2] *= 0.25 + if fg_mask2.sum(): + loss0_, loss2_, iou2 = self.bbox_loss2(pred_distri2, + pred_bboxes2, + anchor_points, + target_bboxes2, + target_scores2, + target_scores_sum2, + fg_mask2) + loss[0] += loss0_ + loss[2] += loss2_ + + loss[0] *= 7.5 # box gain + loss[1] *= 0.5 # cls gain + loss[2] *= 1.5 # dfl gain + + return loss.sum() * batch_size, loss.detach() # loss(box, cls, dfl) + + +class ComputeLossLH: + # Compute losses + def __init__(self, model, use_dfl=True): + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h["cls_pw"]], device=device), reduction='none') + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get("label_smoothing", 0.0)) # positive, negative BCE targets + + # Focal loss + g = h["fl_gamma"] # focal loss gamma + if g > 0: + BCEcls = FocalLoss(BCEcls, g) + + m = de_parallel(model).model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 + self.BCEcls = BCEcls + self.hyp = h + self.stride = m.stride # model strides + self.nc = 
m.nc # number of classes + self.nl = m.nl # number of layers + self.no = m.no + self.reg_max = m.reg_max + self.device = device + + self.assigner = TaskAlignedAssigner(topk=int(os.getenv('YOLOM', 10)), + num_classes=self.nc, + alpha=float(os.getenv('YOLOA', 0.5)), + beta=float(os.getenv('YOLOB', 6.0))) + self.bbox_loss = BboxLoss(m.reg_max - 1, use_dfl=use_dfl).to(device) + self.proj = torch.arange(m.reg_max).float().to(device) # / 120.0 + self.use_dfl = use_dfl + + def preprocess(self, targets, batch_size, scale_tensor): + if targets.shape[0] == 0: + out = torch.zeros(batch_size, 0, 5, device=self.device) + else: + i = targets[:, 0] # image index + _, counts = i.unique(return_counts=True) + out = torch.zeros(batch_size, counts.max(), 5, device=self.device) + for j in range(batch_size): + matches = i == j + n = matches.sum() + if n: + out[j, :n] = targets[matches, 1:] + out[..., 1:5] = xywh2xyxy(out[..., 1:5].mul_(scale_tensor)) + return out + + def bbox_decode(self, anchor_points, pred_dist): + if self.use_dfl: + b, a, c = pred_dist.shape # batch, anchors, channels + pred_dist = pred_dist.view(b, a, 4, c // 4).softmax(3).matmul(self.proj.type(pred_dist.dtype)) + # pred_dist = pred_dist.view(b, a, c // 4, 4).transpose(2,3).softmax(3).matmul(self.proj.type(pred_dist.dtype)) + # pred_dist = (pred_dist.view(b, a, c // 4, 4).softmax(2) * self.proj.type(pred_dist.dtype).view(1, 1, -1, 1)).sum(2) + return dist2bbox(pred_dist, anchor_points, xywh=False) + + def __call__(self, p, targets, img=None, epoch=0): + loss = torch.zeros(3, device=self.device) # box, cls, dfl + feats = p[1][0] if isinstance(p, tuple) else p[0] + feats2 = p[1][1] if isinstance(p, tuple) else p[1] + + pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split( + (self.reg_max * 4, self.nc), 1) + pred_scores = pred_scores.permute(0, 2, 1).contiguous() + pred_distri = pred_distri.permute(0, 2, 1).contiguous() + + pred_distri2, pred_scores2 = torch.cat([xi.view(feats2[0].shape[0], self.no, -1) for xi in feats2], 2).split( + (self.reg_max * 4, self.nc), 1) + pred_scores2 = pred_scores2.permute(0, 2, 1).contiguous() + pred_distri2 = pred_distri2.permute(0, 2, 1).contiguous() + + dtype = pred_scores.dtype + batch_size, grid_size = pred_scores.shape[:2] + imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0] # image size (h,w) + anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5) + + # targets + targets = self.preprocess(targets, batch_size, scale_tensor=imgsz[[1, 0, 1, 0]]) + gt_labels, gt_bboxes = targets.split((1, 4), 2) # cls, xyxy + mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0) + + # pboxes + pred_bboxes = self.bbox_decode(anchor_points, pred_distri) # xyxy, (b, h*w, 4) + pred_bboxes2 = self.bbox_decode(anchor_points, pred_distri2) # xyxy, (b, h*w, 4) + + target_labels, target_bboxes, target_scores, fg_mask = self.assigner( + pred_scores2.detach().sigmoid(), + (pred_bboxes2.detach() * stride_tensor).type(gt_bboxes.dtype), + anchor_points * stride_tensor, + gt_labels, + gt_bboxes, + mask_gt) + + target_bboxes /= stride_tensor + target_scores_sum = target_scores.sum() + + # cls loss + # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum # VFL way + loss[1] = self.BCEcls(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum # BCE + loss[1] *= 0.25 + loss[1] += self.BCEcls(pred_scores2, target_scores.to(dtype)).sum() / target_scores_sum # BCE + + # bbox loss + if fg_mask.sum(): + 
loss[0], loss[2], iou = self.bbox_loss(pred_distri, + pred_bboxes, + anchor_points, + target_bboxes, + target_scores, + target_scores_sum, + fg_mask) + loss[0] *= 0.25 + loss[2] *= 0.25 + if fg_mask.sum(): + loss0_, loss2_, iou2 = self.bbox_loss(pred_distri2, + pred_bboxes2, + anchor_points, + target_bboxes, + target_scores, + target_scores_sum, + fg_mask) + loss[0] += loss0_ + loss[2] += loss2_ + + loss[0] *= 7.5 # box gain + loss[1] *= 0.5 # cls gain + loss[2] *= 1.5 # dfl gain + + return loss.sum() * batch_size, loss.detach() # loss(box, cls, dfl) diff --git a/utils/loss_tal_triple.py b/utils/loss_tal_triple.py new file mode 100644 index 0000000000000000000000000000000000000000..1ed821983e0a959dfff70575adacb2aed8ae6a5c --- /dev/null +++ b/utils/loss_tal_triple.py @@ -0,0 +1,282 @@ +import os + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from utils.general import xywh2xyxy +from utils.metrics import bbox_iou +from utils.tal.anchor_generator import dist2bbox, make_anchors, bbox2dist +from utils.tal.assigner import TaskAlignedAssigner +from utils.torch_utils import de_parallel + + +def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 + # return positive, negative label smoothing BCE targets + return 1.0 - 0.5 * eps, 0.5 * eps + + +class VarifocalLoss(nn.Module): + # Varifocal loss by Zhang et al. https://arxiv.org/abs/2008.13367 + def __init__(self): + super().__init__() + + def forward(self, pred_score, gt_score, label, alpha=0.75, gamma=2.0): + weight = alpha * pred_score.sigmoid().pow(gamma) * (1 - label) + gt_score * label + with torch.cuda.amp.autocast(enabled=False): + loss = (F.binary_cross_entropy_with_logits(pred_score.float(), gt_score.float(), + reduction="none") * weight).sum() + return loss + + +class FocalLoss(nn.Module): + # Wraps focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super().__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = "none" # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + # p_t = torch.exp(-loss) + # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability + + # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py + pred_prob = torch.sigmoid(pred) # prob from logits + p_t = true * pred_prob + (1 - true) * (1 - pred_prob) + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = (1.0 - p_t) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == "mean": + return loss.mean() + elif self.reduction == "sum": + return loss.sum() + else: # 'none' + return loss + + +class BboxLoss(nn.Module): + def __init__(self, reg_max, use_dfl=False): + super().__init__() + self.reg_max = reg_max + self.use_dfl = use_dfl + + def forward(self, pred_dist, pred_bboxes, anchor_points, target_bboxes, target_scores, target_scores_sum, fg_mask): + # iou loss + bbox_mask = fg_mask.unsqueeze(-1).repeat([1, 1, 4]) # (b, h*w, 4) + pred_bboxes_pos = torch.masked_select(pred_bboxes, bbox_mask).view(-1, 4) + target_bboxes_pos = torch.masked_select(target_bboxes, bbox_mask).view(-1, 4) + bbox_weight = torch.masked_select(target_scores.sum(-1), fg_mask).unsqueeze(-1) + + iou = bbox_iou(pred_bboxes_pos, target_bboxes_pos, xywh=False, CIoU=True) + loss_iou = 1.0 - iou + + loss_iou *= bbox_weight + loss_iou = loss_iou.sum() / target_scores_sum + + # dfl loss + if self.use_dfl: + dist_mask = fg_mask.unsqueeze(-1).repeat([1, 1, (self.reg_max + 1) * 4]) + pred_dist_pos = torch.masked_select(pred_dist, dist_mask).view(-1, 4, self.reg_max + 1) + target_ltrb = bbox2dist(anchor_points, target_bboxes, self.reg_max) + target_ltrb_pos = torch.masked_select(target_ltrb, bbox_mask).view(-1, 4) + loss_dfl = self._df_loss(pred_dist_pos, target_ltrb_pos) * bbox_weight + loss_dfl = loss_dfl.sum() / target_scores_sum + else: + loss_dfl = torch.tensor(0.0).to(pred_dist.device) + + return loss_iou, loss_dfl, iou + + def _df_loss(self, pred_dist, target): + target_left = target.to(torch.long) + target_right = target_left + 1 + weight_left = target_right.to(torch.float) - target + weight_right = 1 - weight_left + loss_left = F.cross_entropy(pred_dist.view(-1, self.reg_max + 1), target_left.view(-1), reduction="none").view( + target_left.shape) * weight_left + loss_right = F.cross_entropy(pred_dist.view(-1, self.reg_max + 1), target_right.view(-1), + reduction="none").view(target_left.shape) * weight_right + return (loss_left + loss_right).mean(-1, keepdim=True) + + +class ComputeLoss: + # Compute losses + def __init__(self, model, use_dfl=True): + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h["cls_pw"]], device=device), reduction='none') + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get("label_smoothing", 0.0)) # positive, negative BCE targets + + # Focal loss + g = h["fl_gamma"] # focal loss gamma + if g > 0: + BCEcls = FocalLoss(BCEcls, g) + + m = 
de_parallel(model).model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 + self.BCEcls = BCEcls + self.hyp = h + self.stride = m.stride # model strides + self.nc = m.nc # number of classes + self.nl = m.nl # number of layers + self.no = m.no + self.reg_max = m.reg_max + self.device = device + + self.assigner = TaskAlignedAssigner(topk=int(os.getenv('YOLOM', 10)), + num_classes=self.nc, + alpha=float(os.getenv('YOLOA', 0.5)), + beta=float(os.getenv('YOLOB', 6.0))) + self.assigner2 = TaskAlignedAssigner(topk=int(os.getenv('YOLOM', 10)), + num_classes=self.nc, + alpha=float(os.getenv('YOLOA', 0.5)), + beta=float(os.getenv('YOLOB', 6.0))) + self.assigner3 = TaskAlignedAssigner(topk=int(os.getenv('YOLOM', 10)), + num_classes=self.nc, + alpha=float(os.getenv('YOLOA', 0.5)), + beta=float(os.getenv('YOLOB', 6.0))) + self.bbox_loss = BboxLoss(m.reg_max - 1, use_dfl=use_dfl).to(device) + self.bbox_loss2 = BboxLoss(m.reg_max - 1, use_dfl=use_dfl).to(device) + self.bbox_loss3 = BboxLoss(m.reg_max - 1, use_dfl=use_dfl).to(device) + self.proj = torch.arange(m.reg_max).float().to(device) # / 120.0 + self.use_dfl = use_dfl + + def preprocess(self, targets, batch_size, scale_tensor): + if targets.shape[0] == 0: + out = torch.zeros(batch_size, 0, 5, device=self.device) + else: + i = targets[:, 0] # image index + _, counts = i.unique(return_counts=True) + out = torch.zeros(batch_size, counts.max(), 5, device=self.device) + for j in range(batch_size): + matches = i == j + n = matches.sum() + if n: + out[j, :n] = targets[matches, 1:] + out[..., 1:5] = xywh2xyxy(out[..., 1:5].mul_(scale_tensor)) + return out + + def bbox_decode(self, anchor_points, pred_dist): + if self.use_dfl: + b, a, c = pred_dist.shape # batch, anchors, channels + pred_dist = pred_dist.view(b, a, 4, c // 4).softmax(3).matmul(self.proj.type(pred_dist.dtype)) + # pred_dist = pred_dist.view(b, a, c // 4, 4).transpose(2,3).softmax(3).matmul(self.proj.type(pred_dist.dtype)) + # pred_dist = (pred_dist.view(b, a, c // 4, 4).softmax(2) * self.proj.type(pred_dist.dtype).view(1, 1, -1, 1)).sum(2) + return dist2bbox(pred_dist, anchor_points, xywh=False) + + def __call__(self, p, targets, img=None, epoch=0): + loss = torch.zeros(3, device=self.device) # box, cls, dfl + feats = p[1][0] if isinstance(p, tuple) else p[0] + feats2 = p[1][1] if isinstance(p, tuple) else p[1] + feats3 = p[1][2] if isinstance(p, tuple) else p[2] + + pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split( + (self.reg_max * 4, self.nc), 1) + pred_scores = pred_scores.permute(0, 2, 1).contiguous() + pred_distri = pred_distri.permute(0, 2, 1).contiguous() + + pred_distri2, pred_scores2 = torch.cat([xi.view(feats2[0].shape[0], self.no, -1) for xi in feats2], 2).split( + (self.reg_max * 4, self.nc), 1) + pred_scores2 = pred_scores2.permute(0, 2, 1).contiguous() + pred_distri2 = pred_distri2.permute(0, 2, 1).contiguous() + + pred_distri3, pred_scores3 = torch.cat([xi.view(feats3[0].shape[0], self.no, -1) for xi in feats3], 2).split( + (self.reg_max * 4, self.nc), 1) + pred_scores3 = pred_scores3.permute(0, 2, 1).contiguous() + pred_distri3 = pred_distri3.permute(0, 2, 1).contiguous() + + dtype = pred_scores.dtype + batch_size, grid_size = pred_scores.shape[:2] + imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0] # image size (h,w) + anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5) + + # targets + targets = 
self.preprocess(targets, batch_size, scale_tensor=imgsz[[1, 0, 1, 0]]) + gt_labels, gt_bboxes = targets.split((1, 4), 2) # cls, xyxy + mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0) + + # pboxes + pred_bboxes = self.bbox_decode(anchor_points, pred_distri) # xyxy, (b, h*w, 4) + pred_bboxes2 = self.bbox_decode(anchor_points, pred_distri2) # xyxy, (b, h*w, 4) + pred_bboxes3 = self.bbox_decode(anchor_points, pred_distri3) # xyxy, (b, h*w, 4) + + target_labels, target_bboxes, target_scores, fg_mask = self.assigner( + pred_scores.detach().sigmoid(), + (pred_bboxes.detach() * stride_tensor).type(gt_bboxes.dtype), + anchor_points * stride_tensor, + gt_labels, + gt_bboxes, + mask_gt) + target_labels2, target_bboxes2, target_scores2, fg_mask2 = self.assigner2( + pred_scores2.detach().sigmoid(), + (pred_bboxes2.detach() * stride_tensor).type(gt_bboxes.dtype), + anchor_points * stride_tensor, + gt_labels, + gt_bboxes, + mask_gt) + target_labels3, target_bboxes3, target_scores3, fg_mask3 = self.assigner3( + pred_scores3.detach().sigmoid(), + (pred_bboxes3.detach() * stride_tensor).type(gt_bboxes.dtype), + anchor_points * stride_tensor, + gt_labels, + gt_bboxes, + mask_gt) + + target_bboxes /= stride_tensor + target_scores_sum = max(target_scores.sum(), 1) + target_bboxes2 /= stride_tensor + target_scores_sum2 = max(target_scores2.sum(), 1) + target_bboxes3 /= stride_tensor + target_scores_sum3 = max(target_scores3.sum(), 1) + + # cls loss + # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum # VFL way + loss[1] = 0.25 * self.BCEcls(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum # BCE + loss[1] += 0.25 * self.BCEcls(pred_scores2, target_scores2.to(dtype)).sum() / target_scores_sum2 # BCE + loss[1] += self.BCEcls(pred_scores3, target_scores3.to(dtype)).sum() / target_scores_sum3 # BCE + + # bbox loss + if fg_mask.sum(): + loss[0], loss[2], iou = self.bbox_loss(pred_distri, + pred_bboxes, + anchor_points, + target_bboxes, + target_scores, + target_scores_sum, + fg_mask) + loss[0] *= 0.25 + loss[2] *= 0.25 + if fg_mask2.sum(): + loss0_, loss2_, iou2 = self.bbox_loss2(pred_distri2, + pred_bboxes2, + anchor_points, + target_bboxes2, + target_scores2, + target_scores_sum2, + fg_mask2) + loss[0] += 0.25 * loss0_ + loss[2] += 0.25 * loss2_ + if fg_mask3.sum(): + loss0__, loss2__, iou3 = self.bbox_loss3(pred_distri3, + pred_bboxes3, + anchor_points, + target_bboxes3, + target_scores3, + target_scores_sum3, + fg_mask3) + loss[0] += loss0__ + loss[2] += loss2__ + + loss[0] *= 7.5 # box gain + loss[1] *= 0.5 # cls gain + loss[2] *= 1.5 # dfl gain + + return loss.sum() * batch_size, loss.detach() # loss(box, cls, dfl) diff --git a/utils/metrics.py b/utils/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..1229f2b10d9754ec82c71881ddc54e8b0313161a --- /dev/null +++ b/utils/metrics.py @@ -0,0 +1,397 @@ +import math +import warnings +from pathlib import Path + +import matplotlib.pyplot as plt +import numpy as np +import torch + +from utils import TryExcept, threaded + + +def fitness(x): + # Model fitness as a weighted combination of metrics + w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] + return (x[:, :4] * w).sum(1) + + +def smooth(y, f=0.05): + # Box filter of fraction f + nf = round(len(y) * f * 2) // 2 + 1 # number of filter elements (must be odd) + p = np.ones(nf // 2) # ones padding + yp = np.concatenate((p * y[0], y, p * y[-1]), 0) # y padded + return np.convolve(yp, np.ones(nf) / nf, mode='valid') # 
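The `fitness` function at the top of `utils/metrics.py` above ranks checkpoints by a weighted sum of the four headline metrics, with mAP@0.5:0.95 dominating (weights 0.0, 0.0, 0.1, 0.9 for P, R, mAP@0.5, mAP@0.5:0.95). A small worked example with made-up metric values:

```python
import numpy as np

def fitness(x):
    # weighted combination of [P, R, mAP@0.5, mAP@0.5:0.95], mirroring utils/metrics.py
    w = [0.0, 0.0, 0.1, 0.9]
    return (x[:, :4] * w).sum(1)

# two hypothetical checkpoints: (precision, recall, mAP@0.5, mAP@0.5:0.95)
results = np.array([[0.80, 0.70, 0.65, 0.40],
                    [0.75, 0.72, 0.60, 0.45]])
print(fitness(results))  # [0.425 0.465] -> the second checkpoint is preferred
```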
y-smoothed + + +def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16, prefix=""): + """ Compute the average precision, given the recall and precision curves. + Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. + # Arguments + tp: True positives (nparray, nx1 or nx10). + conf: Objectness value from 0-1 (nparray). + pred_cls: Predicted object classes (nparray). + target_cls: True object classes (nparray). + plot: Plot precision-recall curve at mAP@0.5 + save_dir: Plot save directory + # Returns + The average precision as computed in py-faster-rcnn. + """ + + # Sort by objectness + i = np.argsort(-conf) + tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] + + # Find unique classes + unique_classes, nt = np.unique(target_cls, return_counts=True) + nc = unique_classes.shape[0] # number of classes, number of detections + + # Create Precision-Recall curve and compute AP for each class + px, py = np.linspace(0, 1, 1000), [] # for plotting + ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) + for ci, c in enumerate(unique_classes): + i = pred_cls == c + n_l = nt[ci] # number of labels + n_p = i.sum() # number of predictions + if n_p == 0 or n_l == 0: + continue + + # Accumulate FPs and TPs + fpc = (1 - tp[i]).cumsum(0) + tpc = tp[i].cumsum(0) + + # Recall + recall = tpc / (n_l + eps) # recall curve + r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases + + # Precision + precision = tpc / (tpc + fpc) # precision curve + p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score + + # AP from recall-precision curve + for j in range(tp.shape[1]): + ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) + if plot and j == 0: + py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 + + # Compute F1 (harmonic mean of precision and recall) + f1 = 2 * p * r / (p + r + eps) + names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data + names = dict(enumerate(names)) # to dict + if plot: + plot_pr_curve(px, py, ap, Path(save_dir) / f'{prefix}PR_curve.png', names) + plot_mc_curve(px, f1, Path(save_dir) / f'{prefix}F1_curve.png', names, ylabel='F1') + plot_mc_curve(px, p, Path(save_dir) / f'{prefix}P_curve.png', names, ylabel='Precision') + plot_mc_curve(px, r, Path(save_dir) / f'{prefix}R_curve.png', names, ylabel='Recall') + + i = smooth(f1.mean(0), 0.1).argmax() # max F1 index + p, r, f1 = p[:, i], r[:, i], f1[:, i] + tp = (r * nt).round() # true positives + fp = (tp / (p + eps) - tp).round() # false positives + return tp, fp, p, r, f1, ap, unique_classes.astype(int) + + +def compute_ap(recall, precision): + """ Compute the average precision, given the recall and precision curves + # Arguments + recall: The recall curve (list) + precision: The precision curve (list) + # Returns + Average precision, precision curve, recall curve + """ + + # Append sentinel values to beginning and end + mrec = np.concatenate(([0.0], recall, [1.0])) + mpre = np.concatenate(([1.0], precision, [0.0])) + + # Compute the precision envelope + mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) + + # Integrate area under curve + method = 'interp' # methods: 'continuous', 'interp' + if method == 'interp': + x = np.linspace(0, 1, 101) # 101-point interp (COCO) + ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate + else: # 'continuous' + i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes + ap = np.sum((mrec[i 
+ 1] - mrec[i]) * mpre[i + 1]) # area under curve + + return ap, mpre, mrec + + +class ConfusionMatrix: + # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix + def __init__(self, nc, conf=0.25, iou_thres=0.45): + self.matrix = np.zeros((nc + 1, nc + 1)) + self.nc = nc # number of classes + self.conf = conf + self.iou_thres = iou_thres + + def process_batch(self, detections, labels): + """ + Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Arguments: + detections (Array[N, 6]), x1, y1, x2, y2, conf, class + labels (Array[M, 5]), class, x1, y1, x2, y2 + Returns: + None, updates confusion matrix accordingly + """ + if detections is None: + gt_classes = labels.int() + for gc in gt_classes: + self.matrix[self.nc, gc] += 1 # background FN + return + + detections = detections[detections[:, 4] > self.conf] + gt_classes = labels[:, 0].int() + detection_classes = detections[:, 5].int() + iou = box_iou(labels[:, 1:], detections[:, :4]) + + x = torch.where(iou > self.iou_thres) + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + else: + matches = np.zeros((0, 3)) + + n = matches.shape[0] > 0 + m0, m1, _ = matches.transpose().astype(int) + for i, gc in enumerate(gt_classes): + j = m0 == i + if n and sum(j) == 1: + self.matrix[detection_classes[m1[j]], gc] += 1 # correct + else: + self.matrix[self.nc, gc] += 1 # true background + + if n: + for i, dc in enumerate(detection_classes): + if not any(m1 == i): + self.matrix[dc, self.nc] += 1 # predicted background + + def matrix(self): + return self.matrix + + def tp_fp(self): + tp = self.matrix.diagonal() # true positives + fp = self.matrix.sum(1) - tp # false positives + # fn = self.matrix.sum(0) - tp # false negatives (missed detections) + return tp[:-1], fp[:-1] # remove background class + + @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure') + def plot(self, normalize=True, save_dir='', names=()): + import seaborn as sn + + array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1) # normalize columns + array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) + + fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True) + nc, nn = self.nc, len(names) # number of classes, names + sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size + labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels + ticklabels = (names + ['background']) if labels else "auto" + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered + sn.heatmap(array, + ax=ax, + annot=nc < 30, + annot_kws={ + "size": 8}, + cmap='Blues', + fmt='.2f', + square=True, + vmin=0.0, + xticklabels=ticklabels, + yticklabels=ticklabels).set_facecolor((1, 1, 1)) + ax.set_ylabel('True') + ax.set_ylabel('Predicted') + ax.set_title('Confusion Matrix') + fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) + plt.close(fig) + + def print(self): + for i in range(self.nc + 1): + print(' '.join(map(str, self.matrix[i]))) + + +class WIoU_Scale: + ''' monotonous: { + None: origin v1 + True: monotonic FM v2 + False: non-monotonic FM v3 + } + momentum: 
The momentum of running mean''' + + iou_mean = 1. + monotonous = False + _momentum = 1 - 0.5 ** (1 / 7000) + _is_train = True + + def __init__(self, iou): + self.iou = iou + self._update(self) + + @classmethod + def _update(cls, self): + if cls._is_train: cls.iou_mean = (1 - cls._momentum) * cls.iou_mean + \ + cls._momentum * self.iou.detach().mean().item() + + @classmethod + def _scaled_loss(cls, self, gamma=1.9, delta=3): + if isinstance(self.monotonous, bool): + if self.monotonous: + return (self.iou.detach() / self.iou_mean).sqrt() + else: + beta = self.iou.detach() / self.iou_mean + alpha = delta * torch.pow(gamma, beta - delta) + return beta / alpha + return 1 + + +def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, MDPIoU=False, feat_h=640, feat_w=640, eps=1e-7): + # Returns Intersection over Union (IoU) of box1(1,4) to box2(n,4) + + # Get the coordinates of bounding boxes + if xywh: # transform from xywh to xyxy + (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, -1), box2.chunk(4, -1) + w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2 + b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_ + b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_ + else: # x1, y1, x2, y2 = box1 + b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, -1) + b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, -1) + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps + + # Intersection area + inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ + (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) + + # Union Area + union = w1 * h1 + w2 * h2 - inter + eps + + # IoU + iou = inter / union + if CIoU or DIoU or GIoU: + cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width + ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height + if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 + c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared + rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center dist ** 2 + if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 + v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) + with torch.no_grad(): + alpha = v / (v - iou + (1 + eps)) + return iou - (rho2 / c2 + v * alpha) # CIoU + return iou - rho2 / c2 # DIoU + c_area = cw * ch + eps # convex area + return iou - (c_area - union) / c_area # GIoU https://arxiv.org/pdf/1902.09630.pdf + elif MDPIoU: + d1 = (b2_x1 - b1_x1) ** 2 + (b2_y1 - b1_y1) ** 2 + d2 = (b2_x2 - b1_x2) ** 2 + (b2_y2 - b1_y2) ** 2 + mpdiou_hw_pow = feat_h ** 2 + feat_w ** 2 + return iou - d1 / mpdiou_hw_pow - d2 / mpdiou_hw_pow # MPDIoU + return iou # IoU + + +def box_iou(box1, box2, eps=1e-7): + # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py + """ + Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. 
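For reference, the complete-IoU branch computed in `bbox_iou` above follows Zheng et al. (arXiv:1911.08287); restated in the notation of the code, with ρ² the squared distance between box centres and c² the squared diagonal of the smallest enclosing box, it is

```latex
\mathrm{CIoU} = \mathrm{IoU} - \frac{\rho^2}{c^2} - \alpha v, \qquad
v = \frac{4}{\pi^2}\Bigl(\arctan\frac{w_2}{h_2} - \arctan\frac{w_1}{h_1}\Bigr)^2, \qquad
\alpha = \frac{v}{(1-\mathrm{IoU}) + v + \varepsilon},
```

and the `BboxLoss` modules above use `1 - CIoU`, weighted by the assigned target scores, as the box regression loss. This only restates what the code already computes; no new behaviour is introduced.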
+ Arguments: + box1 (Tensor[N, 4]) + box2 (Tensor[M, 4]) + Returns: + iou (Tensor[N, M]): the NxM matrix containing the pairwise + IoU values for every element in boxes1 and boxes2 + """ + + # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) + (a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2) + inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2) + + # IoU = inter / (area1 + area2 - inter) + return inter / ((a2 - a1).prod(2) + (b2 - b1).prod(2) - inter + eps) + + +def bbox_ioa(box1, box2, eps=1e-7): + """Returns the intersection over box2 area given box1, box2. Boxes are x1y1x2y2 + box1: np.array of shape(nx4) + box2: np.array of shape(mx4) + returns: np.array of shape(nxm) + """ + + # Get the coordinates of bounding boxes + b1_x1, b1_y1, b1_x2, b1_y2 = box1.T + b2_x1, b2_y1, b2_x2, b2_y2 = box2.T + + # Intersection area + inter_area = (np.minimum(b1_x2[:, None], b2_x2) - np.maximum(b1_x1[:, None], b2_x1)).clip(0) * \ + (np.minimum(b1_y2[:, None], b2_y2) - np.maximum(b1_y1[:, None], b2_y1)).clip(0) + + # box2 area + box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps + + # Intersection over box2 area + return inter_area / box2_area + + +def wh_iou(wh1, wh2, eps=1e-7): + # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2 + wh1 = wh1[:, None] # [N,1,2] + wh2 = wh2[None] # [1,M,2] + inter = torch.min(wh1, wh2).prod(2) # [N,M] + return inter / (wh1.prod(2) + wh2.prod(2) - inter + eps) # iou = inter / (area1 + area2 - inter) + + +# Plots ---------------------------------------------------------------------------------------------------------------- + + +@threaded +def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()): + # Precision-recall curve + fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) + py = np.stack(py, axis=1) + + if 0 < len(names) < 21: # display per-class legend if < 21 classes + for i, y in enumerate(py.T): + ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision) + else: + ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) + + ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) + ax.set_xlabel('Recall') + ax.set_ylabel('Precision') + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + ax.set_title('Precision-Recall Curve') + fig.savefig(save_dir, dpi=250) + plt.close(fig) + + +@threaded +def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confidence', ylabel='Metric'): + # Metric-confidence curve + fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) + + if 0 < len(names) < 21: # display per-class legend if < 21 classes + for i, y in enumerate(py): + ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric) + else: + ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric) + + y = smooth(py.mean(0), 0.05) + ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}') + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel) + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + ax.set_title(f'{ylabel}-Confidence Curve') + fig.savefig(save_dir, dpi=250) + plt.close(fig) diff --git a/utils/panoptic/__init__.py b/utils/panoptic/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..84952a8167bc2975913a6def6b4f027d566552a9 --- /dev/null +++ b/utils/panoptic/__init__.py @@ -0,0 +1 @@ +# init \ 
No newline at end of file diff --git a/utils/panoptic/augmentations.py b/utils/panoptic/augmentations.py new file mode 100644 index 0000000000000000000000000000000000000000..8e0a95cb840a4101bde8daa8248cddcbd10ac7c3 --- /dev/null +++ b/utils/panoptic/augmentations.py @@ -0,0 +1,183 @@ +import math +import random + +import cv2 +import numpy as np + +from ..augmentations import box_candidates +from ..general import resample_segments, segment2box +from ..metrics import bbox_ioa + + +def mixup(im, labels, segments, seg_cls, semantic_masks, im2, labels2, segments2, seg_cls2, semantic_masks2): + # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf + r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 + im = (im * r + im2 * (1 - r)).astype(np.uint8) + labels = np.concatenate((labels, labels2), 0) + segments = np.concatenate((segments, segments2), 0) + seg_cls = np.concatenate((seg_cls, seg_cls2), 0) + semantic_masks = np.concatenate((semantic_masks, semantic_masks2), 0) + return im, labels, segments, seg_cls, semantic_masks + + +def random_perspective(im, + targets=(), + segments=(), + semantic_masks = (), + degrees=10, + translate=.1, + scale=.1, + shear=10, + perspective=0.0, + border=(0, 0)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) + # targets = [cls, xyxy] + + height = im.shape[0] + border[0] * 2 # shape(h,w,c) + width = im.shape[1] + border[1] * 2 + + # Center + C = np.eye(3) + C[0, 2] = -im.shape[1] / 2 # x translation (pixels) + C[1, 2] = -im.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) + P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3) + a = random.uniform(-degrees, degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - scale, 1 + scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * width) # x translation (pixels) + T[1, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * height) # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # Visualize + # import matplotlib.pyplot as plt + # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() + # ax[0].imshow(im[:, :, ::-1]) # base + # ax[1].imshow(im2[:, :, ::-1]) # warped + + # Transform label coordinates + n = len(targets) + new_segments = [] + new_semantic_masks = [] + if n: + new = np.zeros((n, 4)) + segments = resample_segments(segments) # upsample + for i, segment in enumerate(segments): + xy = np.ones((len(segment), 3)) + xy[:, :2] = segment + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]) # perspective rescale or affine + + # clip + new[i] = 
segment2box(xy, width, height) + new_segments.append(xy) + + semantic_masks = resample_segments(semantic_masks) + for i, semantic_mask in enumerate(semantic_masks): + #if i < n: + # xy = np.ones((len(segments[i]), 3)) + # xy[:, :2] = segments[i] + # xy = xy @ M.T # transform + # xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]) # perspective rescale or affine + + # new[i] = segment2box(xy, width, height) + # new_segments.append(xy) + + xy_s = np.ones((len(semantic_mask), 3)) + xy_s[:, :2] = semantic_mask + xy_s = xy_s @ M.T # transform + xy_s = (xy_s[:, :2] / xy_s[:, 2:3] if perspective else xy_s[:, :2]) # perspective rescale or affine + + new_semantic_masks.append(xy_s) + + # filter candidates + i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01) + targets = targets[i] + targets[:, 1:5] = new[i] + new_segments = np.array(new_segments)[i] + new_semantic_masks = np.array(new_semantic_masks) + + return im, targets, new_segments, new_semantic_masks + + +def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): + # Resize and pad image while meeting stride-multiple constraints + shape = im.shape[:2] # current shape [height, width] + if isinstance(new_shape, int): + new_shape = (new_shape, new_shape) + + # Scale ratio (new / old) + r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) + if not scaleup: # only scale down, do not scale up (for better val mAP) + r = min(r, 1.0) + + # Compute padding + ratio = r, r # width, height ratios + new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) + dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding + if auto: # minimum rectangle + dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding + elif scaleFill: # stretch + dw, dh = 0.0, 0.0 + new_unpad = (new_shape[1], new_shape[0]) + ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios + + dw /= 2 # divide padding into 2 sides + dh /= 2 + + if shape[::-1] != new_unpad: # resize + im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border + return im, ratio, (dw, dh) + + +def copy_paste(im, labels, segments, seg_cls, semantic_masks, p=0.5): + # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) + n = len(segments) + if p and n: + h, w, _ = im.shape # height, width, channels + im_new = np.zeros(im.shape, np.uint8) + + # calculate ioa first then select indexes randomly + boxes = np.stack([w - labels[:, 3], labels[:, 2], w - labels[:, 1], labels[:, 4]], axis=-1) # (n, 4) + ioa = bbox_ioa(boxes, labels[:, 1:5]) # intersection over area + indexes = np.nonzero((ioa < 0.30).all(1))[0] # (N, ) + n = len(indexes) + for j in random.sample(list(indexes), k=round(p * n)): + l, box, s = labels[j], boxes[j], segments[j] + labels = np.concatenate((labels, [[l[0], *box]]), 0) + segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) + seg_cls.append(l[0].astype(int)) + semantic_masks.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) + cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (1, 1, 1), cv2.FILLED) + + result = cv2.flip(im, 1) # augment segments (flip left-right) + i = cv2.flip(im_new, 1).astype(bool) + im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug + + 
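The `letterbox` helper above resizes an image so it fits the requested shape without changing aspect ratio, then pads the remainder with grey (114, 114, 114) so the output meets the stride constraint. A quick usage sketch, assuming the repository root is on `PYTHONPATH` so the module imports as `utils.panoptic.augmentations`; the input image is synthetic:

```python
import numpy as np
from utils.panoptic.augmentations import letterbox  # assumed import path for this file

im = np.zeros((480, 640, 3), dtype=np.uint8)         # dummy 480x640 BGR frame
out, ratio, (dw, dh) = letterbox(im, new_shape=640, auto=False)
print(out.shape, ratio, (dw, dh))                    # (640, 640, 3) (1.0, 1.0) (0.0, 80.0)
```

With `auto=True` (the default) the padding is taken modulo the stride instead, so the output is the smallest stride-aligned rectangle rather than a full square.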
return im, labels, segments, seg_cls, semantic_masks \ No newline at end of file diff --git a/utils/panoptic/dataloaders.py b/utils/panoptic/dataloaders.py new file mode 100644 index 0000000000000000000000000000000000000000..6f2b1d72bb05c02c482a0eac52d3aae4d698b57f --- /dev/null +++ b/utils/panoptic/dataloaders.py @@ -0,0 +1,478 @@ +import os +import random + +import pickle +from pathlib import Path + +from itertools import repeat +from multiprocessing.pool import Pool, ThreadPool + +import cv2 +import numpy as np +import torch +from torch.utils.data import DataLoader, distributed +from tqdm import tqdm + +from ..augmentations import augment_hsv +from ..dataloaders import InfiniteDataLoader, LoadImagesAndLabels, seed_worker, get_hash, verify_image_label, HELP_URL, TQDM_BAR_FORMAT, LOCAL_RANK +from ..general import NUM_THREADS, LOGGER, xyn2xy, xywhn2xyxy, xyxy2xywhn +from ..torch_utils import torch_distributed_zero_first +from ..coco_utils import annToMask, getCocoIds +from .augmentations import mixup, random_perspective, copy_paste, letterbox + +RANK = int(os.getenv('RANK', -1)) + + +def create_dataloader(path, + imgsz, + batch_size, + stride, + single_cls=False, + hyp=None, + augment=False, + cache=False, + pad=0.0, + rect=False, + rank=-1, + workers=8, + image_weights=False, + close_mosaic=False, + quad=False, + prefix='', + shuffle=False, + mask_downsample_ratio=1, + overlap_mask=False): + if rect and shuffle: + LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') + shuffle = False + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = LoadImagesAndLabelsAndMasks( + path, + imgsz, + batch_size, + augment=augment, # augmentation + hyp=hyp, # hyperparameters + rect=rect, # rectangular batches + cache_images=cache, + single_cls=single_cls, + stride=int(stride), + pad=pad, + image_weights=image_weights, + prefix=prefix, + downsample_ratio=mask_downsample_ratio, + overlap=overlap_mask) + + batch_size = min(batch_size, len(dataset)) + nd = torch.cuda.device_count() # number of CUDA devices + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + #loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates + loader = DataLoader if image_weights or close_mosaic else InfiniteDataLoader + generator = torch.Generator() + generator.manual_seed(6148914691236517205 + RANK) + return loader( + dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=True, + collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else LoadImagesAndLabelsAndMasks.collate_fn, + worker_init_fn=seed_worker, + generator=generator, + ), dataset + +def img2stuff_paths(img_paths): + # Define label paths as a function of image paths + sa, sb = f'{os.sep}images{os.sep}', f'{os.sep}stuff{os.sep}' # /images/, /segmentations/ substrings + return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths] + + +class LoadImagesAndLabelsAndMasks(LoadImagesAndLabels): # for training/testing + + def __init__( + self, + path, + img_size=640, + batch_size=16, + augment=False, + hyp=None, + rect=False, + image_weights=False, + cache_images=False, + single_cls=False, + stride=32, + pad=0, + min_items=0, + prefix="", + downsample_ratio=1, + overlap=False, + ): + super().__init__( + 
path, + img_size, + batch_size, + augment, + hyp, + rect, + image_weights, + cache_images, + single_cls, + stride, + pad, + min_items, + prefix) + self.downsample_ratio = downsample_ratio + self.overlap = overlap + + # semantic segmentation + self.coco_ids = getCocoIds() + + # Check cache + self.seg_files = img2stuff_paths(self.im_files) # labels + p = Path(path) + cache_path = (p.with_suffix('') if p.is_file() else Path(self.seg_files[0]).parent) + cache_path = Path(str(cache_path) + '_stuff').with_suffix('.cache') + try: + cache, exists = np.load(cache_path, allow_pickle = True).item(), True # load dict + #assert cache['version'] == self.cache_version # matches current version + #assert cache['hash'] == get_hash(self.seg_files + self.im_files) # identical hash + except Exception: + cache, exists = self.cache_seg_labels(cache_path, prefix), False # run cache ops + + # Display cache + nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total + if exists and LOCAL_RANK in {-1, 0}: + d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupt" + tqdm(None, desc = (prefix + d), total = n, initial = n, bar_format = TQDM_BAR_FORMAT) # display cache results + if cache['msgs']: + LOGGER.info('\n'.join(cache['msgs'])) # display warnings + assert (0 < nf) or (not augment), f'{prefix}No labels found in {cache_path}, can not start training. {HELP_URL}' + + # Read cache + [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items + seg_labels, _, self.semantic_masks = zip(*cache.values()) + nl = len(np.concatenate(seg_labels, 0)) # number of labels + assert nl > 0 or not augment, f'{prefix}All labels empty in {cache_path}, can not start training. {HELP_URL}' + + # Update labels + self.seg_cls = [] + include_class = [] # filter labels to include only these classes (optional) + include_class_array = np.array(include_class).reshape(1, -1) + for i, (label, semantic_masks) in enumerate(zip(seg_labels, self.semantic_masks)): + self.seg_cls.append((label[:, 0].astype(int)).tolist()) + if include_class: + j = (label[:, 0:1] == include_class_array).any(1) + if semantic_masks: + self.semantic_masks[i] = semantic_masks[j] + if single_cls: # single-class training, merge all classes into 0 + if semantic_masks: + self.semantic_masks[i][:, 0] = 0 + + def __getitem__(self, index): + index = self.indices[index] # linear, shuffled, or image_weights + + hyp = self.hyp + mosaic = self.mosaic and random.random() < hyp['mosaic'] + masks = [] + if mosaic: + # Load mosaic + img, labels, segments, seg_cls, semantic_masks = self.load_mosaic(index) + shapes = None + + # MixUp augmentation + if random.random() < hyp["mixup"]: + img, labels, segments, seg_cls, semantic_masks = mixup(img, labels, segments, seg_cls, semantic_masks, + *self.load_mosaic(random.randint(0, self.n - 1))) + + else: + # Load image + img, (h0, w0), (h, w) = self.load_image(index) + + # Letterbox + shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape + img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) + shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling + + labels = self.labels[index].copy() + # [array, array, ....], array.shape=(num_points, 2), xyxyxyxy + segments = self.segments[index].copy() + if len(segments): + for i_s in range(len(segments)): + segments[i_s] = xyn2xy( + segments[i_s], + ratio[0] * w, + ratio[1] * h, + padw=pad[0], + padh=pad[1], + ) + + seg_cls = self.seg_cls[index].copy() 
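+            # stuff (semantic) polygons receive the same letterbox scale and padding as the instance segments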
+ semantic_masks = self.semantic_masks[index].copy() + #semantic_masks = [xyn2xy(x, ratio[0] * w, ratio[1] * h, padw = pad[0], padh = pad[1]) for x in semantic_masks] + if len(semantic_masks): + for ss in range(len(semantic_masks)): + semantic_masks[ss] = xyn2xy( + semantic_masks[ss], + ratio[0] * w, + ratio[1] * h, + padw = pad[0], + padh = pad[1], + ) + + if labels.size: # normalized xywh to pixel xyxy format + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) + + if self.augment: + img, labels, segments, semantic_masks = random_perspective( + img, + labels, + segments=segments, + semantic_masks = semantic_masks, + degrees=hyp["degrees"], + translate=hyp["translate"], + scale=hyp["scale"], + shear=hyp["shear"], + perspective=hyp["perspective"]) + + nl = len(labels) # number of labels + if nl: + labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1e-3) + if self.overlap: + masks, sorted_idx = polygons2masks_overlap(img.shape[:2], + segments, + downsample_ratio=self.downsample_ratio) + masks = masks[None] # (640, 640) -> (1, 640, 640) + labels = labels[sorted_idx] + else: + masks = polygons2masks(img.shape[:2], segments, color=1, downsample_ratio=self.downsample_ratio) + + masks = (torch.from_numpy(masks) if len(masks) else torch.zeros(1 if self.overlap else nl, img.shape[0] // + self.downsample_ratio, img.shape[1] // + self.downsample_ratio)) + semantic_masks = polygons2masks(img.shape[:2], semantic_masks, color = 1, downsample_ratio=self.downsample_ratio) + #semantic_masks = polygons2masks(img.shape[:2], semantic_masks, color = 1, downsample_ratio=1) + semantic_masks = torch.from_numpy(semantic_masks) + # TODO: albumentations support + if self.augment: + # Albumentations + # there are some augmentation that won't change boxes and masks, + # so just be it for now. + img, labels = self.albumentations(img, labels) + nl = len(labels) # update after albumentations + ns = len(semantic_masks) + + # HSV color-space + augment_hsv(img, hgain=hyp["hsv_h"], sgain=hyp["hsv_s"], vgain=hyp["hsv_v"]) + + # Flip up-down + if random.random() < hyp["flipud"]: + img = np.flipud(img) + if nl: + labels[:, 2] = 1 - labels[:, 2] + masks = torch.flip(masks, dims=[1]) + if ns: + semantic_masks = torch.flip(semantic_masks, dims = [1]) + + # Flip left-right + if random.random() < hyp["fliplr"]: + img = np.fliplr(img) + if nl: + labels[:, 1] = 1 - labels[:, 1] + masks = torch.flip(masks, dims=[2]) + if ns: + semantic_masks = torch.flip(semantic_masks, dims = [2]) + + # Cutouts # labels = cutout(img, labels, p=0.5) + + labels_out = torch.zeros((nl, 6)) + if nl: + labels_out[:, 1:] = torch.from_numpy(labels) + + # Combine semantic masks + semantic_seg_masks = torch.zeros((len(self.coco_ids), img.shape[0] // self.downsample_ratio, + img.shape[1] // self.downsample_ratio), dtype = torch.uint8) + #semantic_seg_masks = torch.zeros((len(self.coco_ids), img.shape[0], img.shape[1]), dtype = torch.uint8) + for cls_id, semantic_mask in zip(seg_cls, semantic_masks): + semantic_seg_masks[cls_id] = (semantic_seg_masks[cls_id].logical_or(semantic_mask)).int() + + + # Convert + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img = np.ascontiguousarray(img) + + return (torch.from_numpy(img), labels_out, self.im_files[index], shapes, masks, semantic_seg_masks) + + def load_mosaic(self, index): + # YOLO 4-mosaic loader. 
Loads 1 image + 3 random images into a 4-image mosaic + labels4, segments4, seg_cls, semantic_masks4 = [], [], [], [] + s = self.img_size + yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y + + # 3 additional image indices + indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = self.load_image(index) + + # place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + labels, segments, semantic_masks = self.labels[index].copy(), self.segments[index].copy(), self.semantic_masks[index].copy() + + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padw, padh) for x in segments] + semantic_masks = [xyn2xy(x, w, h, padw, padh) for x in semantic_masks] + labels4.append(labels) + segments4.extend(segments) + seg_cls.extend(self.seg_cls[index].copy()) + semantic_masks4.extend(semantic_masks) + + # Concat/clip labels + labels4 = np.concatenate(labels4, 0) + for i in range(len(semantic_masks4)): + if i < len(segments4): + np.clip(labels4[:, 1:][i], 0, 2 * s, out = labels4[:, 1:][i]) + np.clip(segments4[i], 0, 2 * s, out = segments4[i]) + np.clip(semantic_masks4[i], 0, 2 * s, out = semantic_masks4[i]) + # img4, labels4 = replicate(img4, labels4) # replicate + + # 3 additional image indices + # Augment + img4, labels4, segments4, seg_cls, semantic_masks4 = copy_paste(img4, labels4, segments4, seg_cls, semantic_masks4, p=self.hyp["copy_paste"]) + img4, labels4, segments4, semantic_masks4 = random_perspective(img4, + labels4, + segments4, + semantic_masks4, + degrees=self.hyp["degrees"], + translate=self.hyp["translate"], + scale=self.hyp["scale"], + shear=self.hyp["shear"], + perspective=self.hyp["perspective"], + border=self.mosaic_border) # border to remove + + return img4, labels4, segments4, seg_cls, semantic_masks4 + + def cache_seg_labels(self, path = Path('./labels_stuff.cache'), prefix = ''): + # Cache dataset labels, check images and read shapes + x = {} # dict + nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages + desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." 
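+        # verify each (image, stuff-label) pair in parallel; the per-file results populate the cache dict saved below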
+ with Pool(NUM_THREADS) as pool: + pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.seg_files, repeat(prefix))), + desc = desc, + total = len(self.im_files), + bar_format = TQDM_BAR_FORMAT) + for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: + nm += nm_f + nf += nf_f + ne += ne_f + nc += nc_f + if im_file: + x[im_file] = [lb, shape, segments] + if msg: + msgs.append(msg) + pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupt" + + pbar.close() + if msgs: + LOGGER.info('\n'.join(msgs)) + if nf == 0: + LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. {HELP_URL}') + x['hash'] = get_hash(self.seg_files + self.im_files) + x['results'] = nf, nm, ne, nc, len(self.im_files) + x['msgs'] = msgs # warnings + x['version'] = self.cache_version # cache version + try: + np.save(path, x) # save cache for next time + path.with_suffix('.cache.npy').rename(path) # remove .npy suffix + LOGGER.info(f'{prefix}New cache created: {path}') + except Exception as e: + LOGGER.warning(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # not writeable + return x + + @staticmethod + def collate_fn(batch): + img, label, path, shapes, masks, semantic_masks = zip(*batch) # transposed + batched_masks = torch.cat(masks, 0) + for i, l in enumerate(label): + l[:, 0] = i # add target image index for build_targets() + return torch.stack(img, 0), torch.cat(label, 0), path, shapes, batched_masks, torch.stack(semantic_masks, 0) + + + +def polygon2mask(img_size, polygons, color=1, downsample_ratio=1): + """ + Args: + img_size (tuple): The image size. + polygons (np.ndarray): [N, M], N is the number of polygons, + M is the number of points(Be divided by 2). + """ + mask = np.zeros(img_size, dtype=np.uint8) + polygons = np.asarray(polygons) + polygons = polygons.astype(np.int32) + shape = polygons.shape + polygons = polygons.reshape(shape[0], -1, 2) + cv2.fillPoly(mask, polygons, color=color) + nh, nw = (img_size[0] // downsample_ratio, img_size[1] // downsample_ratio) + # NOTE: fillPoly firstly then resize is trying the keep the same way + # of loss calculation when mask-ratio=1. + mask = cv2.resize(mask, (nw, nh)) + return mask + + +def polygons2masks(img_size, polygons, color, downsample_ratio=1): + """ + Args: + img_size (tuple): The image size. + polygons (list[np.ndarray]): each polygon is [N, M], + N is the number of polygons, + M is the number of points(Be divided by 2). 
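+        color (int): fill value passed to cv2.fillPoly when rasterising each polygon.
+        downsample_ratio (int): factor by which the rasterised masks are shrunk.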
+ """ + masks = [] + for si in range(len(polygons)): + mask = polygon2mask(img_size, [polygons[si].reshape(-1)], color, downsample_ratio) + masks.append(mask) + return np.array(masks) + + +def polygons2masks_overlap(img_size, segments, downsample_ratio=1): + """Return a (640, 640) overlap mask.""" + masks = np.zeros((img_size[0] // downsample_ratio, img_size[1] // downsample_ratio), + dtype=np.int32 if len(segments) > 255 else np.uint8) + areas = [] + ms = [] + for si in range(len(segments)): + mask = polygon2mask( + img_size, + [segments[si].reshape(-1)], + downsample_ratio=downsample_ratio, + color=1, + ) + ms.append(mask) + areas.append(mask.sum()) + areas = np.asarray(areas) + index = np.argsort(-areas) + ms = np.array(ms)[index] + for i in range(len(segments)): + mask = ms[i] * (i + 1) + masks = masks + mask + masks = np.clip(masks, a_min=0, a_max=i + 1) + return masks, index diff --git a/utils/panoptic/general.py b/utils/panoptic/general.py new file mode 100644 index 0000000000000000000000000000000000000000..b526333dc5a1b8625d7e6a51ee6ba41818c62adb --- /dev/null +++ b/utils/panoptic/general.py @@ -0,0 +1,137 @@ +import cv2 +import numpy as np +import torch +import torch.nn.functional as F + + +def crop_mask(masks, boxes): + """ + "Crop" predicted masks by zeroing out everything not in the predicted bbox. + Vectorized by Chong (thanks Chong). + + Args: + - masks should be a size [h, w, n] tensor of masks + - boxes should be a size [n, 4] tensor of bbox coords in relative point form + """ + + n, h, w = masks.shape + x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(1,1,n) + r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # rows shape(1,w,1) + c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None] # cols shape(h,1,1) + + return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2)) + + +def process_mask_upsample(protos, masks_in, bboxes, shape): + """ + Crop after upsample. + proto_out: [mask_dim, mask_h, mask_w] + out_masks: [n, mask_dim], n is number of masks after nms + bboxes: [n, 4], n is number of masks after nms + shape:input_image_size, (h, w) + + return: h, w, n + """ + + c, mh, mw = protos.shape # CHW + masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) + masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW + masks = crop_mask(masks, bboxes) # CHW + return masks.gt_(0.5) + + +def process_mask(protos, masks_in, bboxes, shape, upsample=False): + """ + Crop before upsample. 
+ proto_out: [mask_dim, mask_h, mask_w] + out_masks: [n, mask_dim], n is number of masks after nms + bboxes: [n, 4], n is number of masks after nms + shape:input_image_size, (h, w) + + return: h, w, n + """ + + c, mh, mw = protos.shape # CHW + ih, iw = shape + masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW + + downsampled_bboxes = bboxes.clone() + downsampled_bboxes[:, 0] *= mw / iw + downsampled_bboxes[:, 2] *= mw / iw + downsampled_bboxes[:, 3] *= mh / ih + downsampled_bboxes[:, 1] *= mh / ih + + masks = crop_mask(masks, downsampled_bboxes) # CHW + if upsample: + masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW + return masks.gt_(0.5) + + +def scale_image(im1_shape, masks, im0_shape, ratio_pad=None): + """ + img1_shape: model input shape, [h, w] + img0_shape: origin pic shape, [h, w, 3] + masks: [h, w, num] + """ + # Rescale coordinates (xyxy) from im1_shape to im0_shape + if ratio_pad is None: # calculate from im0_shape + gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new + pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding + else: + pad = ratio_pad[1] + top, left = int(pad[1]), int(pad[0]) # y, x + bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0]) + + if len(masks.shape) < 2: + raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}') + masks = masks[top:bottom, left:right] + # masks = masks.permute(2, 0, 1).contiguous() + # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0] + # masks = masks.permute(1, 2, 0).contiguous() + masks = cv2.resize(masks, (im0_shape[1], im0_shape[0])) + + if len(masks.shape) == 2: + masks = masks[:, :, None] + return masks + + +def mask_iou(mask1, mask2, eps=1e-7): + """ + mask1: [N, n] m1 means number of predicted objects + mask2: [M, n] m2 means number of gt objects + Note: n means image_w x image_h + + return: masks iou, [N, M] + """ + intersection = torch.matmul(mask1, mask2.t()).clamp(0) + union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection # (area1 + area2) - intersection + return intersection / (union + eps) + + +def masks_iou(mask1, mask2, eps=1e-7): + """ + mask1: [N, n] m1 means number of predicted objects + mask2: [N, n] m2 means number of gt objects + Note: n means image_w x image_h + + return: masks iou, (N, ) + """ + intersection = (mask1 * mask2).sum(1).clamp(0) # (N, ) + union = (mask1.sum(1) + mask2.sum(1))[None] - intersection # (area1 + area2) - intersection + return intersection / (union + eps) + + +def masks2segments(masks, strategy='largest'): + # Convert masks(n,160,160) into segments(n,xy) + segments = [] + for x in masks.int().cpu().numpy().astype('uint8'): + c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0] + if c: + if strategy == 'concat': # concatenate all segments + c = np.concatenate([x.reshape(-1, 2) for x in c]) + elif strategy == 'largest': # select largest segment + c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2) + else: + c = np.zeros((0, 2)) # no segments found + segments.append(c.astype('float32')) + return segments diff --git a/utils/panoptic/loss.py b/utils/panoptic/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..b45b2c27e0a05c275cbc50064288aece3ae3e856 --- /dev/null +++ b/utils/panoptic/loss.py @@ -0,0 +1,186 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + 
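+# shared YOLO utilities plus the mask-cropping helper from utils/panoptic/general.py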
+from ..general import xywh2xyxy +from ..loss import FocalLoss, smooth_BCE +from ..metrics import bbox_iou +from ..torch_utils import de_parallel +from .general import crop_mask + + +class ComputeLoss: + # Compute losses + def __init__(self, model, autobalance=False, overlap=False): + self.sort_obj_iou = False + self.overlap = overlap + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + self.device = device + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) + BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets + + # Focal loss + g = h['fl_gamma'] # focal loss gamma + if g > 0: + BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) + + m = de_parallel(model).model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 + self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index + self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance + self.na = m.na # number of anchors + self.nc = m.nc # number of classes + self.nl = m.nl # number of layers + self.nm = m.nm # number of masks + self.anchors = m.anchors + self.device = device + + def __call__(self, preds, targets, masks): # predictions, targets, model + p, proto = preds + bs, nm, mask_h, mask_w = proto.shape # batch size, number of masks, mask height, mask width + lcls = torch.zeros(1, device=self.device) + lbox = torch.zeros(1, device=self.device) + lobj = torch.zeros(1, device=self.device) + lseg = torch.zeros(1, device=self.device) + tcls, tbox, indices, anchors, tidxs, xywhn = self.build_targets(p, targets) # targets + + # Losses + for i, pi in enumerate(p): # layer index, layer predictions + b, a, gj, gi = indices[i] # image, anchor, gridy, gridx + tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj + + n = b.shape[0] # number of targets + if n: + pxy, pwh, _, pcls, pmask = pi[b, a, gj, gi].split((2, 2, 1, self.nc, nm), 1) # subset of predictions + + # Box regression + pxy = pxy.sigmoid() * 2 - 0.5 + pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i] + pbox = torch.cat((pxy, pwh), 1) # predicted box + iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze() # iou(prediction, target) + lbox += (1.0 - iou).mean() # iou loss + + # Objectness + iou = iou.detach().clamp(0).type(tobj.dtype) + if self.sort_obj_iou: + j = iou.argsort() + b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j] + if self.gr < 1: + iou = (1.0 - self.gr) + self.gr * iou + tobj[b, a, gj, gi] = iou # iou ratio + + # Classification + if self.nc > 1: # cls loss (only if multiple classes) + t = torch.full_like(pcls, self.cn, device=self.device) # targets + t[range(n), tcls[i]] = self.cp + lcls += self.BCEcls(pcls, t) # BCE + + # Mask regression + if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample + masks = F.interpolate(masks[None], (mask_h, mask_w), mode="nearest")[0] + marea = xywhn[i][:, 2:].prod(1) # mask width, height normalized + mxyxy = xywh2xyxy(xywhn[i] * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device)) + for bi in b.unique(): + j = b == bi # matching index + if self.overlap: + mask_gti = torch.where(masks[bi][None] == tidxs[i][j].view(-1, 1, 1), 1.0, 0.0) + else: + mask_gti = masks[tidxs[i]][j] + 
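+                    # accumulate the per-image mask loss: predicted coefficients @ prototypes, BCE against the GT masks, cropped to each box and normalised by box area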
lseg += self.single_mask_loss(mask_gti, pmask[j], proto[bi], mxyxy[j], marea[j]) + + obji = self.BCEobj(pi[..., 4], tobj) + lobj += obji * self.balance[i] # obj loss + if self.autobalance: + self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() + + if self.autobalance: + self.balance = [x / self.balance[self.ssi] for x in self.balance] + lbox *= self.hyp["box"] + lobj *= self.hyp["obj"] + lcls *= self.hyp["cls"] + lseg *= self.hyp["box"] / bs + + loss = lbox + lobj + lcls + lseg + return loss * bs, torch.cat((lbox, lseg, lobj, lcls)).detach() + + def single_mask_loss(self, gt_mask, pred, proto, xyxy, area): + # Mask loss for one image + pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n,32) @ (32,80,80) -> (n,80,80) + loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction="none") + return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean() + + def build_targets(self, p, targets): + # Build targets for compute_loss(), input targets(image,class,x,y,w,h) + na, nt = self.na, targets.shape[0] # number of anchors, targets + tcls, tbox, indices, anch, tidxs, xywhn = [], [], [], [], [], [] + gain = torch.ones(8, device=self.device) # normalized to gridspace gain + ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + if self.overlap: + batch = p[0].shape[0] + ti = [] + for i in range(batch): + num = (targets[:, 0] == i).sum() # find number of targets of each image + ti.append(torch.arange(num, device=self.device).float().view(1, num).repeat(na, 1) + 1) # (na, num) + ti = torch.cat(ti, 1) # (na, nt) + else: + ti = torch.arange(nt, device=self.device).float().view(1, nt).repeat(na, 1) + targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None], ti[..., None]), 2) # append anchor indices + + g = 0.5 # bias + off = torch.tensor( + [ + [0, 0], + [1, 0], + [0, 1], + [-1, 0], + [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], + device=self.device).float() * g # offsets + + for i in range(self.nl): + anchors, shape = self.anchors[i], p[i].shape + gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain # shape(3,n,7) + if nt: + # Matches + r = t[..., 4:6] / anchors[:, None] # wh ratio + j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t'] # compare + # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + t = t[j] # filter + + # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1 < g) & (gxy > 1)).T + l, m = ((gxi % 1 < g) & (gxi > 1)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + bc, gxy, gwh, at = t.chunk(4, 1) # (image, class), grid xy, grid wh, anchors + (a, tidx), (b, c) = at.long().T, bc.long().T # anchors, image, class + gij = (gxy - offsets).long() + gi, gj = gij.T # grid indices + + # Append + indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1))) # image, anchor, grid + tbox.append(torch.cat((gxy - gij, gwh), 1)) # box + anch.append(anchors[a]) # anchors + tcls.append(c) # class + tidxs.append(tidx) + xywhn.append(torch.cat((gxy, gwh), 1) / gain[2:6]) # xywh normalized + + return tcls, tbox, indices, anch, tidxs, xywhn diff --git a/utils/panoptic/loss_tal.py b/utils/panoptic/loss_tal.py new file mode 100644 index 
0000000000000000000000000000000000000000..d8594395d4beada966859d578c6dc5476f948034 --- /dev/null +++ b/utils/panoptic/loss_tal.py @@ -0,0 +1,285 @@ +import os + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from torchvision.ops import sigmoid_focal_loss + +from utils.general import xywh2xyxy, xyxy2xywh +from utils.metrics import bbox_iou +from utils.panoptic.tal.anchor_generator import dist2bbox, make_anchors, bbox2dist +from utils.panoptic.tal.assigner import TaskAlignedAssigner +from utils.torch_utils import de_parallel +from utils.panoptic.general import crop_mask + + +def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 + # return positive, negative label smoothing BCE targets + return 1.0 - 0.5 * eps, 0.5 * eps + + +class VarifocalLoss(nn.Module): + # Varifocal loss by Zhang et al. https://arxiv.org/abs/2008.13367 + def __init__(self): + super().__init__() + + def forward(self, pred_score, gt_score, label, alpha=0.75, gamma=2.0): + weight = alpha * pred_score.sigmoid().pow(gamma) * (1 - label) + gt_score * label + with torch.cuda.amp.autocast(enabled=False): + loss = (F.binary_cross_entropy_with_logits(pred_score.float(), gt_score.float(), + reduction="none") * weight).sum() + return loss + + +class FocalLoss(nn.Module): + # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super().__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = "none" # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + # p_t = torch.exp(-loss) + # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability + + # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py + pred_prob = torch.sigmoid(pred) # prob from logits + p_t = true * pred_prob + (1 - true) * (1 - pred_prob) + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = (1.0 - p_t) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == "mean": + return loss.mean() + elif self.reduction == "sum": + return loss.sum() + else: # 'none' + return loss + + +class BboxLoss(nn.Module): + def __init__(self, reg_max, use_dfl=False): + super().__init__() + self.reg_max = reg_max + self.use_dfl = use_dfl + + def forward(self, pred_dist, pred_bboxes, anchor_points, target_bboxes, target_scores, target_scores_sum, fg_mask): + # iou loss + bbox_mask = fg_mask.unsqueeze(-1).repeat([1, 1, 4]) # (b, h*w, 4) + pred_bboxes_pos = torch.masked_select(pred_bboxes, bbox_mask).view(-1, 4) + target_bboxes_pos = torch.masked_select(target_bboxes, bbox_mask).view(-1, 4) + bbox_weight = torch.masked_select(target_scores.sum(-1), fg_mask).unsqueeze(-1) + + iou = bbox_iou(pred_bboxes_pos, target_bboxes_pos, xywh=False, CIoU=True) + loss_iou = 1.0 - iou + + #### wiou + #iou = bbox_iou(pred_bboxes_pos, target_bboxes_pos, xywh=False, WIoU=True, scale=True) + #if type(iou) is tuple: + # if len(iou) == 2: + # loss_iou = (iou[1].detach() * (1 - iou[0])) + # iou = iou[0] + # else: + # loss_iou = (iou[0] * iou[1]) + # iou = iou[-1] + #else: + # loss_iou = (1.0 - iou) # iou loss + + loss_iou *= bbox_weight + loss_iou = loss_iou.sum() / target_scores_sum + # loss_iou = loss_iou.mean() + + # 
dfl loss + if self.use_dfl: + dist_mask = fg_mask.unsqueeze(-1).repeat([1, 1, (self.reg_max + 1) * 4]) + pred_dist_pos = torch.masked_select(pred_dist, dist_mask).view(-1, 4, self.reg_max + 1) + target_ltrb = bbox2dist(anchor_points, target_bboxes, self.reg_max) + target_ltrb_pos = torch.masked_select(target_ltrb, bbox_mask).view(-1, 4) + loss_dfl = self._df_loss(pred_dist_pos, target_ltrb_pos) * bbox_weight + loss_dfl = loss_dfl.sum() / target_scores_sum + else: + loss_dfl = torch.tensor(0.0).to(pred_dist.device) + + return loss_iou, loss_dfl, iou + + def _df_loss(self, pred_dist, target): + target_left = target.to(torch.long) + target_right = target_left + 1 + weight_left = target_right.to(torch.float) - target + weight_right = 1 - weight_left + loss_left = F.cross_entropy(pred_dist.view(-1, self.reg_max + 1), target_left.view(-1), reduction="none").view( + target_left.shape) * weight_left + loss_right = F.cross_entropy(pred_dist.view(-1, self.reg_max + 1), target_right.view(-1), + reduction="none").view(target_left.shape) * weight_right + return (loss_left + loss_right).mean(-1, keepdim=True) + + +class ComputeLoss: + # Compute losses + def __init__(self, model, use_dfl=True, overlap=True): + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h["cls_pw"]], device=device), reduction='none') + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get("label_smoothing", 0.0)) # positive, negative BCE targets + + # Focal loss + g = h["fl_gamma"] # focal loss gamma + if g > 0: + BCEcls = FocalLoss(BCEcls, g) + + m = de_parallel(model).model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 + self.BCEcls = BCEcls + self.hyp = h + self.stride = m.stride # model strides + self.nc = m.nc # number of classes + self.nl = m.nl # number of layers + self.no = m.no + self.nm = m.nm + self.overlap = overlap + self.reg_max = m.reg_max + self.device = device + + self.assigner = TaskAlignedAssigner(topk=int(os.getenv('YOLOM', 10)), + num_classes=self.nc, + alpha=float(os.getenv('YOLOA', 0.5)), + beta=float(os.getenv('YOLOB', 6.0))) + self.bbox_loss = BboxLoss(m.reg_max - 1, use_dfl=use_dfl).to(device) + self.proj = torch.arange(m.reg_max).float().to(device) # / 120.0 + self.use_dfl = use_dfl + + def preprocess(self, targets, batch_size, scale_tensor): + if targets.shape[0] == 0: + out = torch.zeros(batch_size, 0, 5, device=self.device) + else: + i = targets[:, 0] # image index + _, counts = i.unique(return_counts=True) + out = torch.zeros(batch_size, counts.max(), 5, device=self.device) + for j in range(batch_size): + matches = i == j + n = matches.sum() + if n: + out[j, :n] = targets[matches, 1:] + out[..., 1:5] = xywh2xyxy(out[..., 1:5].mul_(scale_tensor)) + return out + + def bbox_decode(self, anchor_points, pred_dist): + if self.use_dfl: + b, a, c = pred_dist.shape # batch, anchors, channels + pred_dist = pred_dist.view(b, a, 4, c // 4).softmax(3).matmul(self.proj.type(pred_dist.dtype)) + # pred_dist = pred_dist.view(b, a, c // 4, 4).transpose(2,3).softmax(3).matmul(self.proj.type(pred_dist.dtype)) + # pred_dist = (pred_dist.view(b, a, c // 4, 4).softmax(2) * self.proj.type(pred_dist.dtype).view(1, 1, -1, 1)).sum(2) + return dist2bbox(pred_dist, anchor_points, xywh=False) + + def __call__(self, p, targets, masks, semasks, img=None, epoch=0): + loss = torch.zeros(6, 
device=self.device) # box, cls, dfl + feats, pred_masks, proto, psemasks = p if len(p) == 4 else p[1] + batch_size, _, mask_h, mask_w = proto.shape + pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split( + (self.reg_max * 4, self.nc), 1) + pred_scores = pred_scores.permute(0, 2, 1).contiguous() + pred_distri = pred_distri.permute(0, 2, 1).contiguous() + pred_masks = pred_masks.permute(0, 2, 1).contiguous() + + dtype = pred_scores.dtype + batch_size, grid_size = pred_scores.shape[:2] + imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0] # image size (h,w) + anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5) + + # targets + try: + batch_idx = targets[:, 0].view(-1, 1) + targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]]) + gt_labels, gt_bboxes = targets.split((1, 4), 2) # cls, xyxy + mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0) + except RuntimeError as e: + raise TypeError('ERROR.') from e + + + # pboxes + pred_bboxes = self.bbox_decode(anchor_points, pred_distri) # xyxy, (b, h*w, 4) + + target_labels, target_bboxes, target_scores, fg_mask, target_gt_idx = self.assigner( + pred_scores.detach().sigmoid(), + (pred_bboxes.detach() * stride_tensor).type(gt_bboxes.dtype), + anchor_points * stride_tensor, + gt_labels, + gt_bboxes, + mask_gt) + + target_scores_sum = target_scores.sum() + + # cls loss + # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum # VFL way + loss[2] = self.BCEcls(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum # BCE + + # bbox loss + if fg_mask.sum(): + loss[0], loss[3], _ = self.bbox_loss(pred_distri, + pred_bboxes, + anchor_points, + target_bboxes / stride_tensor, + target_scores, + target_scores_sum, + fg_mask) + + # masks loss + if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample + masks = F.interpolate(masks[None], (mask_h, mask_w), mode='nearest')[0] + + for i in range(batch_size): + if fg_mask[i].sum(): + mask_idx = target_gt_idx[i][fg_mask[i]] + if self.overlap: + gt_mask = torch.where(masks[[i]] == (mask_idx + 1).view(-1, 1, 1), 1.0, 0.0) + else: + gt_mask = masks[batch_idx.view(-1) == i][mask_idx] + xyxyn = target_bboxes[i][fg_mask[i]] / imgsz[[1, 0, 1, 0]] + marea = xyxy2xywh(xyxyn)[:, 2:].prod(1) + mxyxy = xyxyn * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device) + loss[1] += self.single_mask_loss(gt_mask, pred_masks[i][fg_mask[i]], proto[i], mxyxy, + marea) # seg loss + # Semantic Segmentation + # focal loss + pt = torch.flatten(psemasks, start_dim = 2).permute(0, 2, 1) + gt = torch.flatten(semasks, start_dim = 2).permute(0, 2, 1) + + bs, _, _ = gt.shape + #torch.clamp(torch.sigmoid(logits), min=eps, max= 1 - eps) + #total_loss = (sigmoid_focal_loss(pt.float(), gt.float(), alpha = .25, gamma = 2., reduction = 'mean')) / 2. + #total_loss = (sigmoid_focal_loss(pt.clamp(-16., 16.), gt, alpha = .25, gamma = 2., reduction = 'mean')) / 2. + total_loss = (sigmoid_focal_loss(pt, gt, alpha = .25, gamma = 2., reduction = 'mean')) / 2. + loss[4] += total_loss * 20. + + # dice loss + pt = torch.flatten(psemasks.softmax(dim = 1)) + gt = torch.flatten(semasks) + + inter_mask = torch.sum(torch.mul(pt, gt)) + union_mask = torch.sum(torch.add(pt, gt)) + dice_coef = (2. * inter_mask + 1.) / (union_mask + 1.) + loss[5] += (1. - dice_coef) / 2. 
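+        # weight the six loss terms (box, seg, cls, dfl, semantic focal, semantic dice) before the summed loss is scaled by batch size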
+ + loss[0] *= 7.5 # box gain + loss[1] *= 2.5 / batch_size + loss[2] *= 0.5 # cls gain + loss[3] *= 1.5 # dfl gain + loss[4] *= 2.5 #/ batch_size + loss[5] *= 2.5 #/ batch_size + + return loss.sum() * batch_size, loss.detach() # loss(box, cls, dfl) + + def single_mask_loss(self, gt_mask, pred, proto, xyxy, area): + # Mask loss for one image + pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n, 32) @ (32,80,80) -> (n,80,80) + loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction='none') + return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean() diff --git a/utils/panoptic/metrics.py b/utils/panoptic/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..dcb8bc2d6df780a961ff1473de7d1e5f630d3e8d --- /dev/null +++ b/utils/panoptic/metrics.py @@ -0,0 +1,272 @@ +import numpy as np +import torch + +from ..metrics import ap_per_class + + +def fitness(x): + # Model fitness as a weighted combination of metrics + w = [0.0, 0.0, 0.1, 0.9, 0.0, 0.0, 0.1, 0.9, 0.1, 0.9] + return (x[:, :len(w)] * w).sum(1) + + +def ap_per_class_box_and_mask( + tp_m, + tp_b, + conf, + pred_cls, + target_cls, + plot=False, + save_dir=".", + names=(), +): + """ + Args: + tp_b: tp of boxes. + tp_m: tp of masks. + other arguments see `func: ap_per_class`. + """ + results_boxes = ap_per_class(tp_b, + conf, + pred_cls, + target_cls, + plot=plot, + save_dir=save_dir, + names=names, + prefix="Box")[2:] + results_masks = ap_per_class(tp_m, + conf, + pred_cls, + target_cls, + plot=plot, + save_dir=save_dir, + names=names, + prefix="Mask")[2:] + + results = { + "boxes": { + "p": results_boxes[0], + "r": results_boxes[1], + "ap": results_boxes[3], + "f1": results_boxes[2], + "ap_class": results_boxes[4]}, + "masks": { + "p": results_masks[0], + "r": results_masks[1], + "ap": results_masks[3], + "f1": results_masks[2], + "ap_class": results_masks[4]}} + return results + + +class Metric: + + def __init__(self) -> None: + self.p = [] # (nc, ) + self.r = [] # (nc, ) + self.f1 = [] # (nc, ) + self.all_ap = [] # (nc, 10) + self.ap_class_index = [] # (nc, ) + + @property + def ap50(self): + """AP@0.5 of all classes. + Return: + (nc, ) or []. + """ + return self.all_ap[:, 0] if len(self.all_ap) else [] + + @property + def ap(self): + """AP@0.5:0.95 + Return: + (nc, ) or []. + """ + return self.all_ap.mean(1) if len(self.all_ap) else [] + + @property + def mp(self): + """mean precision of all classes. + Return: + float. + """ + return self.p.mean() if len(self.p) else 0.0 + + @property + def mr(self): + """mean recall of all classes. + Return: + float. + """ + return self.r.mean() if len(self.r) else 0.0 + + @property + def map50(self): + """Mean AP@0.5 of all classes. + Return: + float. + """ + return self.all_ap[:, 0].mean() if len(self.all_ap) else 0.0 + + @property + def map(self): + """Mean AP@0.5:0.95 of all classes. + Return: + float. 
+ """ + return self.all_ap.mean() if len(self.all_ap) else 0.0 + + def mean_results(self): + """Mean of results, return mp, mr, map50, map""" + return (self.mp, self.mr, self.map50, self.map) + + def class_result(self, i): + """class-aware result, return p[i], r[i], ap50[i], ap[i]""" + return (self.p[i], self.r[i], self.ap50[i], self.ap[i]) + + def get_maps(self, nc): + maps = np.zeros(nc) + self.map + for i, c in enumerate(self.ap_class_index): + maps[c] = self.ap[i] + return maps + + def update(self, results): + """ + Args: + results: tuple(p, r, ap, f1, ap_class) + """ + p, r, all_ap, f1, ap_class_index = results + self.p = p + self.r = r + self.all_ap = all_ap + self.f1 = f1 + self.ap_class_index = ap_class_index + + +class Metrics: + """Metric for boxes and masks.""" + + def __init__(self) -> None: + self.metric_box = Metric() + self.metric_mask = Metric() + + def update(self, results): + """ + Args: + results: Dict{'boxes': Dict{}, 'masks': Dict{}} + """ + self.metric_box.update(list(results["boxes"].values())) + self.metric_mask.update(list(results["masks"].values())) + + def mean_results(self): + return self.metric_box.mean_results() + self.metric_mask.mean_results() + + def class_result(self, i): + return self.metric_box.class_result(i) + self.metric_mask.class_result(i) + + def get_maps(self, nc): + return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc) + + @property + def ap_class_index(self): + # boxes and masks have the same ap_class_index + return self.metric_box.ap_class_index + + +class Semantic_Metrics: + def __init__(self, nc, device): + self.nc = nc # number of classes + self.device = device + self.iou = [] + self.c_bit_counts = torch.zeros(nc, dtype = torch.long).to(device) + self.c_intersection_counts = torch.zeros(nc, dtype = torch.long).to(device) + self.c_union_counts = torch.zeros(nc, dtype = torch.long).to(device) + + def update(self, pred_masks, target_masks): + nb, nc, h, w = pred_masks.shape + device = pred_masks.device + + for b in range(nb): + onehot_mask = pred_masks[b].to(device) + # convert predict mask to one hot + semantic_mask = torch.flatten(onehot_mask, start_dim = 1).permute(1, 0) # class x h x w -> (h x w) x class + max_idx = semantic_mask.argmax(1) + output_masks = (torch.zeros(semantic_mask.shape).to(self.device)).scatter(1, max_idx.unsqueeze(1), 1.0) # one hot: (h x w) x class + output_masks = torch.reshape(output_masks.permute(1, 0), (nc, h, w)) # (h x w) x class -> class x h x w + onehot_mask = output_masks.int() + + for c in range(self.nc): + pred_mask = onehot_mask[c].to(device) + target_mask = target_masks[b, c].to(device) + + # calculate IoU + intersection = (torch.logical_and(pred_mask, target_mask).sum()).item() + union = (torch.logical_or(pred_mask, target_mask).sum()).item() + iou = 0. if (0 == union) else (intersection / union) + + # record class pixel counts, intersection counts, union counts + self.c_bit_counts[c] += target_mask.int().sum() + self.c_intersection_counts[c] += intersection + self.c_union_counts[c] += union + + self.iou.append(iou) + + def results(self): + # Mean IoU + miou = 0. 
if (0 == len(self.iou)) else np.sum(self.iou) / (len(self.iou) * self.nc) + + # Frequency Weighted IoU + c_iou = self.c_intersection_counts / (self.c_union_counts + 1) # add smooth + # c_bit_counts = self.c_bit_counts.astype(int) + total_c_bit_counts = self.c_bit_counts.sum() + freq_ious = torch.zeros(1, dtype = torch.long).to(self.device) if (0 == total_c_bit_counts) else (self.c_bit_counts / total_c_bit_counts) * c_iou + fwiou = (freq_ious.sum()).item() + + return (miou, fwiou) + + def reset(self): + self.iou = [] + self.c_bit_counts = torch.zeros(self.nc, dtype = torch.long).to(self.device) + self.c_intersection_counts = torch.zeros(self.nc, dtype = torch.long).to(self.device) + self.c_union_counts = torch.zeros(self.nc, dtype = torch.long).to(self.device) + + +KEYS = [ + "train/box_loss", + "train/seg_loss", # train loss + "train/cls_loss", + "train/dfl_loss", + "train/fcl_loss", + "train/dic_loss", + "metrics/precision(B)", + "metrics/recall(B)", + "metrics/mAP_0.5(B)", + "metrics/mAP_0.5:0.95(B)", # metrics + "metrics/precision(M)", + "metrics/recall(M)", + "metrics/mAP_0.5(M)", + "metrics/mAP_0.5:0.95(M)", # metrics + "metrics/MIOUS(S)", + "metrics/FWIOUS(S)", # metrics + "val/box_loss", + "val/seg_loss", # val loss + "val/cls_loss", + "val/dfl_loss", + "val/fcl_loss", + "val/dic_loss", + "x/lr0", + "x/lr1", + "x/lr2",] + +BEST_KEYS = [ + "best/epoch", + "best/precision(B)", + "best/recall(B)", + "best/mAP_0.5(B)", + "best/mAP_0.5:0.95(B)", + "best/precision(M)", + "best/recall(M)", + "best/mAP_0.5(M)", + "best/mAP_0.5:0.95(M)", + "best/MIOUS(S)", + "best/FWIOUS(S)",] diff --git a/utils/panoptic/plots.py b/utils/panoptic/plots.py new file mode 100644 index 0000000000000000000000000000000000000000..55d87b7950554aa803d160ad8d20205aef37dc9c --- /dev/null +++ b/utils/panoptic/plots.py @@ -0,0 +1,164 @@ +import contextlib +import math +from pathlib import Path + +import cv2 +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import torch +from torchvision.utils import draw_segmentation_masks, save_image + +from .. import threaded +from ..general import xywh2xyxy +from ..plots import Annotator, colors + + +@threaded +def plot_images_and_masks(images, targets, masks, semasks, paths=None, fname='images.jpg', names=None): + + try: + if images.shape[-2:] != semasks.shape[-2:]: + m = torch.nn.Upsample(scale_factor=4, mode='nearest') + semasks = m(semasks) + + for idx in range(images.shape[0]): + output_img = draw_segmentation_masks( + image = images[idx, :, :, :].cpu().to(dtype = torch.uint8), + masks = semasks[idx, :, :, :].cpu().to(dtype = torch.bool), + alpha = 1) + cv2.imwrite( + '{}_{}.jpg'.format(fname, idx), + torch.permute(output_img, (1, 2, 0)).numpy() + ) + except: + pass + + # Plot image grid with labels + if isinstance(images, torch.Tensor): + images = images.cpu().float().numpy() + if isinstance(targets, torch.Tensor): + targets = targets.cpu().numpy() + if isinstance(masks, torch.Tensor): + masks = masks.cpu().numpy().astype(int) + if isinstance(semasks, torch.Tensor): + semasks = semasks.cpu().numpy().astype(int) + + max_size = 1920 # max image size + max_subplots = 16 # max image subplots, i.e. 
4x4 + bs, _, h, w = images.shape # batch size, _, height, width + bs = min(bs, max_subplots) # limit plot images + ns = np.ceil(bs ** 0.5) # number of subplots (square) + if np.max(images[0]) <= 1: + images *= 255 # de-normalise (optional) + + # Build Image + mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init + for i, im in enumerate(images): + if i == max_subplots: # if last batch has fewer images than we expect + break + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + im = im.transpose(1, 2, 0) + mosaic[y:y + h, x:x + w, :] = im + + # Resize (optional) + scale = max_size / ns / max(h, w) + if scale < 1: + h = math.ceil(scale * h) + w = math.ceil(scale * w) + mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) + + # Annotate + fs = int((h + w) * ns * 0.01) # font size + annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) + for i in range(i + 1): + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders + if paths: + annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames + if len(targets) > 0: + idx = targets[:, 0] == i + ti = targets[idx] # image targets + + boxes = xywh2xyxy(ti[:, 2:6]).T + classes = ti[:, 1].astype('int') + labels = ti.shape[1] == 6 # labels if no conf column + conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) + + if boxes.shape[1]: + if boxes.max() <= 1.01: # if normalized with tolerance 0.01 + boxes[[0, 2]] *= w # scale to pixels + boxes[[1, 3]] *= h + elif scale < 1: # absolute coords need scale if image scales + boxes *= scale + boxes[[0, 2]] += x + boxes[[1, 3]] += y + for j, box in enumerate(boxes.T.tolist()): + cls = classes[j] + color = colors(cls) + cls = names[cls] if names else cls + if labels or conf[j] > 0.25: # 0.25 conf thresh + label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' + annotator.box_label(box, label, color=color) + + # Plot masks + if len(masks): + if masks.max() > 1.0: # mean that masks are overlap + image_masks = masks[[i]] # (1, 640, 640) + nl = len(ti) + index = np.arange(nl).reshape(nl, 1, 1) + 1 + image_masks = np.repeat(image_masks, nl, axis=0) + image_masks = np.where(image_masks == index, 1.0, 0.0) + else: + image_masks = masks[idx] + + im = np.asarray(annotator.im).copy() + for j, box in enumerate(boxes.T.tolist()): + if labels or conf[j] > 0.25: # 0.25 conf thresh + color = colors(classes[j]) + mh, mw = image_masks[j].shape + if mh != h or mw != w: + mask = image_masks[j].astype(np.uint8) + mask = cv2.resize(mask, (w, h)) + mask = mask.astype(bool) + else: + mask = image_masks[j].astype(bool) + with contextlib.suppress(Exception): + im[y:y + h, x:x + w, :][mask] = im[y:y + h, x:x + w, :][mask] * 0.4 + np.array(color) * 0.6 + annotator.fromarray(im) + annotator.im.save(fname) # save + + +def plot_results_with_masks(file="path/to/results.csv", dir="", best=True): + # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') + save_dir = Path(file).parent if file else Path(dir) + fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True) + ax = ax.ravel() + files = list(save_dir.glob("results*.csv")) + assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot." 
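+    # plot every results*.csv found in save_dir on the same axes, one curve per file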
+ for f in files: + try: + data = pd.read_csv(f) + index = np.argmax(0.9 * data.values[:, 8] + 0.1 * data.values[:, 7] + 0.9 * data.values[:, 12] + + 0.1 * data.values[:, 11]) + s = [x.strip() for x in data.columns] + x = data.values[:, 0] + for i, j in enumerate([1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12]): + y = data.values[:, j] + # y[y == 0] = np.nan # don't show zero values + ax[i].plot(x, y, marker=".", label=f.stem, linewidth=2, markersize=2) + if best: + # best + ax[i].scatter(index, y[index], color="r", label=f"best:{index}", marker="*", linewidth=3) + ax[i].set_title(s[j] + f"\n{round(y[index], 5)}") + else: + # last + ax[i].scatter(x[-1], y[-1], color="r", label="last", marker="*", linewidth=3) + ax[i].set_title(s[j] + f"\n{round(y[-1], 5)}") + # if j in [8, 9, 10]: # share train and val loss y axes + # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) + except Exception as e: + print(f"Warning: Plotting error for {f}: {e}") + ax[1].legend() + fig.savefig(save_dir / "results.png", dpi=200) + plt.close() diff --git a/utils/panoptic/tal/__init__.py b/utils/panoptic/tal/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..84952a8167bc2975913a6def6b4f027d566552a9 --- /dev/null +++ b/utils/panoptic/tal/__init__.py @@ -0,0 +1 @@ +# init \ No newline at end of file diff --git a/utils/panoptic/tal/anchor_generator.py b/utils/panoptic/tal/anchor_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..0de163651e21225445097f90e05a6c6d8ff10092 --- /dev/null +++ b/utils/panoptic/tal/anchor_generator.py @@ -0,0 +1,38 @@ +import torch + +from utils.general import check_version + +TORCH_1_10 = check_version(torch.__version__, '1.10.0') + + +def make_anchors(feats, strides, grid_cell_offset=0.5): + """Generate anchors from features.""" + anchor_points, stride_tensor = [], [] + assert feats is not None + dtype, device = feats[0].dtype, feats[0].device + for i, stride in enumerate(strides): + _, _, h, w = feats[i].shape + sx = torch.arange(end=w, device=device, dtype=dtype) + grid_cell_offset # shift x + sy = torch.arange(end=h, device=device, dtype=dtype) + grid_cell_offset # shift y + sy, sx = torch.meshgrid(sy, sx, indexing='ij') if TORCH_1_10 else torch.meshgrid(sy, sx) + anchor_points.append(torch.stack((sx, sy), -1).view(-1, 2)) + stride_tensor.append(torch.full((h * w, 1), stride, dtype=dtype, device=device)) + return torch.cat(anchor_points), torch.cat(stride_tensor) + + +def dist2bbox(distance, anchor_points, xywh=True, dim=-1): + """Transform distance(ltrb) to box(xywh or xyxy).""" + lt, rb = torch.split(distance, 2, dim) + x1y1 = anchor_points - lt + x2y2 = anchor_points + rb + if xywh: + c_xy = (x1y1 + x2y2) / 2 + wh = x2y2 - x1y1 + return torch.cat((c_xy, wh), dim) # xywh bbox + return torch.cat((x1y1, x2y2), dim) # xyxy bbox + + +def bbox2dist(anchor_points, bbox, reg_max): + """Transform bbox(xyxy) to dist(ltrb).""" + x1y1, x2y2 = torch.split(bbox, 2, -1) + return torch.cat((anchor_points - x1y1, x2y2 - anchor_points), -1).clamp(0, reg_max - 0.01) # dist (lt, rb) diff --git a/utils/panoptic/tal/assigner.py b/utils/panoptic/tal/assigner.py new file mode 100644 index 0000000000000000000000000000000000000000..8c61f5c0508b87522eb4cf048bbe72973dbb4be4 --- /dev/null +++ b/utils/panoptic/tal/assigner.py @@ -0,0 +1,181 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from utils.metrics import bbox_iou + + +def select_candidates_in_gts(xy_centers, gt_bboxes, eps=1e-9): + """select the positive anchor 
center in gt + + Args: + xy_centers (Tensor): shape(h*w, 4) + gt_bboxes (Tensor): shape(b, n_boxes, 4) + Return: + (Tensor): shape(b, n_boxes, h*w) + """ + n_anchors = xy_centers.shape[0] + bs, n_boxes, _ = gt_bboxes.shape + lt, rb = gt_bboxes.view(-1, 1, 4).chunk(2, 2) # left-top, right-bottom + bbox_deltas = torch.cat((xy_centers[None] - lt, rb - xy_centers[None]), dim=2).view(bs, n_boxes, n_anchors, -1) + # return (bbox_deltas.min(3)[0] > eps).to(gt_bboxes.dtype) + return bbox_deltas.amin(3).gt_(eps) + + +def select_highest_overlaps(mask_pos, overlaps, n_max_boxes): + """if an anchor box is assigned to multiple gts, + the one with the highest iou will be selected. + + Args: + mask_pos (Tensor): shape(b, n_max_boxes, h*w) + overlaps (Tensor): shape(b, n_max_boxes, h*w) + Return: + target_gt_idx (Tensor): shape(b, h*w) + fg_mask (Tensor): shape(b, h*w) + mask_pos (Tensor): shape(b, n_max_boxes, h*w) + """ + # (b, n_max_boxes, h*w) -> (b, h*w) + fg_mask = mask_pos.sum(-2) + if fg_mask.max() > 1: # one anchor is assigned to multiple gt_bboxes + mask_multi_gts = (fg_mask.unsqueeze(1) > 1).repeat([1, n_max_boxes, 1]) # (b, n_max_boxes, h*w) + max_overlaps_idx = overlaps.argmax(1) # (b, h*w) + is_max_overlaps = F.one_hot(max_overlaps_idx, n_max_boxes) # (b, h*w, n_max_boxes) + is_max_overlaps = is_max_overlaps.permute(0, 2, 1).to(overlaps.dtype) # (b, n_max_boxes, h*w) + mask_pos = torch.where(mask_multi_gts, is_max_overlaps, mask_pos) # (b, n_max_boxes, h*w) + fg_mask = mask_pos.sum(-2) + # find each grid serve which gt(index) + target_gt_idx = mask_pos.argmax(-2) # (b, h*w) + return target_gt_idx, fg_mask, mask_pos + + +class TaskAlignedAssigner(nn.Module): + def __init__(self, topk=13, num_classes=80, alpha=1.0, beta=6.0, eps=1e-9): + super().__init__() + self.topk = topk + self.num_classes = num_classes + self.bg_idx = num_classes + self.alpha = alpha + self.beta = beta + self.eps = eps + + @torch.no_grad() + def forward(self, pd_scores, pd_bboxes, anc_points, gt_labels, gt_bboxes, mask_gt): + """This code referenced to + https://github.com/Nioolek/PPYOLOE_pytorch/blob/master/ppyoloe/assigner/tal_assigner.py + + Args: + pd_scores (Tensor): shape(bs, num_total_anchors, num_classes) + pd_bboxes (Tensor): shape(bs, num_total_anchors, 4) + anc_points (Tensor): shape(num_total_anchors, 2) + gt_labels (Tensor): shape(bs, n_max_boxes, 1) + gt_bboxes (Tensor): shape(bs, n_max_boxes, 4) + mask_gt (Tensor): shape(bs, n_max_boxes, 1) + Returns: + target_labels (Tensor): shape(bs, num_total_anchors) + target_bboxes (Tensor): shape(bs, num_total_anchors, 4) + target_scores (Tensor): shape(bs, num_total_anchors, num_classes) + fg_mask (Tensor): shape(bs, num_total_anchors) + """ + self.bs = pd_scores.size(0) + self.n_max_boxes = gt_bboxes.size(1) + + if self.n_max_boxes == 0: + device = gt_bboxes.device + return (torch.full_like(pd_scores[..., 0], self.bg_idx).to(device), + torch.zeros_like(pd_bboxes).to(device), + torch.zeros_like(pd_scores).to(device), + torch.zeros_like(pd_scores[..., 0]).to(device), + torch.zeros_like(pd_scores[..., 0]).to(device)) + + mask_pos, align_metric, overlaps = self.get_pos_mask(pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points, + mask_gt) + + target_gt_idx, fg_mask, mask_pos = select_highest_overlaps(mask_pos, overlaps, self.n_max_boxes) + + # assigned target + target_labels, target_bboxes, target_scores = self.get_targets(gt_labels, gt_bboxes, target_gt_idx, fg_mask) + + # normalize + align_metric *= mask_pos + pos_align_metrics = align_metric.amax(axis=-1, 
keepdim=True) # b, max_num_obj + pos_overlaps = (overlaps * mask_pos).amax(axis=-1, keepdim=True) # b, max_num_obj + norm_align_metric = (align_metric * pos_overlaps / (pos_align_metrics + self.eps)).amax(-2).unsqueeze(-1) + target_scores = target_scores * norm_align_metric + + return target_labels, target_bboxes, target_scores, fg_mask.bool(), target_gt_idx + + def get_pos_mask(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points, mask_gt): + + # get anchor_align metric, (b, max_num_obj, h*w) + align_metric, overlaps = self.get_box_metrics(pd_scores, pd_bboxes, gt_labels, gt_bboxes) + # get in_gts mask, (b, max_num_obj, h*w) + mask_in_gts = select_candidates_in_gts(anc_points, gt_bboxes) + # get topk_metric mask, (b, max_num_obj, h*w) + mask_topk = self.select_topk_candidates(align_metric * mask_in_gts, + topk_mask=mask_gt.repeat([1, 1, self.topk]).bool()) + # merge all mask to a final mask, (b, max_num_obj, h*w) + mask_pos = mask_topk * mask_in_gts * mask_gt + + return mask_pos, align_metric, overlaps + + def get_box_metrics(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes): + + gt_labels = gt_labels.to(torch.long) # b, max_num_obj, 1 + ind = torch.zeros([2, self.bs, self.n_max_boxes], dtype=torch.long) # 2, b, max_num_obj + ind[0] = torch.arange(end=self.bs).view(-1, 1).repeat(1, self.n_max_boxes) # b, max_num_obj + ind[1] = gt_labels.squeeze(-1) # b, max_num_obj + # get the scores of each grid for each gt cls + bbox_scores = pd_scores[ind[0], :, ind[1]] # b, max_num_obj, h*w + + overlaps = bbox_iou(gt_bboxes.unsqueeze(2), pd_bboxes.unsqueeze(1), xywh=False, CIoU=True).squeeze(3).clamp(0) + #overlaps = bbox_iou(gt_bboxes.unsqueeze(2), pd_bboxes.unsqueeze(1), xywh=False, WIoU=True, scale=True)[-1].squeeze(3).clamp(0) + align_metric = bbox_scores.pow(self.alpha) * overlaps.pow(self.beta) + return align_metric, overlaps + + def select_topk_candidates(self, metrics, largest=True, topk_mask=None): + """ + Args: + metrics: (b, max_num_obj, h*w). + topk_mask: (b, max_num_obj, topk) or None + """ + + num_anchors = metrics.shape[-1] # h*w + # (b, max_num_obj, topk) + topk_metrics, topk_idxs = torch.topk(metrics, self.topk, dim=-1, largest=largest) + if topk_mask is None: + topk_mask = (topk_metrics.max(-1, keepdim=True) > self.eps).tile([1, 1, self.topk]) + # (b, max_num_obj, topk) + topk_idxs = torch.where(topk_mask, topk_idxs, 0) + # (b, max_num_obj, topk, h*w) -> (b, max_num_obj, h*w) + is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(-2) + # filter invalid bboxes + # assigned topk should be unique, this is for dealing with empty labels + # since empty labels will generate index `0` through `F.one_hot` + # NOTE: but what if the topk_idxs include `0`? 
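+        # Duplicate counts can only come from the `0` padding above (torch.topk itself
+        # returns distinct positions), so anchors counted more than once are dropped by
+        # the line below; a genuine top-k hit on anchor index 0 is discarded along with
+        # them, which is the edge case the NOTE above refers to.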
+ is_in_topk = torch.where(is_in_topk > 1, 0, is_in_topk) + return is_in_topk.to(metrics.dtype) + + def get_targets(self, gt_labels, gt_bboxes, target_gt_idx, fg_mask): + """ + Args: + gt_labels: (b, max_num_obj, 1) + gt_bboxes: (b, max_num_obj, 4) + target_gt_idx: (b, h*w) + fg_mask: (b, h*w) + """ + + # assigned target labels, (b, 1) + batch_ind = torch.arange(end=self.bs, dtype=torch.int64, device=gt_labels.device)[..., None] + target_gt_idx = target_gt_idx + batch_ind * self.n_max_boxes # (b, h*w) + target_labels = gt_labels.long().flatten()[target_gt_idx] # (b, h*w) + + # assigned target boxes, (b, max_num_obj, 4) -> (b, h*w) + target_bboxes = gt_bboxes.view(-1, 4)[target_gt_idx] + + # assigned target scores + target_labels.clamp(0) + target_scores = F.one_hot(target_labels, self.num_classes) # (b, h*w, 80) + fg_scores_mask = fg_mask[:, :, None].repeat(1, 1, self.num_classes) # (b, h*w, 80) + target_scores = torch.where(fg_scores_mask > 0, target_scores, 0) + + return target_labels, target_bboxes, target_scores diff --git a/utils/plots.py b/utils/plots.py new file mode 100644 index 0000000000000000000000000000000000000000..fa49dc19d7f4b445a76de1e2ee135defa95778e3 --- /dev/null +++ b/utils/plots.py @@ -0,0 +1,570 @@ +import contextlib +import math +import os +from copy import copy +from pathlib import Path +from urllib.error import URLError + +import cv2 +import matplotlib +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import seaborn as sn +import torch +from PIL import Image, ImageDraw, ImageFont + +from utils import TryExcept, threaded +from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_boxes, increment_path, + is_ascii, xywh2xyxy, xyxy2xywh) +from utils.metrics import fitness +from utils.segment.general import scale_image + +# Settings +RANK = int(os.getenv('RANK', -1)) +matplotlib.rc('font', **{'size': 11}) +matplotlib.use('Agg') # for writing to files only + + +class Colors: + # Ultralytics color palette https://ultralytics.com/ + def __init__(self): + # hex = matplotlib.colors.TABLEAU_COLORS.values() + hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB', + '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7') + self.palette = [self.hex2rgb(f'#{c}') for c in hexs] + self.n = len(self.palette) + + def __call__(self, i, bgr=False): + c = self.palette[int(i) % self.n] + return (c[2], c[1], c[0]) if bgr else c + + @staticmethod + def hex2rgb(h): # rgb order (PIL) + return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) + + +colors = Colors() # create instance for 'from utils.plots import colors' + + +def check_pil_font(font=FONT, size=10): + # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary + font = Path(font) + font = font if font.exists() else (CONFIG_DIR / font.name) + try: + return ImageFont.truetype(str(font) if font.exists() else font.name, size) + except Exception: # download if missing + try: + check_font(font) + return ImageFont.truetype(str(font), size) + except TypeError: + check_requirements('Pillow>=8.4.0') # known issue https://github.com/ultralytics/yolov5/issues/5374 + except URLError: # not online + return ImageFont.load_default() + + +class Annotator: + # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations + def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): + assert im.data.contiguous, 'Image 
not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' + non_ascii = not is_ascii(example) # non-latin labels, i.e. asian, arabic, cyrillic + self.pil = pil or non_ascii + if self.pil: # use PIL + self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) + self.draw = ImageDraw.Draw(self.im) + self.font = check_pil_font(font='Arial.Unicode.ttf' if non_ascii else font, + size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) + else: # use cv2 + self.im = im + self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width + + def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): + # Add one xyxy box to image with label + if self.pil or not is_ascii(label): + self.draw.rectangle(box, width=self.lw, outline=color) # box + if label: + w, h = self.font.getsize(label) # text width, height + outside = box[1] - h >= 0 # label fits outside box + self.draw.rectangle( + (box[0], box[1] - h if outside else box[1], box[0] + w + 1, + box[1] + 1 if outside else box[1] + h + 1), + fill=color, + ) + # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 + self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) + else: # cv2 + p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) + cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) + if label: + tf = max(self.lw - 1, 1) # font thickness + w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height + outside = p1[1] - h >= 3 + p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 + cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled + cv2.putText(self.im, + label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), + 0, + self.lw / 3, + txt_color, + thickness=tf, + lineType=cv2.LINE_AA) + + def masks(self, masks, colors, im_gpu=None, alpha=0.5): + """Plot masks at once. + Args: + masks (tensor): predicted masks on cuda, shape: [n, h, w] + colors (List[List[Int]]): colors for predicted masks, [[r, g, b] * n] + im_gpu (tensor): img is in cuda, shape: [3, h, w], range: [0, 1] + alpha (float): mask transparency: 0.0 fully transparent, 1.0 opaque + """ + if self.pil: + # convert to numpy first + self.im = np.asarray(self.im).copy() + if im_gpu is None: + # Add multiple masks of shape(h,w,n) with colors list([r,g,b], [r,g,b], ...) 
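+            # CPU path: the masks are rescaled to the image shape, combined with the
+            # per-mask colour matrix ((h,w,n) @ (n,3)), and alpha-blended into self.im in place.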
+ if len(masks) == 0: + return + if isinstance(masks, torch.Tensor): + masks = torch.as_tensor(masks, dtype=torch.uint8) + masks = masks.permute(1, 2, 0).contiguous() + masks = masks.cpu().numpy() + # masks = np.ascontiguousarray(masks.transpose(1, 2, 0)) + masks = scale_image(masks.shape[:2], masks, self.im.shape) + masks = np.asarray(masks, dtype=np.float32) + colors = np.asarray(colors, dtype=np.float32) # shape(n,3) + s = masks.sum(2, keepdims=True).clip(0, 1) # add all masks together + masks = (masks @ colors).clip(0, 255) # (h,w,n) @ (n,3) = (h,w,3) + self.im[:] = masks * alpha + self.im * (1 - s * alpha) + else: + if len(masks) == 0: + self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255 + colors = torch.tensor(colors, device=im_gpu.device, dtype=torch.float32) / 255.0 + colors = colors[:, None, None] # shape(n,1,1,3) + masks = masks.unsqueeze(3) # shape(n,h,w,1) + masks_color = masks * (colors * alpha) # shape(n,h,w,3) + + inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1) + mcs = (masks_color * inv_alph_masks).sum(0) * 2 # mask color summand shape(n,h,w,3) + + im_gpu = im_gpu.flip(dims=[0]) # flip channel + im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3) + im_gpu = im_gpu * inv_alph_masks[-1] + mcs + im_mask = (im_gpu * 255).byte().cpu().numpy() + self.im[:] = scale_image(im_gpu.shape, im_mask, self.im.shape) + if self.pil: + # convert im back to PIL and update draw + self.fromarray(self.im) + + def rectangle(self, xy, fill=None, outline=None, width=1): + # Add rectangle to image (PIL-only) + self.draw.rectangle(xy, fill, outline, width) + + def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'): + # Add text to image (PIL-only) + if anchor == 'bottom': # start y from font bottom + w, h = self.font.getsize(text) # text width, height + xy[1] += 1 - h + self.draw.text(xy, text, fill=txt_color, font=self.font) + + def fromarray(self, im): + # Update self.im from a numpy array + self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) + self.draw = ImageDraw.Draw(self.im) + + def result(self): + # Return annotated image as array + return np.asarray(self.im) + + +def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): + """ + x: Features to be visualized + module_type: Module type + stage: Module stage within model + n: Maximum number of feature maps to plot + save_dir: Directory to save results + """ + if 'Detect' not in module_type: + batch, channels, height, width = x.shape # batch, channels, height, width + if height > 1 and width > 1: + f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename + + blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels + n = min(n, channels) # number of plots + fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols + ax = ax.ravel() + plt.subplots_adjust(wspace=0.05, hspace=0.05) + for i in range(n): + ax[i].imshow(blocks[i].squeeze()) # cmap='gray' + ax[i].axis('off') + + LOGGER.info(f'Saving {f}... 
({n}/{channels})') + plt.savefig(f, dpi=300, bbox_inches='tight') + plt.close() + np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save + + +def hist2d(x, y, n=100): + # 2d histogram used in labels.png and evolve.png + xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) + hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) + xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) + yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) + return np.log(hist[xidx, yidx]) + + +def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): + from scipy.signal import butter, filtfilt + + # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy + def butter_lowpass(cutoff, fs, order): + nyq = 0.5 * fs + normal_cutoff = cutoff / nyq + return butter(order, normal_cutoff, btype='low', analog=False) + + b, a = butter_lowpass(cutoff, fs, order=order) + return filtfilt(b, a, data) # forward-backward filter + + +def output_to_target(output, max_det=300): + # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] for plotting + targets = [] + for i, o in enumerate(output): + box, conf, cls = o[:max_det, :6].cpu().split((4, 1, 1), 1) + j = torch.full((conf.shape[0], 1), i) + targets.append(torch.cat((j, cls, xyxy2xywh(box), conf), 1)) + return torch.cat(targets, 0).numpy() + + +@threaded +def plot_images(images, targets, paths=None, fname='images.jpg', names=None): + # Plot image grid with labels + if isinstance(images, torch.Tensor): + images = images.cpu().float().numpy() + if isinstance(targets, torch.Tensor): + targets = targets.cpu().numpy() + + max_size = 1920 # max image size + max_subplots = 16 # max image subplots, i.e. 4x4 + bs, _, h, w = images.shape # batch size, _, height, width + bs = min(bs, max_subplots) # limit plot images + ns = np.ceil(bs ** 0.5) # number of subplots (square) + if np.max(images[0]) <= 1: + images *= 255 # de-normalise (optional) + + # Build Image + mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init + for i, im in enumerate(images): + if i == max_subplots: # if last batch has fewer images than we expect + break + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + im = im.transpose(1, 2, 0) + mosaic[y:y + h, x:x + w, :] = im + + # Resize (optional) + scale = max_size / ns / max(h, w) + if scale < 1: + h = math.ceil(scale * h) + w = math.ceil(scale * w) + mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) + + # Annotate + fs = int((h + w) * ns * 0.01) # font size + annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) + for i in range(i + 1): + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders + if paths: + annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames + if len(targets) > 0: + ti = targets[targets[:, 0] == i] # image targets + boxes = xywh2xyxy(ti[:, 2:6]).T + classes = ti[:, 1].astype('int') + labels = ti.shape[1] == 6 # labels if no conf column + conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) + + if boxes.shape[1]: + if boxes.max() <= 1.01: # if normalized with tolerance 0.01 + boxes[[0, 2]] *= w # scale to pixels + boxes[[1, 3]] *= h + elif scale < 1: # absolute coords need scale if image scales + boxes *= scale + boxes[[0, 2]] += x + boxes[[1, 3]] += y + for j, box in 
enumerate(boxes.T.tolist()): + cls = classes[j] + color = colors(cls) + cls = names[cls] if names else cls + if labels or conf[j] > 0.25: # 0.25 conf thresh + label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' + annotator.box_label(box, label, color=color) + annotator.im.save(fname) # save + + +def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): + # Plot LR simulating training for full epochs + optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals + y = [] + for _ in range(epochs): + scheduler.step() + y.append(optimizer.param_groups[0]['lr']) + plt.plot(y, '.-', label='LR') + plt.xlabel('epoch') + plt.ylabel('LR') + plt.grid() + plt.xlim(0, epochs) + plt.ylim(0) + plt.savefig(Path(save_dir) / 'LR.png', dpi=200) + plt.close() + + +def plot_val_txt(): # from utils.plots import *; plot_val() + # Plot val.txt histograms + x = np.loadtxt('val.txt', dtype=np.float32) + box = xyxy2xywh(x[:, :4]) + cx, cy = box[:, 0], box[:, 1] + + fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True) + ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0) + ax.set_aspect('equal') + plt.savefig('hist2d.png', dpi=300) + + fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True) + ax[0].hist(cx, bins=600) + ax[1].hist(cy, bins=600) + plt.savefig('hist1d.png', dpi=200) + + +def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() + # Plot targets.txt histograms + x = np.loadtxt('targets.txt', dtype=np.float32).T + s = ['x targets', 'y targets', 'width targets', 'height targets'] + fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) + ax = ax.ravel() + for i in range(4): + ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}') + ax[i].legend() + ax[i].set_title(s[i]) + plt.savefig('targets.jpg', dpi=200) + + +def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study() + # Plot file=study.txt generated by val.py (or plot all study*.txt in dir) + save_dir = Path(file).parent if file else Path(dir) + plot2 = False # plot additional results + if plot2: + ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel() + + fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) + # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]: + for f in sorted(save_dir.glob('study*.txt')): + y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T + x = np.arange(y.shape[1]) if x is None else np.array(x) + if plot2: + s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)'] + for i in range(7): + ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) + ax[i].set_title(s[i]) + + j = y[3].argmax() + 1 + ax2.plot(y[5, 1:j], + y[3, 1:j] * 1E2, + '.-', + linewidth=2, + markersize=8, + label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) + + ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], + 'k.-', + linewidth=2, + markersize=8, + alpha=.25, + label='EfficientDet') + + ax2.grid(alpha=0.2) + ax2.set_yticks(np.arange(20, 60, 5)) + ax2.set_xlim(0, 57) + ax2.set_ylim(25, 55) + ax2.set_xlabel('GPU Speed (ms/img)') + ax2.set_ylabel('COCO AP val') + ax2.legend(loc='lower right') + f = save_dir / 'study.png' + print(f'Saving {f}...') + plt.savefig(f, dpi=300) + + +@TryExcept() # known issue https://github.com/ultralytics/yolov5/issues/5395 +def plot_labels(labels, names=(), save_dir=Path('')): + # plot dataset 
labels + LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ") + c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes + nc = int(c.max() + 1) # number of classes + x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) + + # seaborn correlogram + sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) + plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) + plt.close() + + # matplotlib labels + matplotlib.use('svg') # faster + ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() + y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) + with contextlib.suppress(Exception): # color histogram bars by class + [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # known issue #3195 + ax[0].set_ylabel('instances') + if 0 < len(names) < 30: + ax[0].set_xticks(range(len(names))) + ax[0].set_xticklabels(list(names.values()), rotation=90, fontsize=10) + else: + ax[0].set_xlabel('classes') + sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) + sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) + + # rectangles + labels[:, 1:3] = 0.5 # center + labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000 + img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255) + for cls, *box in labels[:1000]: + ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot + ax[1].imshow(img) + ax[1].axis('off') + + for a in [0, 1, 2, 3]: + for s in ['top', 'right', 'left', 'bottom']: + ax[a].spines[s].set_visible(False) + + plt.savefig(save_dir / 'labels.jpg', dpi=200) + matplotlib.use('Agg') + plt.close() + + +def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f=Path('images.jpg')): + # Show classification image grid with labels (optional) and predictions (optional) + from utils.augmentations import denormalize + + names = names or [f'class{i}' for i in range(1000)] + blocks = torch.chunk(denormalize(im.clone()).cpu().float(), len(im), + dim=0) # select batch index 0, block by channels + n = min(len(blocks), nmax) # number of plots + m = min(8, round(n ** 0.5)) # 8 x 8 default + fig, ax = plt.subplots(math.ceil(n / m), m) # 8 rows x n/8 cols + ax = ax.ravel() if m > 1 else [ax] + # plt.subplots_adjust(wspace=0.05, hspace=0.05) + for i in range(n): + ax[i].imshow(blocks[i].squeeze().permute((1, 2, 0)).numpy().clip(0.0, 1.0)) + ax[i].axis('off') + if labels is not None: + s = names[labels[i]] + (f'—{names[pred[i]]}' if pred is not None else '') + ax[i].set_title(s, fontsize=8, verticalalignment='top') + plt.savefig(f, dpi=300, bbox_inches='tight') + plt.close() + if verbose: + LOGGER.info(f"Saving {f}") + if labels is not None: + LOGGER.info('True: ' + ' '.join(f'{names[i]:3s}' for i in labels[:nmax])) + if pred is not None: + LOGGER.info('Predicted:' + ' '.join(f'{names[i]:3s}' for i in pred[:nmax])) + return f + + +def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve() + # Plot evolve.csv hyp evolution results + evolve_csv = Path(evolve_csv) + data = pd.read_csv(evolve_csv) + keys = [x.strip() for x in data.columns] + x = data.values + f = fitness(x) + j = np.argmax(f) # max fitness index + plt.figure(figsize=(10, 12), tight_layout=True) + matplotlib.rc('font', **{'size': 8}) + print(f'Best results from row {j} of {evolve_csv}:') + for i, k in enumerate(keys[7:]): + v = x[:, 7 + i] + mu = v[j] # best single result + plt.subplot(6, 5, i + 1) + plt.scatter(v, f, 
c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none') + plt.plot(mu, f.max(), 'k+', markersize=15) + plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9}) # limit to 40 characters + if i % 5 != 0: + plt.yticks([]) + print(f'{k:>15}: {mu:.3g}') + f = evolve_csv.with_suffix('.png') # filename + plt.savefig(f, dpi=200) + plt.close() + print(f'Saved {f}') + + +def plot_results(file='path/to/results.csv', dir=''): + # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') + save_dir = Path(file).parent if file else Path(dir) + fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) + ax = ax.ravel() + files = list(save_dir.glob('results*.csv')) + assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' + for f in files: + try: + data = pd.read_csv(f) + s = [x.strip() for x in data.columns] + x = data.values[:, 0] + for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]): + y = data.values[:, j].astype('float') + # y[y == 0] = np.nan # don't show zero values + ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8) + ax[i].set_title(s[j], fontsize=12) + # if j in [8, 9, 10]: # share train and val loss y axes + # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) + except Exception as e: + LOGGER.info(f'Warning: Plotting error for {f}: {e}') + ax[1].legend() + fig.savefig(save_dir / 'results.png', dpi=200) + plt.close() + + +def profile_idetection(start=0, stop=0, labels=(), save_dir=''): + # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection() + ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() + s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] + files = list(Path(save_dir).glob('frames*.txt')) + for fi, f in enumerate(files): + try: + results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows + n = results.shape[1] # number of rows + x = np.arange(start, min(stop, n) if stop else n) + results = results[:, x] + t = (results[0] - results[0].min()) # set t0=0s + results[0] = x + for i, a in enumerate(ax): + if i < len(results): + label = labels[fi] if len(labels) else f.stem.replace('frames_', '') + a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) + a.set_title(s[i]) + a.set_xlabel('time (s)') + # if fi == len(files) - 1: + # a.set_ylim(bottom=0) + for side in ['top', 'right']: + a.spines[side].set_visible(False) + else: + a.remove() + except Exception as e: + print(f'Warning: Plotting error for {f}; {e}') + ax[1].legend() + plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) + + +def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True): + # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. 
Save and/or return crop + xyxy = torch.tensor(xyxy).view(-1, 4) + b = xyxy2xywh(xyxy) # boxes + if square: + b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square + b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad + xyxy = xywh2xyxy(b).long() + clip_boxes(xyxy, im.shape) + crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] + if save: + file.parent.mkdir(parents=True, exist_ok=True) # make directory + f = str(increment_path(file).with_suffix('.jpg')) + # cv2.imwrite(f, crop) # save BGR, https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue + Image.fromarray(crop[..., ::-1]).save(f, quality=95, subsampling=0) # save RGB + return crop diff --git a/utils/segment/__init__.py b/utils/segment/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..84952a8167bc2975913a6def6b4f027d566552a9 --- /dev/null +++ b/utils/segment/__init__.py @@ -0,0 +1 @@ +# init \ No newline at end of file diff --git a/utils/segment/__pycache__/__init__.cpython-310.pyc b/utils/segment/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b2ac7ddcb7ef7230210e329e4651dcf0b15e9ef Binary files /dev/null and b/utils/segment/__pycache__/__init__.cpython-310.pyc differ diff --git a/utils/segment/__pycache__/__init__.cpython-311.pyc b/utils/segment/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b25a7240fdaaa44d856dab5e3a8c0bdbfdec3e9b Binary files /dev/null and b/utils/segment/__pycache__/__init__.cpython-311.pyc differ diff --git a/utils/segment/__pycache__/general.cpython-310.pyc b/utils/segment/__pycache__/general.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6515bc0db16c92bcacc7fcb3ede9adb41c60919d Binary files /dev/null and b/utils/segment/__pycache__/general.cpython-310.pyc differ diff --git a/utils/segment/__pycache__/general.cpython-311.pyc b/utils/segment/__pycache__/general.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..74c7ddb33e46d3c61efaf329b38a031982e2ab48 Binary files /dev/null and b/utils/segment/__pycache__/general.cpython-311.pyc differ diff --git a/utils/segment/augmentations.py b/utils/segment/augmentations.py new file mode 100644 index 0000000000000000000000000000000000000000..34b5bf75f9feb860270ff8502360609408c64b72 --- /dev/null +++ b/utils/segment/augmentations.py @@ -0,0 +1,99 @@ +import math +import random + +import cv2 +import numpy as np + +from ..augmentations import box_candidates +from ..general import resample_segments, segment2box + + +def mixup(im, labels, segments, im2, labels2, segments2): + # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf + r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 + im = (im * r + im2 * (1 - r)).astype(np.uint8) + labels = np.concatenate((labels, labels2), 0) + segments = np.concatenate((segments, segments2), 0) + return im, labels, segments + + +def random_perspective(im, + targets=(), + segments=(), + degrees=10, + translate=.1, + scale=.1, + shear=10, + perspective=0.0, + border=(0, 0)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) + # targets = [cls, xyxy] + + height = im.shape[0] + border[0] * 2 # shape(h,w,c) + width = im.shape[1] + border[1] * 2 + + # Center + C = np.eye(3) + C[0, 2] = -im.shape[1] / 2 # x translation (pixels) + C[1, 2] = -im.shape[0] / 2 # y 
translation (pixels) + + # Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) + P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3) + a = random.uniform(-degrees, degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - scale, 1 + scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * width) # x translation (pixels) + T[1, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * height) # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # Visualize + # import matplotlib.pyplot as plt + # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() + # ax[0].imshow(im[:, :, ::-1]) # base + # ax[1].imshow(im2[:, :, ::-1]) # warped + + # Transform label coordinates + n = len(targets) + new_segments = [] + if n: + new = np.zeros((n, 4)) + segments = resample_segments(segments) # upsample + for i, segment in enumerate(segments): + xy = np.ones((len(segment), 3)) + xy[:, :2] = segment + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]) # perspective rescale or affine + + # clip + new[i] = segment2box(xy, width, height) + new_segments.append(xy) + + # filter candidates + i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01) + targets = targets[i] + targets[:, 1:5] = new[i] + new_segments = np.array(new_segments)[i] + + return im, targets, new_segments diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py new file mode 100644 index 0000000000000000000000000000000000000000..335570a63d330696612d87b51d2bd8ae0541c37b --- /dev/null +++ b/utils/segment/dataloaders.py @@ -0,0 +1,328 @@ +import os +import random + +import cv2 +import numpy as np +import torch +from torch.utils.data import DataLoader, distributed + +from ..augmentations import augment_hsv, copy_paste, letterbox +from ..dataloaders import InfiniteDataLoader, LoadImagesAndLabels, seed_worker +from ..general import LOGGER, xyn2xy, xywhn2xyxy, xyxy2xywhn +from ..torch_utils import torch_distributed_zero_first +from .augmentations import mixup, random_perspective + +RANK = int(os.getenv('RANK', -1)) + + +def create_dataloader(path, + imgsz, + batch_size, + stride, + single_cls=False, + hyp=None, + augment=False, + cache=False, + pad=0.0, + rect=False, + rank=-1, + workers=8, + image_weights=False, + close_mosaic=False, + quad=False, + prefix='', + shuffle=False, + mask_downsample_ratio=1, + overlap_mask=False): + if rect and shuffle: + LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') + shuffle = False + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = LoadImagesAndLabelsAndMasks( + path, 
+ imgsz, + batch_size, + augment=augment, # augmentation + hyp=hyp, # hyperparameters + rect=rect, # rectangular batches + cache_images=cache, + single_cls=single_cls, + stride=int(stride), + pad=pad, + image_weights=image_weights, + prefix=prefix, + downsample_ratio=mask_downsample_ratio, + overlap=overlap_mask) + + batch_size = min(batch_size, len(dataset)) + nd = torch.cuda.device_count() # number of CUDA devices + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + #loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates + loader = DataLoader if image_weights or close_mosaic else InfiniteDataLoader + generator = torch.Generator() + generator.manual_seed(6148914691236517205 + RANK) + return loader( + dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=True, + collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else LoadImagesAndLabelsAndMasks.collate_fn, + worker_init_fn=seed_worker, + generator=generator, + ), dataset + + +class LoadImagesAndLabelsAndMasks(LoadImagesAndLabels): # for training/testing + + def __init__( + self, + path, + img_size=640, + batch_size=16, + augment=False, + hyp=None, + rect=False, + image_weights=False, + cache_images=False, + single_cls=False, + stride=32, + pad=0, + min_items=0, + prefix="", + downsample_ratio=1, + overlap=False, + ): + super().__init__(path, img_size, batch_size, augment, hyp, rect, image_weights, cache_images, single_cls, + stride, pad, min_items, prefix) + self.downsample_ratio = downsample_ratio + self.overlap = overlap + + def __getitem__(self, index): + index = self.indices[index] # linear, shuffled, or image_weights + + hyp = self.hyp + mosaic = self.mosaic and random.random() < hyp['mosaic'] + masks = [] + if mosaic: + # Load mosaic + img, labels, segments = self.load_mosaic(index) + shapes = None + + # MixUp augmentation + if random.random() < hyp["mixup"]: + img, labels, segments = mixup(img, labels, segments, *self.load_mosaic(random.randint(0, self.n - 1))) + + else: + # Load image + img, (h0, w0), (h, w) = self.load_image(index) + + # Letterbox + shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape + img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) + shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling + + labels = self.labels[index].copy() + # [array, array, ....], array.shape=(num_points, 2), xyxyxyxy + segments = self.segments[index].copy() + if len(segments): + for i_s in range(len(segments)): + segments[i_s] = xyn2xy( + segments[i_s], + ratio[0] * w, + ratio[1] * h, + padw=pad[0], + padh=pad[1], + ) + if labels.size: # normalized xywh to pixel xyxy format + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) + + if self.augment: + img, labels, segments = random_perspective(img, + labels, + segments=segments, + degrees=hyp["degrees"], + translate=hyp["translate"], + scale=hyp["scale"], + shear=hyp["shear"], + perspective=hyp["perspective"]) + + nl = len(labels) # number of labels + if nl: + labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1e-3) + if self.overlap: + masks, sorted_idx = polygons2masks_overlap(img.shape[:2], + segments, + downsample_ratio=self.downsample_ratio) 
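+                    # `overlap=True` packs all instances into one (h, w) integer mask where
+                    # pixel value k marks the k-th largest instance by area; `sorted_idx`
+                    # is used below to re-order the labels into that same area-sorted order.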
+ masks = masks[None] # (640, 640) -> (1, 640, 640) + labels = labels[sorted_idx] + else: + masks = polygons2masks(img.shape[:2], segments, color=1, downsample_ratio=self.downsample_ratio) + + masks = (torch.from_numpy(masks) if len(masks) else torch.zeros(1 if self.overlap else nl, img.shape[0] // + self.downsample_ratio, img.shape[1] // + self.downsample_ratio)) + # TODO: albumentations support + if self.augment: + # Albumentations + # there are some augmentation that won't change boxes and masks, + # so just be it for now. + img, labels = self.albumentations(img, labels) + nl = len(labels) # update after albumentations + + # HSV color-space + augment_hsv(img, hgain=hyp["hsv_h"], sgain=hyp["hsv_s"], vgain=hyp["hsv_v"]) + + # Flip up-down + if random.random() < hyp["flipud"]: + img = np.flipud(img) + if nl: + labels[:, 2] = 1 - labels[:, 2] + masks = torch.flip(masks, dims=[1]) + + # Flip left-right + if random.random() < hyp["fliplr"]: + img = np.fliplr(img) + if nl: + labels[:, 1] = 1 - labels[:, 1] + masks = torch.flip(masks, dims=[2]) + + # Cutouts # labels = cutout(img, labels, p=0.5) + + labels_out = torch.zeros((nl, 6)) + if nl: + labels_out[:, 1:] = torch.from_numpy(labels) + + # Convert + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img = np.ascontiguousarray(img) + + return (torch.from_numpy(img), labels_out, self.im_files[index], shapes, masks) + + def load_mosaic(self, index): + # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic + labels4, segments4 = [], [] + s = self.img_size + yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y + + # 3 additional image indices + indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = self.load_image(index) + + # place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + labels, segments = self.labels[index].copy(), self.segments[index].copy() + + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padw, padh) for x in segments] + labels4.append(labels) + segments4.extend(segments) + + # Concat/clip labels + labels4 = np.concatenate(labels4, 0) + for x in (labels4[:, 1:], *segments4): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img4, labels4 = replicate(img4, labels4) # replicate + + # Augment + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp["copy_paste"]) + img4, labels4, segments4 = random_perspective(img4, + labels4, + segments4, + 
degrees=self.hyp["degrees"], + translate=self.hyp["translate"], + scale=self.hyp["scale"], + shear=self.hyp["shear"], + perspective=self.hyp["perspective"], + border=self.mosaic_border) # border to remove + return img4, labels4, segments4 + + @staticmethod + def collate_fn(batch): + img, label, path, shapes, masks = zip(*batch) # transposed + batched_masks = torch.cat(masks, 0) + for i, l in enumerate(label): + l[:, 0] = i # add target image index for build_targets() + return torch.stack(img, 0), torch.cat(label, 0), path, shapes, batched_masks + + +def polygon2mask(img_size, polygons, color=1, downsample_ratio=1): + """ + Args: + img_size (tuple): The image size. + polygons (np.ndarray): [N, M], N is the number of polygons, + M is the number of points(Be divided by 2). + """ + mask = np.zeros(img_size, dtype=np.uint8) + polygons = np.asarray(polygons) + polygons = polygons.astype(np.int32) + shape = polygons.shape + polygons = polygons.reshape(shape[0], -1, 2) + cv2.fillPoly(mask, polygons, color=color) + nh, nw = (img_size[0] // downsample_ratio, img_size[1] // downsample_ratio) + # NOTE: fillPoly firstly then resize is trying the keep the same way + # of loss calculation when mask-ratio=1. + mask = cv2.resize(mask, (nw, nh)) + return mask + + +def polygons2masks(img_size, polygons, color, downsample_ratio=1): + """ + Args: + img_size (tuple): The image size. + polygons (list[np.ndarray]): each polygon is [N, M], + N is the number of polygons, + M is the number of points(Be divided by 2). + """ + masks = [] + for si in range(len(polygons)): + mask = polygon2mask(img_size, [polygons[si].reshape(-1)], color, downsample_ratio) + masks.append(mask) + return np.array(masks) + + +def polygons2masks_overlap(img_size, segments, downsample_ratio=1): + """Return a (640, 640) overlap mask.""" + masks = np.zeros((img_size[0] // downsample_ratio, img_size[1] // downsample_ratio), + dtype=np.int32 if len(segments) > 255 else np.uint8) + areas = [] + ms = [] + for si in range(len(segments)): + mask = polygon2mask( + img_size, + [segments[si].reshape(-1)], + downsample_ratio=downsample_ratio, + color=1, + ) + ms.append(mask) + areas.append(mask.sum()) + areas = np.asarray(areas) + index = np.argsort(-areas) + ms = np.array(ms)[index] + for i in range(len(segments)): + mask = ms[i] * (i + 1) + masks = masks + mask + masks = np.clip(masks, a_min=0, a_max=i + 1) + return masks, index diff --git a/utils/segment/general.py b/utils/segment/general.py new file mode 100644 index 0000000000000000000000000000000000000000..b526333dc5a1b8625d7e6a51ee6ba41818c62adb --- /dev/null +++ b/utils/segment/general.py @@ -0,0 +1,137 @@ +import cv2 +import numpy as np +import torch +import torch.nn.functional as F + + +def crop_mask(masks, boxes): + """ + "Crop" predicted masks by zeroing out everything not in the predicted bbox. + Vectorized by Chong (thanks Chong). + + Args: + - masks should be a size [h, w, n] tensor of masks + - boxes should be a size [n, 4] tensor of bbox coords in relative point form + """ + + n, h, w = masks.shape + x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(1,1,n) + r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # rows shape(1,w,1) + c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None] # cols shape(h,1,1) + + return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2)) + + +def process_mask_upsample(protos, masks_in, bboxes, shape): + """ + Crop after upsample. 
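+    The prototype masks are upsampled to the input resolution first, so the bounding-box
+    crop is applied at full resolution (slower than process_mask(), but with cleaner mask borders).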
+ proto_out: [mask_dim, mask_h, mask_w] + out_masks: [n, mask_dim], n is number of masks after nms + bboxes: [n, 4], n is number of masks after nms + shape:input_image_size, (h, w) + + return: h, w, n + """ + + c, mh, mw = protos.shape # CHW + masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) + masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW + masks = crop_mask(masks, bboxes) # CHW + return masks.gt_(0.5) + + +def process_mask(protos, masks_in, bboxes, shape, upsample=False): + """ + Crop before upsample. + proto_out: [mask_dim, mask_h, mask_w] + out_masks: [n, mask_dim], n is number of masks after nms + bboxes: [n, 4], n is number of masks after nms + shape:input_image_size, (h, w) + + return: h, w, n + """ + + c, mh, mw = protos.shape # CHW + ih, iw = shape + masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW + + downsampled_bboxes = bboxes.clone() + downsampled_bboxes[:, 0] *= mw / iw + downsampled_bboxes[:, 2] *= mw / iw + downsampled_bboxes[:, 3] *= mh / ih + downsampled_bboxes[:, 1] *= mh / ih + + masks = crop_mask(masks, downsampled_bboxes) # CHW + if upsample: + masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW + return masks.gt_(0.5) + + +def scale_image(im1_shape, masks, im0_shape, ratio_pad=None): + """ + img1_shape: model input shape, [h, w] + img0_shape: origin pic shape, [h, w, 3] + masks: [h, w, num] + """ + # Rescale coordinates (xyxy) from im1_shape to im0_shape + if ratio_pad is None: # calculate from im0_shape + gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new + pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding + else: + pad = ratio_pad[1] + top, left = int(pad[1]), int(pad[0]) # y, x + bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0]) + + if len(masks.shape) < 2: + raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}') + masks = masks[top:bottom, left:right] + # masks = masks.permute(2, 0, 1).contiguous() + # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0] + # masks = masks.permute(1, 2, 0).contiguous() + masks = cv2.resize(masks, (im0_shape[1], im0_shape[0])) + + if len(masks.shape) == 2: + masks = masks[:, :, None] + return masks + + +def mask_iou(mask1, mask2, eps=1e-7): + """ + mask1: [N, n] m1 means number of predicted objects + mask2: [M, n] m2 means number of gt objects + Note: n means image_w x image_h + + return: masks iou, [N, M] + """ + intersection = torch.matmul(mask1, mask2.t()).clamp(0) + union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection # (area1 + area2) - intersection + return intersection / (union + eps) + + +def masks_iou(mask1, mask2, eps=1e-7): + """ + mask1: [N, n] m1 means number of predicted objects + mask2: [N, n] m2 means number of gt objects + Note: n means image_w x image_h + + return: masks iou, (N, ) + """ + intersection = (mask1 * mask2).sum(1).clamp(0) # (N, ) + union = (mask1.sum(1) + mask2.sum(1))[None] - intersection # (area1 + area2) - intersection + return intersection / (union + eps) + + +def masks2segments(masks, strategy='largest'): + # Convert masks(n,160,160) into segments(n,xy) + segments = [] + for x in masks.int().cpu().numpy().astype('uint8'): + c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0] + if c: + if strategy == 'concat': # concatenate all segments + c = 
np.concatenate([x.reshape(-1, 2) for x in c]) + elif strategy == 'largest': # select largest segment + c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2) + else: + c = np.zeros((0, 2)) # no segments found + segments.append(c.astype('float32')) + return segments diff --git a/utils/segment/loss.py b/utils/segment/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..b45b2c27e0a05c275cbc50064288aece3ae3e856 --- /dev/null +++ b/utils/segment/loss.py @@ -0,0 +1,186 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..general import xywh2xyxy +from ..loss import FocalLoss, smooth_BCE +from ..metrics import bbox_iou +from ..torch_utils import de_parallel +from .general import crop_mask + + +class ComputeLoss: + # Compute losses + def __init__(self, model, autobalance=False, overlap=False): + self.sort_obj_iou = False + self.overlap = overlap + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + self.device = device + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) + BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets + + # Focal loss + g = h['fl_gamma'] # focal loss gamma + if g > 0: + BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) + + m = de_parallel(model).model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 + self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index + self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance + self.na = m.na # number of anchors + self.nc = m.nc # number of classes + self.nl = m.nl # number of layers + self.nm = m.nm # number of masks + self.anchors = m.anchors + self.device = device + + def __call__(self, preds, targets, masks): # predictions, targets, model + p, proto = preds + bs, nm, mask_h, mask_w = proto.shape # batch size, number of masks, mask height, mask width + lcls = torch.zeros(1, device=self.device) + lbox = torch.zeros(1, device=self.device) + lobj = torch.zeros(1, device=self.device) + lseg = torch.zeros(1, device=self.device) + tcls, tbox, indices, anchors, tidxs, xywhn = self.build_targets(p, targets) # targets + + # Losses + for i, pi in enumerate(p): # layer index, layer predictions + b, a, gj, gi = indices[i] # image, anchor, gridy, gridx + tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj + + n = b.shape[0] # number of targets + if n: + pxy, pwh, _, pcls, pmask = pi[b, a, gj, gi].split((2, 2, 1, self.nc, nm), 1) # subset of predictions + + # Box regression + pxy = pxy.sigmoid() * 2 - 0.5 + pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i] + pbox = torch.cat((pxy, pwh), 1) # predicted box + iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze() # iou(prediction, target) + lbox += (1.0 - iou).mean() # iou loss + + # Objectness + iou = iou.detach().clamp(0).type(tobj.dtype) + if self.sort_obj_iou: + j = iou.argsort() + b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j] + if self.gr < 1: + iou = (1.0 - self.gr) + self.gr * iou + tobj[b, a, gj, gi] = iou # iou ratio + + # Classification + if self.nc > 1: # cls loss (only if multiple classes) + t = torch.full_like(pcls, self.cn, device=self.device) # targets + 
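+                    # cn/cp are the smoothed negative/positive targets from smooth_BCE():
+                    # every class starts at cn, and the next line raises the true class to cp.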
t[range(n), tcls[i]] = self.cp + lcls += self.BCEcls(pcls, t) # BCE + + # Mask regression + if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample + masks = F.interpolate(masks[None], (mask_h, mask_w), mode="nearest")[0] + marea = xywhn[i][:, 2:].prod(1) # mask width, height normalized + mxyxy = xywh2xyxy(xywhn[i] * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device)) + for bi in b.unique(): + j = b == bi # matching index + if self.overlap: + mask_gti = torch.where(masks[bi][None] == tidxs[i][j].view(-1, 1, 1), 1.0, 0.0) + else: + mask_gti = masks[tidxs[i]][j] + lseg += self.single_mask_loss(mask_gti, pmask[j], proto[bi], mxyxy[j], marea[j]) + + obji = self.BCEobj(pi[..., 4], tobj) + lobj += obji * self.balance[i] # obj loss + if self.autobalance: + self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() + + if self.autobalance: + self.balance = [x / self.balance[self.ssi] for x in self.balance] + lbox *= self.hyp["box"] + lobj *= self.hyp["obj"] + lcls *= self.hyp["cls"] + lseg *= self.hyp["box"] / bs + + loss = lbox + lobj + lcls + lseg + return loss * bs, torch.cat((lbox, lseg, lobj, lcls)).detach() + + def single_mask_loss(self, gt_mask, pred, proto, xyxy, area): + # Mask loss for one image + pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n,32) @ (32,80,80) -> (n,80,80) + loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction="none") + return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean() + + def build_targets(self, p, targets): + # Build targets for compute_loss(), input targets(image,class,x,y,w,h) + na, nt = self.na, targets.shape[0] # number of anchors, targets + tcls, tbox, indices, anch, tidxs, xywhn = [], [], [], [], [], [] + gain = torch.ones(8, device=self.device) # normalized to gridspace gain + ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + if self.overlap: + batch = p[0].shape[0] + ti = [] + for i in range(batch): + num = (targets[:, 0] == i).sum() # find number of targets of each image + ti.append(torch.arange(num, device=self.device).float().view(1, num).repeat(na, 1) + 1) # (na, num) + ti = torch.cat(ti, 1) # (na, nt) + else: + ti = torch.arange(nt, device=self.device).float().view(1, nt).repeat(na, 1) + targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None], ti[..., None]), 2) # append anchor indices + + g = 0.5 # bias + off = torch.tensor( + [ + [0, 0], + [1, 0], + [0, 1], + [-1, 0], + [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], + device=self.device).float() * g # offsets + + for i in range(self.nl): + anchors, shape = self.anchors[i], p[i].shape + gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain # shape(3,n,7) + if nt: + # Matches + r = t[..., 4:6] / anchors[:, None] # wh ratio + j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t'] # compare + # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + t = t[j] # filter + + # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1 < g) & (gxy > 1)).T + l, m = ((gxi % 1 < g) & (gxi > 1)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + bc, gxy, gwh, at = t.chunk(4, 1) # (image, class), grid xy, grid wh, anchors + (a, tidx), (b, c) = 
at.long().T, bc.long().T # anchors, image, class + gij = (gxy - offsets).long() + gi, gj = gij.T # grid indices + + # Append + indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1))) # image, anchor, grid + tbox.append(torch.cat((gxy - gij, gwh), 1)) # box + anch.append(anchors[a]) # anchors + tcls.append(c) # class + tidxs.append(tidx) + xywhn.append(torch.cat((gxy, gwh), 1) / gain[2:6]) # xywh normalized + + return tcls, tbox, indices, anch, tidxs, xywhn diff --git a/utils/segment/loss_tal.py b/utils/segment/loss_tal.py new file mode 100644 index 0000000000000000000000000000000000000000..3f90b27ef7c25df65e072f1d26aaaa4305e83460 --- /dev/null +++ b/utils/segment/loss_tal.py @@ -0,0 +1,261 @@ +import os + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from torchvision.ops import sigmoid_focal_loss + +from utils.general import xywh2xyxy, xyxy2xywh +from utils.metrics import bbox_iou +from utils.segment.tal.anchor_generator import dist2bbox, make_anchors, bbox2dist +from utils.segment.tal.assigner import TaskAlignedAssigner +from utils.torch_utils import de_parallel +from utils.segment.general import crop_mask + + +def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 + # return positive, negative label smoothing BCE targets + return 1.0 - 0.5 * eps, 0.5 * eps + + +class VarifocalLoss(nn.Module): + # Varifocal loss by Zhang et al. https://arxiv.org/abs/2008.13367 + def __init__(self): + super().__init__() + + def forward(self, pred_score, gt_score, label, alpha=0.75, gamma=2.0): + weight = alpha * pred_score.sigmoid().pow(gamma) * (1 - label) + gt_score * label + with torch.cuda.amp.autocast(enabled=False): + loss = (F.binary_cross_entropy_with_logits(pred_score.float(), gt_score.float(), + reduction="none") * weight).sum() + return loss + + +class FocalLoss(nn.Module): + # Wraps focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super().__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = "none" # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + # p_t = torch.exp(-loss) + # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability + + # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py + pred_prob = torch.sigmoid(pred) # prob from logits + p_t = true * pred_prob + (1 - true) * (1 - pred_prob) + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = (1.0 - p_t) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == "mean": + return loss.mean() + elif self.reduction == "sum": + return loss.sum() + else: # 'none' + return loss + + +class BboxLoss(nn.Module): + def __init__(self, reg_max, use_dfl=False): + super().__init__() + self.reg_max = reg_max + self.use_dfl = use_dfl + + def forward(self, pred_dist, pred_bboxes, anchor_points, target_bboxes, target_scores, target_scores_sum, fg_mask): + # iou loss + bbox_mask = fg_mask.unsqueeze(-1).repeat([1, 1, 4]) # (b, h*w, 4) + pred_bboxes_pos = torch.masked_select(pred_bboxes, bbox_mask).view(-1, 4) + target_bboxes_pos = torch.masked_select(target_bboxes, bbox_mask).view(-1, 4) + bbox_weight = torch.masked_select(target_scores.sum(-1), fg_mask).unsqueeze(-1) + + iou = bbox_iou(pred_bboxes_pos, target_bboxes_pos, xywh=False, CIoU=True) + loss_iou = 1.0 - iou + + loss_iou *= bbox_weight + loss_iou = loss_iou.sum() / target_scores_sum + + # dfl loss + if self.use_dfl: + dist_mask = fg_mask.unsqueeze(-1).repeat([1, 1, (self.reg_max + 1) * 4]) + pred_dist_pos = torch.masked_select(pred_dist, dist_mask).view(-1, 4, self.reg_max + 1) + target_ltrb = bbox2dist(anchor_points, target_bboxes, self.reg_max) + target_ltrb_pos = torch.masked_select(target_ltrb, bbox_mask).view(-1, 4) + loss_dfl = self._df_loss(pred_dist_pos, target_ltrb_pos) * bbox_weight + loss_dfl = loss_dfl.sum() / target_scores_sum + else: + loss_dfl = torch.tensor(0.0).to(pred_dist.device) + + return loss_iou, loss_dfl, iou + + def _df_loss(self, pred_dist, target): + target_left = target.to(torch.long) + target_right = target_left + 1 + weight_left = target_right.to(torch.float) - target + weight_right = 1 - weight_left + loss_left = F.cross_entropy(pred_dist.view(-1, self.reg_max + 1), target_left.view(-1), reduction="none").view( + target_left.shape) * weight_left + loss_right = F.cross_entropy(pred_dist.view(-1, self.reg_max + 1), target_right.view(-1), + reduction="none").view(target_left.shape) * weight_right + return (loss_left + loss_right).mean(-1, keepdim=True) + + +class ComputeLoss: + # Compute losses + def __init__(self, model, use_dfl=True, overlap=True): + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h["cls_pw"]], device=device), reduction='none') + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get("label_smoothing", 0.0)) # positive, negative BCE targets + + # Focal loss + g = h["fl_gamma"] # focal loss gamma + if g > 0: + BCEcls = FocalLoss(BCEcls, g) + + m = 
de_parallel(model).model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 + self.BCEcls = BCEcls + self.hyp = h + self.stride = m.stride # model strides + self.nc = m.nc # number of classes + self.nl = m.nl # number of layers + self.no = m.no + self.nm = m.nm + self.overlap = overlap + self.reg_max = m.reg_max + self.device = device + + self.assigner = TaskAlignedAssigner(topk=int(os.getenv('YOLOM', 10)), + num_classes=self.nc, + alpha=float(os.getenv('YOLOA', 0.5)), + beta=float(os.getenv('YOLOB', 6.0))) + self.bbox_loss = BboxLoss(m.reg_max - 1, use_dfl=use_dfl).to(device) + self.proj = torch.arange(m.reg_max).float().to(device) # / 120.0 + self.use_dfl = use_dfl + + def preprocess(self, targets, batch_size, scale_tensor): + if targets.shape[0] == 0: + out = torch.zeros(batch_size, 0, 5, device=self.device) + else: + i = targets[:, 0] # image index + _, counts = i.unique(return_counts=True) + out = torch.zeros(batch_size, counts.max(), 5, device=self.device) + for j in range(batch_size): + matches = i == j + n = matches.sum() + if n: + out[j, :n] = targets[matches, 1:] + out[..., 1:5] = xywh2xyxy(out[..., 1:5].mul_(scale_tensor)) + return out + + def bbox_decode(self, anchor_points, pred_dist): + if self.use_dfl: + b, a, c = pred_dist.shape # batch, anchors, channels + pred_dist = pred_dist.view(b, a, 4, c // 4).softmax(3).matmul(self.proj.type(pred_dist.dtype)) + # pred_dist = pred_dist.view(b, a, c // 4, 4).transpose(2,3).softmax(3).matmul(self.proj.type(pred_dist.dtype)) + # pred_dist = (pred_dist.view(b, a, c // 4, 4).softmax(2) * self.proj.type(pred_dist.dtype).view(1, 1, -1, 1)).sum(2) + return dist2bbox(pred_dist, anchor_points, xywh=False) + + def __call__(self, p, targets, masks, img=None, epoch=0): + loss = torch.zeros(4, device=self.device) # box, cls, dfl + feats, pred_masks, proto = p if len(p) == 3 else p[1] + batch_size, _, mask_h, mask_w = proto.shape + pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split( + (self.reg_max * 4, self.nc), 1) + pred_scores = pred_scores.permute(0, 2, 1).contiguous() + pred_distri = pred_distri.permute(0, 2, 1).contiguous() + pred_masks = pred_masks.permute(0, 2, 1).contiguous() + + dtype = pred_scores.dtype + batch_size, grid_size = pred_scores.shape[:2] + imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0] # image size (h,w) + anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5) + + # targets + try: + batch_idx = targets[:, 0].view(-1, 1) + targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]]) + gt_labels, gt_bboxes = targets.split((1, 4), 2) # cls, xyxy + mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0) + except RuntimeError as e: + raise TypeError('ERROR.') from e + + + # pboxes + pred_bboxes = self.bbox_decode(anchor_points, pred_distri) # xyxy, (b, h*w, 4) + + target_labels, target_bboxes, target_scores, fg_mask, target_gt_idx = self.assigner( + pred_scores.detach().sigmoid(), + (pred_bboxes.detach() * stride_tensor).type(gt_bboxes.dtype), + anchor_points * stride_tensor, + gt_labels, + gt_bboxes, + mask_gt) + + target_scores_sum = target_scores.sum() + + # cls loss + # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum # VFL way + loss[2] = self.BCEcls(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum # BCE + + # bbox loss + if fg_mask.sum(): + loss[0], loss[3], _ = 
self.bbox_loss(pred_distri, + pred_bboxes, + anchor_points, + target_bboxes / stride_tensor, + target_scores, + target_scores_sum, + fg_mask) + + # masks loss + if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample + masks = F.interpolate(masks[None], (mask_h, mask_w), mode='nearest')[0] + + for i in range(batch_size): + if fg_mask[i].sum(): + mask_idx = target_gt_idx[i][fg_mask[i]] + if self.overlap: + gt_mask = torch.where(masks[[i]] == (mask_idx + 1).view(-1, 1, 1), 1.0, 0.0) + else: + gt_mask = masks[batch_idx.view(-1) == i][mask_idx] + xyxyn = target_bboxes[i][fg_mask[i]] / imgsz[[1, 0, 1, 0]] + marea = xyxy2xywh(xyxyn)[:, 2:].prod(1) + mxyxy = xyxyn * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device) + loss[1] += self.single_mask_loss(gt_mask, pred_masks[i][fg_mask[i]], proto[i], mxyxy, + marea) # seg loss + + loss[0] *= 7.5 # box gain + loss[1] *= 2.5 / batch_size + loss[2] *= 0.5 # cls gain + loss[3] *= 1.5 # dfl gain + + return loss.sum() * batch_size, loss.detach() # loss(box, cls, dfl) + + def single_mask_loss(self, gt_mask, pred, proto, xyxy, area): + # Mask loss for one image + pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n, 32) @ (32,80,80) -> (n,80,80) + loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction='none') + #loss = sigmoid_focal_loss(pred_mask, gt_mask, alpha = .25, gamma = 2., reduction = 'none') + + return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean() + + #p_m = torch.flatten(pred_mask.sigmoid()) + #p_m = torch.flatten(pred_mask.softmax(dim = 1)) + #g_m = torch.flatten(gt_mask) + #i_m = torch.sum(torch.mul(p_m, g_m)) + #u_m = torch.sum(torch.add(p_m, g_m)) + #d_c = (2. * i_m + 1.) / (u_m + 1.) + #d_l = (1. - d_c) + #return d_l diff --git a/utils/segment/loss_tal_dual.py b/utils/segment/loss_tal_dual.py new file mode 100644 index 0000000000000000000000000000000000000000..87bb8ebfb3008ec4dc37b981e8fa559a7e90b68d --- /dev/null +++ b/utils/segment/loss_tal_dual.py @@ -0,0 +1,727 @@ +import os + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from torchvision.ops import sigmoid_focal_loss + +from utils.general import xywh2xyxy, xyxy2xywh +from utils.metrics import bbox_iou +from utils.segment.tal.anchor_generator import dist2bbox, make_anchors, bbox2dist +from utils.segment.tal.assigner import TaskAlignedAssigner +from utils.torch_utils import de_parallel +from utils.segment.general import crop_mask + + +def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 + # return positive, negative label smoothing BCE targets + return 1.0 - 0.5 * eps, 0.5 * eps + + +class VarifocalLoss(nn.Module): + # Varifocal loss by Zhang et al. https://arxiv.org/abs/2008.13367 + def __init__(self): + super().__init__() + + def forward(self, pred_score, gt_score, label, alpha=0.75, gamma=2.0): + weight = alpha * pred_score.sigmoid().pow(gamma) * (1 - label) + gt_score * label + with torch.cuda.amp.autocast(enabled=False): + loss = (F.binary_cross_entropy_with_logits(pred_score.float(), gt_score.float(), + reduction="none") * weight).sum() + return loss + + +class FocalLoss(nn.Module): + # Wraps focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super().__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = "none" # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + # p_t = torch.exp(-loss) + # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability + + # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py + pred_prob = torch.sigmoid(pred) # prob from logits + p_t = true * pred_prob + (1 - true) * (1 - pred_prob) + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = (1.0 - p_t) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == "mean": + return loss.mean() + elif self.reduction == "sum": + return loss.sum() + else: # 'none' + return loss + + +class BboxLoss(nn.Module): + def __init__(self, reg_max, use_dfl=False): + super().__init__() + self.reg_max = reg_max + self.use_dfl = use_dfl + + def forward(self, pred_dist, pred_bboxes, anchor_points, target_bboxes, target_scores, target_scores_sum, fg_mask): + # iou loss + bbox_mask = fg_mask.unsqueeze(-1).repeat([1, 1, 4]) # (b, h*w, 4) + pred_bboxes_pos = torch.masked_select(pred_bboxes, bbox_mask).view(-1, 4) + target_bboxes_pos = torch.masked_select(target_bboxes, bbox_mask).view(-1, 4) + bbox_weight = torch.masked_select(target_scores.sum(-1), fg_mask).unsqueeze(-1) + + iou = bbox_iou(pred_bboxes_pos, target_bboxes_pos, xywh=False, CIoU=True) + loss_iou = 1.0 - iou + + loss_iou *= bbox_weight + loss_iou = loss_iou.sum() / target_scores_sum + + # dfl loss + if self.use_dfl: + dist_mask = fg_mask.unsqueeze(-1).repeat([1, 1, (self.reg_max + 1) * 4]) + pred_dist_pos = torch.masked_select(pred_dist, dist_mask).view(-1, 4, self.reg_max + 1) + target_ltrb = bbox2dist(anchor_points, target_bboxes, self.reg_max) + target_ltrb_pos = torch.masked_select(target_ltrb, bbox_mask).view(-1, 4) + loss_dfl = self._df_loss(pred_dist_pos, target_ltrb_pos) * bbox_weight + loss_dfl = loss_dfl.sum() / target_scores_sum + else: + loss_dfl = torch.tensor(0.0).to(pred_dist.device) + + return loss_iou, loss_dfl, iou + + def _df_loss(self, pred_dist, target): + target_left = target.to(torch.long) + target_right = target_left + 1 + weight_left = target_right.to(torch.float) - target + weight_right = 1 - weight_left + loss_left = F.cross_entropy(pred_dist.view(-1, self.reg_max + 1), target_left.view(-1), reduction="none").view( + target_left.shape) * weight_left + loss_right = F.cross_entropy(pred_dist.view(-1, self.reg_max + 1), target_right.view(-1), + reduction="none").view(target_left.shape) * weight_right + return (loss_left + loss_right).mean(-1, keepdim=True) + + +class ComputeLoss: + # Compute losses + def __init__(self, model, use_dfl=True, overlap=True): + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h["cls_pw"]], device=device), reduction='none') + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get("label_smoothing", 0.0)) # positive, negative BCE targets + + # Focal loss + g = h["fl_gamma"] # focal loss gamma + if g > 0: + BCEcls = FocalLoss(BCEcls, g) + + m = 
de_parallel(model).model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 + self.BCEcls = BCEcls + self.hyp = h + self.stride = m.stride # model strides + self.nc = m.nc # number of classes + self.nl = m.nl # number of layers + self.no = m.no + self.nm = m.nm + self.overlap = overlap + self.reg_max = m.reg_max + self.device = device + + self.assigner = TaskAlignedAssigner(topk=int(os.getenv('YOLOM', 10)), + num_classes=self.nc, + alpha=float(os.getenv('YOLOA', 0.5)), + beta=float(os.getenv('YOLOB', 6.0))) + self.assigner2 = TaskAlignedAssigner(topk=int(os.getenv('YOLOM', 10)), + num_classes=self.nc, + alpha=float(os.getenv('YOLOA', 0.5)), + beta=float(os.getenv('YOLOB', 6.0))) + self.bbox_loss = BboxLoss(m.reg_max - 1, use_dfl=use_dfl).to(device) + self.bbox_loss2 = BboxLoss(m.reg_max - 1, use_dfl=use_dfl).to(device) + self.proj = torch.arange(m.reg_max).float().to(device) # / 120.0 + self.use_dfl = use_dfl + + def preprocess(self, targets, batch_size, scale_tensor): + if targets.shape[0] == 0: + out = torch.zeros(batch_size, 0, 5, device=self.device) + else: + i = targets[:, 0] # image index + _, counts = i.unique(return_counts=True) + out = torch.zeros(batch_size, counts.max(), 5, device=self.device) + for j in range(batch_size): + matches = i == j + n = matches.sum() + if n: + out[j, :n] = targets[matches, 1:] + out[..., 1:5] = xywh2xyxy(out[..., 1:5].mul_(scale_tensor)) + return out + + def bbox_decode(self, anchor_points, pred_dist): + if self.use_dfl: + b, a, c = pred_dist.shape # batch, anchors, channels + pred_dist = pred_dist.view(b, a, 4, c // 4).softmax(3).matmul(self.proj.type(pred_dist.dtype)) + # pred_dist = pred_dist.view(b, a, c // 4, 4).transpose(2,3).softmax(3).matmul(self.proj.type(pred_dist.dtype)) + # pred_dist = (pred_dist.view(b, a, c // 4, 4).softmax(2) * self.proj.type(pred_dist.dtype).view(1, 1, -1, 1)).sum(2) + return dist2bbox(pred_dist, anchor_points, xywh=False) + + def __call__(self, p, targets, masks, img=None, epoch=0): + loss = torch.zeros(4, device=self.device) # box, cls, dfl + + feats_, pred_masks_, proto_ = p if len(p) == 3 else p[1] + + feats, pred_masks, proto = feats_[0], pred_masks_[0], proto_[0] + feats2, pred_masks2, proto2 = feats_[1], pred_masks_[1], proto_[1] + + batch_size, _, mask_h, mask_w = proto.shape + + pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split( + (self.reg_max * 4, self.nc), 1) + pred_scores = pred_scores.permute(0, 2, 1).contiguous() + pred_distri = pred_distri.permute(0, 2, 1).contiguous() + pred_masks = pred_masks.permute(0, 2, 1).contiguous() + + pred_distri2, pred_scores2 = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats2], 2).split( + (self.reg_max * 4, self.nc), 1) + pred_scores2 = pred_scores2.permute(0, 2, 1).contiguous() + pred_distri2 = pred_distri2.permute(0, 2, 1).contiguous() + pred_masks2 = pred_masks2.permute(0, 2, 1).contiguous() + + dtype = pred_scores.dtype + batch_size, grid_size = pred_scores.shape[:2] + imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0] # image size (h,w) + anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5) + + # targets + try: + batch_idx = targets[:, 0].view(-1, 1) + targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]]) + gt_labels, gt_bboxes = targets.split((1, 4), 2) # cls, xyxy + mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0) + except RuntimeError as e: 
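+ # A RuntimeError in the block above usually means `targets` does not have the expected
+ # (n, 6) layout of (image_index, class, x, y, w, h) rows from the dataloader; it is
+ # re-raised below so a malformed-label batch fails loudly at the start of training.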
+ raise TypeError('ERROR.') from e + + + # pboxes + pred_bboxes = self.bbox_decode(anchor_points, pred_distri) # xyxy, (b, h*w, 4) + + pred_bboxes2 = self.bbox_decode(anchor_points, pred_distri2) # xyxy, (b, h*w, 4) + + target_labels, target_bboxes, target_scores, fg_mask, target_gt_idx = self.assigner( + pred_scores.detach().sigmoid(), + (pred_bboxes.detach() * stride_tensor).type(gt_bboxes.dtype), + anchor_points * stride_tensor, + gt_labels, + gt_bboxes, + mask_gt) + + target_labels2, target_bboxes2, target_scores2, fg_mask2, target_gt_idx2 = self.assigner2( + pred_scores2.detach().sigmoid(), + (pred_bboxes2.detach() * stride_tensor).type(gt_bboxes.dtype), + anchor_points * stride_tensor, + gt_labels, + gt_bboxes, + mask_gt) + + target_scores_sum = target_scores.sum() + + target_scores_sum2 = target_scores2.sum() + + # cls loss + # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum # VFL way + loss[2] = self.BCEcls(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum # BCE + loss[2] *= 0.25 + loss[2] += self.BCEcls(pred_scores2, target_scores2.to(dtype)).sum() / target_scores_sum2 # BCE + + # bbox loss + if fg_mask.sum(): + loss[0], loss[3], _ = self.bbox_loss(pred_distri, + pred_bboxes, + anchor_points, + target_bboxes / stride_tensor, + target_scores, + target_scores_sum, + fg_mask) + + # masks loss + if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample + masks = F.interpolate(masks[None], (mask_h, mask_w), mode='nearest')[0] + + for i in range(batch_size): + if fg_mask[i].sum(): + mask_idx = target_gt_idx[i][fg_mask[i]] + if self.overlap: + gt_mask = torch.where(masks[[i]] == (mask_idx + 1).view(-1, 1, 1), 1.0, 0.0) + else: + gt_mask = masks[batch_idx.view(-1) == i][mask_idx] + xyxyn = target_bboxes[i][fg_mask[i]] / imgsz[[1, 0, 1, 0]] + marea = xyxy2xywh(xyxyn)[:, 2:].prod(1) + mxyxy = xyxyn * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device) + loss[1] += self.single_mask_loss(gt_mask, pred_masks[i][fg_mask[i]], proto[i], mxyxy, + marea) # seg loss + + loss[0] *= 0.25 + loss[3] *= 0.25 + loss[1] *= 0.25 + + # bbox loss + if fg_mask2.sum(): + loss0_, loss3_, _ = self.bbox_loss2(pred_distri2, + pred_bboxes2, + anchor_points, + target_bboxes2 / stride_tensor, + target_scores2, + target_scores_sum2, + fg_mask2) + + # masks loss + if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample + masks = F.interpolate(masks[None], (mask_h, mask_w), mode='nearest')[0] + + for i in range(batch_size): + if fg_mask2[i].sum(): + mask_idx = target_gt_idx2[i][fg_mask2[i]] + if self.overlap: + gt_mask = torch.where(masks[[i]] == (mask_idx + 1).view(-1, 1, 1), 1.0, 0.0) + else: + gt_mask = masks[batch_idx.view(-1) == i][mask_idx] + xyxyn = target_bboxes2[i][fg_mask2[i]] / imgsz[[1, 0, 1, 0]] + marea = xyxy2xywh(xyxyn)[:, 2:].prod(1) + mxyxy = xyxyn * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device) + loss[1] += self.single_mask_loss(gt_mask, pred_masks2[i][fg_mask2[i]], proto2[i], mxyxy, + marea) # seg loss + + loss[0] += loss0_ + loss[3] += loss3_ + + loss[0] *= 7.5 # box gain + loss[1] *= 2.5 / batch_size + loss[2] *= 0.5 # cls gain + loss[3] *= 1.5 # dfl gain + + return loss.sum() * batch_size, loss.detach() # loss(box, cls, dfl) + + def single_mask_loss(self, gt_mask, pred, proto, xyxy, area): + # Mask loss for one image + pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n, 32) @ (32,80,80) -> (n,80,80) + loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, 
reduction='none') + #loss = sigmoid_focal_loss(pred_mask, gt_mask, alpha = .25, gamma = 2., reduction = 'none') + + #p_m = torch.flatten(pred_mask.softmax(dim = 1)) + #g_m = torch.flatten(gt_mask) + #i_m = torch.sum(torch.mul(p_m, g_m)) + #u_m = torch.sum(torch.add(p_m, g_m)) + #dice_coef = (2. * i_m + 1.) / (u_m + 1.) + #dice_loss = (1. - dice_coef) + return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean() + + +class ComputeLossLH: + # Compute losses + def __init__(self, model, use_dfl=True, overlap=True): + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h["cls_pw"]], device=device), reduction='none') + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get("label_smoothing", 0.0)) # positive, negative BCE targets + + # Focal loss + g = h["fl_gamma"] # focal loss gamma + if g > 0: + BCEcls = FocalLoss(BCEcls, g) + + m = de_parallel(model).model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 + self.BCEcls = BCEcls + self.hyp = h + self.stride = m.stride # model strides + self.nc = m.nc # number of classes + self.nl = m.nl # number of layers + self.no = m.no + self.nm = m.nm + self.overlap = overlap + self.reg_max = m.reg_max + self.device = device + + self.assigner = TaskAlignedAssigner(topk=int(os.getenv('YOLOM', 10)), + num_classes=self.nc, + alpha=float(os.getenv('YOLOA', 0.5)), + beta=float(os.getenv('YOLOB', 6.0))) + self.bbox_loss = BboxLoss(m.reg_max - 1, use_dfl=use_dfl).to(device) + self.proj = torch.arange(m.reg_max).float().to(device) # / 120.0 + self.use_dfl = use_dfl + + def preprocess(self, targets, batch_size, scale_tensor): + if targets.shape[0] == 0: + out = torch.zeros(batch_size, 0, 5, device=self.device) + else: + i = targets[:, 0] # image index + _, counts = i.unique(return_counts=True) + out = torch.zeros(batch_size, counts.max(), 5, device=self.device) + for j in range(batch_size): + matches = i == j + n = matches.sum() + if n: + out[j, :n] = targets[matches, 1:] + out[..., 1:5] = xywh2xyxy(out[..., 1:5].mul_(scale_tensor)) + return out + + def bbox_decode(self, anchor_points, pred_dist): + if self.use_dfl: + b, a, c = pred_dist.shape # batch, anchors, channels + pred_dist = pred_dist.view(b, a, 4, c // 4).softmax(3).matmul(self.proj.type(pred_dist.dtype)) + # pred_dist = pred_dist.view(b, a, c // 4, 4).transpose(2,3).softmax(3).matmul(self.proj.type(pred_dist.dtype)) + # pred_dist = (pred_dist.view(b, a, c // 4, 4).softmax(2) * self.proj.type(pred_dist.dtype).view(1, 1, -1, 1)).sum(2) + return dist2bbox(pred_dist, anchor_points, xywh=False) + + def __call__(self, p, targets, masks, img=None, epoch=0): + loss = torch.zeros(4, device=self.device) # box, cls, dfl + + feats_, pred_masks_, proto_ = p if len(p) == 3 else p[1] + + feats, pred_masks, proto = feats_[0], pred_masks_[0], proto_[0] + feats2, pred_masks2, proto2 = feats_[1], pred_masks_[1], proto_[1] + + batch_size, _, mask_h, mask_w = proto.shape + + pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split( + (self.reg_max * 4, self.nc), 1) + pred_scores = pred_scores.permute(0, 2, 1).contiguous() + pred_distri = pred_distri.permute(0, 2, 1).contiguous() + pred_masks = pred_masks.permute(0, 2, 1).contiguous() + + pred_distri2, pred_scores2 = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats2], 2).split( 
+ (self.reg_max * 4, self.nc), 1) + pred_scores2 = pred_scores2.permute(0, 2, 1).contiguous() + pred_distri2 = pred_distri2.permute(0, 2, 1).contiguous() + pred_masks2 = pred_masks2.permute(0, 2, 1).contiguous() + + dtype = pred_scores.dtype + batch_size, grid_size = pred_scores.shape[:2] + imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0] # image size (h,w) + anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5) + + # targets + try: + batch_idx = targets[:, 0].view(-1, 1) + targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]]) + gt_labels, gt_bboxes = targets.split((1, 4), 2) # cls, xyxy + mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0) + except RuntimeError as e: + raise TypeError('ERROR.') from e + + + # pboxes + pred_bboxes = self.bbox_decode(anchor_points, pred_distri) # xyxy, (b, h*w, 4) + + pred_bboxes2 = self.bbox_decode(anchor_points, pred_distri2) # xyxy, (b, h*w, 4) + + target_labels, target_bboxes, target_scores, fg_mask, target_gt_idx = self.assigner( + pred_scores2.detach().sigmoid(), + (pred_bboxes2.detach() * stride_tensor).type(gt_bboxes.dtype), + anchor_points * stride_tensor, + gt_labels, + gt_bboxes, + mask_gt) + + target_scores_sum = target_scores.sum() + + # cls loss + # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum # VFL way + loss[2] = self.BCEcls(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum # BCE + loss[2] *= 0.25 + loss[2] += self.BCEcls(pred_scores2, target_scores.to(dtype)).sum() / target_scores_sum # BCE + + # bbox loss + if fg_mask.sum(): + loss[0], loss[3], _ = self.bbox_loss(pred_distri, + pred_bboxes, + anchor_points, + target_bboxes / stride_tensor, + target_scores, + target_scores_sum, + fg_mask) + + # masks loss + if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample + masks = F.interpolate(masks[None], (mask_h, mask_w), mode='nearest')[0] + + for i in range(batch_size): + if fg_mask[i].sum(): + mask_idx = target_gt_idx[i][fg_mask[i]] + if self.overlap: + gt_mask = torch.where(masks[[i]] == (mask_idx + 1).view(-1, 1, 1), 1.0, 0.0) + else: + gt_mask = masks[batch_idx.view(-1) == i][mask_idx] + xyxyn = target_bboxes[i][fg_mask[i]] / imgsz[[1, 0, 1, 0]] + marea = xyxy2xywh(xyxyn)[:, 2:].prod(1) + mxyxy = xyxyn * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device) + loss[1] += self.single_mask_loss(gt_mask, pred_masks[i][fg_mask[i]], proto[i], mxyxy, + marea) # seg loss + + loss[0] *= 0.25 + loss[3] *= 0.25 + loss[1] *= 0.25 + + # bbox loss + if fg_mask.sum(): + loss0_, loss3_, _ = self.bbox_loss(pred_distri2, + pred_bboxes2, + anchor_points, + target_bboxes / stride_tensor, + target_scores, + target_scores_sum, + fg_mask) + + # masks loss + if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample + masks = F.interpolate(masks[None], (mask_h, mask_w), mode='nearest')[0] + + for i in range(batch_size): + if fg_mask[i].sum(): + mask_idx = target_gt_idx[i][fg_mask[i]] + if self.overlap: + gt_mask = torch.where(masks[[i]] == (mask_idx + 1).view(-1, 1, 1), 1.0, 0.0) + else: + gt_mask = masks[batch_idx.view(-1) == i][mask_idx] + xyxyn = target_bboxes[i][fg_mask[i]] / imgsz[[1, 0, 1, 0]] + marea = xyxy2xywh(xyxyn)[:, 2:].prod(1) + mxyxy = xyxyn * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device) + loss[1] += self.single_mask_loss(gt_mask, pred_masks2[i][fg_mask[i]], proto2[i], mxyxy, + marea) # seg loss + + loss[0] += loss0_ + loss[3] += loss3_ + + loss[0] *= 
7.5 # box gain + loss[1] *= 2.5 / batch_size + loss[2] *= 0.5 # cls gain + loss[3] *= 1.5 # dfl gain + + return loss.sum() * batch_size, loss.detach() # loss(box, cls, dfl) + + def single_mask_loss(self, gt_mask, pred, proto, xyxy, area): + # Mask loss for one image + pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n, 32) @ (32,80,80) -> (n,80,80) + loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction='none') + #loss = sigmoid_focal_loss(pred_mask, gt_mask, alpha = .25, gamma = 2., reduction = 'none') + + #p_m = torch.flatten(pred_mask.softmax(dim = 1)) + #g_m = torch.flatten(gt_mask) + #i_m = torch.sum(torch.mul(p_m, g_m)) + #u_m = torch.sum(torch.add(p_m, g_m)) + #dice_coef = (2. * i_m + 1.) / (u_m + 1.) + #dice_loss = (1. - dice_coef) + return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean() + + +class ComputeLossLH0: + # Compute losses + def __init__(self, model, use_dfl=True, overlap=True): + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h["cls_pw"]], device=device), reduction='none') + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get("label_smoothing", 0.0)) # positive, negative BCE targets + + # Focal loss + g = h["fl_gamma"] # focal loss gamma + if g > 0: + BCEcls = FocalLoss(BCEcls, g) + + m = de_parallel(model).model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 + self.BCEcls = BCEcls + self.hyp = h + self.stride = m.stride # model strides + self.nc = m.nc # number of classes + self.nl = m.nl # number of layers + self.no = m.no + self.nm = m.nm + self.overlap = overlap + self.reg_max = m.reg_max + self.device = device + + self.assigner = TaskAlignedAssigner(topk=int(os.getenv('YOLOM', 10)), + num_classes=self.nc, + alpha=float(os.getenv('YOLOA', 0.5)), + beta=float(os.getenv('YOLOB', 6.0))) + self.bbox_loss = BboxLoss(m.reg_max - 1, use_dfl=use_dfl).to(device) + self.proj = torch.arange(m.reg_max).float().to(device) # / 120.0 + self.use_dfl = use_dfl + + def preprocess(self, targets, batch_size, scale_tensor): + if targets.shape[0] == 0: + out = torch.zeros(batch_size, 0, 5, device=self.device) + else: + i = targets[:, 0] # image index + _, counts = i.unique(return_counts=True) + out = torch.zeros(batch_size, counts.max(), 5, device=self.device) + for j in range(batch_size): + matches = i == j + n = matches.sum() + if n: + out[j, :n] = targets[matches, 1:] + out[..., 1:5] = xywh2xyxy(out[..., 1:5].mul_(scale_tensor)) + return out + + def bbox_decode(self, anchor_points, pred_dist): + if self.use_dfl: + b, a, c = pred_dist.shape # batch, anchors, channels + pred_dist = pred_dist.view(b, a, 4, c // 4).softmax(3).matmul(self.proj.type(pred_dist.dtype)) + # pred_dist = pred_dist.view(b, a, c // 4, 4).transpose(2,3).softmax(3).matmul(self.proj.type(pred_dist.dtype)) + # pred_dist = (pred_dist.view(b, a, c // 4, 4).softmax(2) * self.proj.type(pred_dist.dtype).view(1, 1, -1, 1)).sum(2) + return dist2bbox(pred_dist, anchor_points, xywh=False) + + def __call__(self, p, targets, masks, img=None, epoch=0): + loss = torch.zeros(4, device=self.device) # box, cls, dfl + + feats_, pred_masks_, proto_ = p if len(p) == 3 else p[1] + + feats, pred_masks, proto = feats_[0], pred_masks_[0], proto_[0] + feats2, pred_masks2, proto2 = feats_[1], pred_masks_[1], proto_[1] + + batch_size, _, mask_h, mask_w 
= proto.shape + + pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split( + (self.reg_max * 4, self.nc), 1) + pred_scores = pred_scores.permute(0, 2, 1).contiguous() + pred_distri = pred_distri.permute(0, 2, 1).contiguous() + pred_masks = pred_masks.permute(0, 2, 1).contiguous() + + pred_distri2, pred_scores2 = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats2], 2).split( + (self.reg_max * 4, self.nc), 1) + pred_scores2 = pred_scores2.permute(0, 2, 1).contiguous() + pred_distri2 = pred_distri2.permute(0, 2, 1).contiguous() + pred_masks2 = pred_masks2.permute(0, 2, 1).contiguous() + + dtype = pred_scores.dtype + batch_size, grid_size = pred_scores.shape[:2] + imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0] # image size (h,w) + anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5) + + # targets + try: + batch_idx = targets[:, 0].view(-1, 1) + targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]]) + gt_labels, gt_bboxes = targets.split((1, 4), 2) # cls, xyxy + mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0) + except RuntimeError as e: + raise TypeError('ERROR.') from e + + + # pboxes + pred_bboxes = self.bbox_decode(anchor_points, pred_distri) # xyxy, (b, h*w, 4) + + pred_bboxes2 = self.bbox_decode(anchor_points, pred_distri2) # xyxy, (b, h*w, 4) + + target_labels, target_bboxes, target_scores, fg_mask, target_gt_idx = self.assigner( + pred_scores2.detach().sigmoid(), + (pred_bboxes2.detach() * stride_tensor).type(gt_bboxes.dtype), + anchor_points * stride_tensor, + gt_labels, + gt_bboxes, + mask_gt) + + target_scores_sum = target_scores.sum() + + # cls loss + # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum # VFL way + loss[2] = self.BCEcls(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum # BCE + loss[2] *= 0.25 + loss[2] += self.BCEcls(pred_scores2, target_scores.to(dtype)).sum() / target_scores_sum # BCE + + # bbox loss + if fg_mask.sum(): + loss[0], loss[3], _ = self.bbox_loss(pred_distri, + pred_bboxes, + anchor_points, + target_bboxes / stride_tensor, + target_scores, + target_scores_sum, + fg_mask) + + # masks loss + if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample + masks = F.interpolate(masks[None], (mask_h, mask_w), mode='nearest')[0] + + for i in range(batch_size): + if fg_mask[i].sum(): + mask_idx = target_gt_idx[i][fg_mask[i]] + if self.overlap: + gt_mask = torch.where(masks[[i]] == (mask_idx + 1).view(-1, 1, 1), 1.0, 0.0) + else: + gt_mask = masks[batch_idx.view(-1) == i][mask_idx] + xyxyn = target_bboxes[i][fg_mask[i]] / imgsz[[1, 0, 1, 0]] + marea = xyxy2xywh(xyxyn)[:, 2:].prod(1) + mxyxy = xyxyn * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device) + loss[1] += self.single_mask_loss(gt_mask, pred_masks[i][fg_mask[i]], proto[i], mxyxy, + marea) # seg loss + + loss[0] *= 0.25 + loss[3] *= 0.25 + loss[1] *= 0.25 + + # bbox loss + if fg_mask.sum(): + loss0_, loss3_, _ = self.bbox_loss(pred_distri2, + pred_bboxes2, + anchor_points, + target_bboxes / stride_tensor, + target_scores, + target_scores_sum, + fg_mask) + + # masks loss + if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample + masks = F.interpolate(masks[None], (mask_h, mask_w), mode='nearest')[0] + + for i in range(batch_size): + if fg_mask[i].sum(): + mask_idx = target_gt_idx[i][fg_mask[i]] + if self.overlap: + gt_mask = torch.where(masks[[i]] == (mask_idx + 
1).view(-1, 1, 1), 1.0, 0.0) + else: + gt_mask = masks[batch_idx.view(-1) == i][mask_idx] + xyxyn = target_bboxes[i][fg_mask[i]] / imgsz[[1, 0, 1, 0]] + marea = xyxy2xywh(xyxyn)[:, 2:].prod(1) + mxyxy = xyxyn * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device) + loss[1] += 0. * self.single_mask_loss(gt_mask, pred_masks2[i][fg_mask[i]], proto2[i], mxyxy, + marea) # seg loss + + loss[0] += loss0_ + loss[3] += loss3_ + + loss[0] *= 7.5 # box gain + loss[1] *= 2.5 / batch_size + loss[2] *= 0.5 # cls gain + loss[3] *= 1.5 # dfl gain + + return loss.sum() * batch_size, loss.detach() # loss(box, cls, dfl) + + def single_mask_loss(self, gt_mask, pred, proto, xyxy, area): + # Mask loss for one image + pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n, 32) @ (32,80,80) -> (n,80,80) + loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction='none') + #loss = sigmoid_focal_loss(pred_mask, gt_mask, alpha = .25, gamma = 2., reduction = 'none') + + #p_m = torch.flatten(pred_mask.softmax(dim = 1)) + #g_m = torch.flatten(gt_mask) + #i_m = torch.sum(torch.mul(p_m, g_m)) + #u_m = torch.sum(torch.add(p_m, g_m)) + #dice_coef = (2. * i_m + 1.) / (u_m + 1.) + #dice_loss = (1. - dice_coef) + return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean() diff --git a/utils/segment/metrics.py b/utils/segment/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..e6e5a0ad37ef3dad84cb2a247271efcaee752f11 --- /dev/null +++ b/utils/segment/metrics.py @@ -0,0 +1,205 @@ +import numpy as np + +from ..metrics import ap_per_class + + +def fitness(x): + # Model fitness as a weighted combination of metrics + w = [0.0, 0.0, 0.1, 0.9, 0.0, 0.0, 0.1, 0.9] + return (x[:, :8] * w).sum(1) + + +def ap_per_class_box_and_mask( + tp_m, + tp_b, + conf, + pred_cls, + target_cls, + plot=False, + save_dir=".", + names=(), +): + """ + Args: + tp_b: tp of boxes. + tp_m: tp of masks. + other arguments see `func: ap_per_class`. + """ + results_boxes = ap_per_class(tp_b, + conf, + pred_cls, + target_cls, + plot=plot, + save_dir=save_dir, + names=names, + prefix="Box")[2:] + results_masks = ap_per_class(tp_m, + conf, + pred_cls, + target_cls, + plot=plot, + save_dir=save_dir, + names=names, + prefix="Mask")[2:] + + results = { + "boxes": { + "p": results_boxes[0], + "r": results_boxes[1], + "ap": results_boxes[3], + "f1": results_boxes[2], + "ap_class": results_boxes[4]}, + "masks": { + "p": results_masks[0], + "r": results_masks[1], + "ap": results_masks[3], + "f1": results_masks[2], + "ap_class": results_masks[4]}} + return results + + +class Metric: + + def __init__(self) -> None: + self.p = [] # (nc, ) + self.r = [] # (nc, ) + self.f1 = [] # (nc, ) + self.all_ap = [] # (nc, 10) + self.ap_class_index = [] # (nc, ) + + @property + def ap50(self): + """AP@0.5 of all classes. + Return: + (nc, ) or []. + """ + return self.all_ap[:, 0] if len(self.all_ap) else [] + + @property + def ap(self): + """AP@0.5:0.95 + Return: + (nc, ) or []. + """ + return self.all_ap.mean(1) if len(self.all_ap) else [] + + @property + def mp(self): + """mean precision of all classes. + Return: + float. + """ + return self.p.mean() if len(self.p) else 0.0 + + @property + def mr(self): + """mean recall of all classes. + Return: + float. + """ + return self.r.mean() if len(self.r) else 0.0 + + @property + def map50(self): + """Mean AP@0.5 of all classes. + Return: + float. 
+ """ + return self.all_ap[:, 0].mean() if len(self.all_ap) else 0.0 + + @property + def map(self): + """Mean AP@0.5:0.95 of all classes. + Return: + float. + """ + return self.all_ap.mean() if len(self.all_ap) else 0.0 + + def mean_results(self): + """Mean of results, return mp, mr, map50, map""" + return (self.mp, self.mr, self.map50, self.map) + + def class_result(self, i): + """class-aware result, return p[i], r[i], ap50[i], ap[i]""" + return (self.p[i], self.r[i], self.ap50[i], self.ap[i]) + + def get_maps(self, nc): + maps = np.zeros(nc) + self.map + for i, c in enumerate(self.ap_class_index): + maps[c] = self.ap[i] + return maps + + def update(self, results): + """ + Args: + results: tuple(p, r, ap, f1, ap_class) + """ + p, r, all_ap, f1, ap_class_index = results + self.p = p + self.r = r + self.all_ap = all_ap + self.f1 = f1 + self.ap_class_index = ap_class_index + + +class Metrics: + """Metric for boxes and masks.""" + + def __init__(self) -> None: + self.metric_box = Metric() + self.metric_mask = Metric() + + def update(self, results): + """ + Args: + results: Dict{'boxes': Dict{}, 'masks': Dict{}} + """ + self.metric_box.update(list(results["boxes"].values())) + self.metric_mask.update(list(results["masks"].values())) + + def mean_results(self): + return self.metric_box.mean_results() + self.metric_mask.mean_results() + + def class_result(self, i): + return self.metric_box.class_result(i) + self.metric_mask.class_result(i) + + def get_maps(self, nc): + return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc) + + @property + def ap_class_index(self): + # boxes and masks have the same ap_class_index + return self.metric_box.ap_class_index + + +KEYS = [ + "train/box_loss", + "train/seg_loss", # train loss + "train/obj_loss", + "train/cls_loss", + "metrics/precision(B)", + "metrics/recall(B)", + "metrics/mAP_0.5(B)", + "metrics/mAP_0.5:0.95(B)", # metrics + "metrics/precision(M)", + "metrics/recall(M)", + "metrics/mAP_0.5(M)", + "metrics/mAP_0.5:0.95(M)", # metrics + "val/box_loss", + "val/seg_loss", # val loss + "val/obj_loss", + "val/cls_loss", + "x/lr0", + "x/lr1", + "x/lr2",] + +BEST_KEYS = [ + "best/epoch", + "best/precision(B)", + "best/recall(B)", + "best/mAP_0.5(B)", + "best/mAP_0.5:0.95(B)", + "best/precision(M)", + "best/recall(M)", + "best/mAP_0.5(M)", + "best/mAP_0.5:0.95(M)",] diff --git a/utils/segment/plots.py b/utils/segment/plots.py new file mode 100644 index 0000000000000000000000000000000000000000..9b90900b3772fe23dbd57deb64221f98e563b069 --- /dev/null +++ b/utils/segment/plots.py @@ -0,0 +1,143 @@ +import contextlib +import math +from pathlib import Path + +import cv2 +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import torch + +from .. import threaded +from ..general import xywh2xyxy +from ..plots import Annotator, colors + + +@threaded +def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg', names=None): + # Plot image grid with labels + if isinstance(images, torch.Tensor): + images = images.cpu().float().numpy() + if isinstance(targets, torch.Tensor): + targets = targets.cpu().numpy() + if isinstance(masks, torch.Tensor): + masks = masks.cpu().numpy().astype(int) + + max_size = 1920 # max image size + max_subplots = 16 # max image subplots, i.e. 
4x4 + bs, _, h, w = images.shape # batch size, _, height, width + bs = min(bs, max_subplots) # limit plot images + ns = np.ceil(bs ** 0.5) # number of subplots (square) + if np.max(images[0]) <= 1: + images *= 255 # de-normalise (optional) + + # Build Image + mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init + for i, im in enumerate(images): + if i == max_subplots: # if last batch has fewer images than we expect + break + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + im = im.transpose(1, 2, 0) + mosaic[y:y + h, x:x + w, :] = im + + # Resize (optional) + scale = max_size / ns / max(h, w) + if scale < 1: + h = math.ceil(scale * h) + w = math.ceil(scale * w) + mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) + + # Annotate + fs = int((h + w) * ns * 0.01) # font size + annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) + for i in range(i + 1): + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders + if paths: + annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames + if len(targets) > 0: + idx = targets[:, 0] == i + ti = targets[idx] # image targets + + boxes = xywh2xyxy(ti[:, 2:6]).T + classes = ti[:, 1].astype('int') + labels = ti.shape[1] == 6 # labels if no conf column + conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) + + if boxes.shape[1]: + if boxes.max() <= 1.01: # if normalized with tolerance 0.01 + boxes[[0, 2]] *= w # scale to pixels + boxes[[1, 3]] *= h + elif scale < 1: # absolute coords need scale if image scales + boxes *= scale + boxes[[0, 2]] += x + boxes[[1, 3]] += y + for j, box in enumerate(boxes.T.tolist()): + cls = classes[j] + color = colors(cls) + cls = names[cls] if names else cls + if labels or conf[j] > 0.25: # 0.25 conf thresh + label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' + annotator.box_label(box, label, color=color) + + # Plot masks + if len(masks): + if masks.max() > 1.0: # mean that masks are overlap + image_masks = masks[[i]] # (1, 640, 640) + nl = len(ti) + index = np.arange(nl).reshape(nl, 1, 1) + 1 + image_masks = np.repeat(image_masks, nl, axis=0) + image_masks = np.where(image_masks == index, 1.0, 0.0) + else: + image_masks = masks[idx] + + im = np.asarray(annotator.im).copy() + for j, box in enumerate(boxes.T.tolist()): + if labels or conf[j] > 0.25: # 0.25 conf thresh + color = colors(classes[j]) + mh, mw = image_masks[j].shape + if mh != h or mw != w: + mask = image_masks[j].astype(np.uint8) + mask = cv2.resize(mask, (w, h)) + mask = mask.astype(bool) + else: + mask = image_masks[j].astype(bool) + with contextlib.suppress(Exception): + im[y:y + h, x:x + w, :][mask] = im[y:y + h, x:x + w, :][mask] * 0.4 + np.array(color) * 0.6 + annotator.fromarray(im) + annotator.im.save(fname) # save + + +def plot_results_with_masks(file="path/to/results.csv", dir="", best=True): + # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') + save_dir = Path(file).parent if file else Path(dir) + fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True) + ax = ax.ravel() + files = list(save_dir.glob("results*.csv")) + assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot." 
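+ # Each results*.csv is parsed below; the "best" epoch is the argmax of
+ # 0.1*mAP_0.5 + 0.9*mAP_0.5:0.95 summed over boxes (columns 7-8) and masks (columns 11-12),
+ # the same weighting applied by fitness() in utils/segment/metrics.py.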
+ for f in files: + try: + data = pd.read_csv(f) + index = np.argmax(0.9 * data.values[:, 8] + 0.1 * data.values[:, 7] + 0.9 * data.values[:, 12] + + 0.1 * data.values[:, 11]) + s = [x.strip() for x in data.columns] + x = data.values[:, 0] + for i, j in enumerate([1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12]): + y = data.values[:, j] + # y[y == 0] = np.nan # don't show zero values + ax[i].plot(x, y, marker=".", label=f.stem, linewidth=2, markersize=2) + if best: + # best + ax[i].scatter(index, y[index], color="r", label=f"best:{index}", marker="*", linewidth=3) + ax[i].set_title(s[j] + f"\n{round(y[index], 5)}") + else: + # last + ax[i].scatter(x[-1], y[-1], color="r", label="last", marker="*", linewidth=3) + ax[i].set_title(s[j] + f"\n{round(y[-1], 5)}") + # if j in [8, 9, 10]: # share train and val loss y axes + # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) + except Exception as e: + print(f"Warning: Plotting error for {f}: {e}") + ax[1].legend() + fig.savefig(save_dir / "results.png", dpi=200) + plt.close() diff --git a/utils/segment/tal/__init__.py b/utils/segment/tal/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..84952a8167bc2975913a6def6b4f027d566552a9 --- /dev/null +++ b/utils/segment/tal/__init__.py @@ -0,0 +1 @@ +# init \ No newline at end of file diff --git a/utils/segment/tal/anchor_generator.py b/utils/segment/tal/anchor_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..0de163651e21225445097f90e05a6c6d8ff10092 --- /dev/null +++ b/utils/segment/tal/anchor_generator.py @@ -0,0 +1,38 @@ +import torch + +from utils.general import check_version + +TORCH_1_10 = check_version(torch.__version__, '1.10.0') + + +def make_anchors(feats, strides, grid_cell_offset=0.5): + """Generate anchors from features.""" + anchor_points, stride_tensor = [], [] + assert feats is not None + dtype, device = feats[0].dtype, feats[0].device + for i, stride in enumerate(strides): + _, _, h, w = feats[i].shape + sx = torch.arange(end=w, device=device, dtype=dtype) + grid_cell_offset # shift x + sy = torch.arange(end=h, device=device, dtype=dtype) + grid_cell_offset # shift y + sy, sx = torch.meshgrid(sy, sx, indexing='ij') if TORCH_1_10 else torch.meshgrid(sy, sx) + anchor_points.append(torch.stack((sx, sy), -1).view(-1, 2)) + stride_tensor.append(torch.full((h * w, 1), stride, dtype=dtype, device=device)) + return torch.cat(anchor_points), torch.cat(stride_tensor) + + +def dist2bbox(distance, anchor_points, xywh=True, dim=-1): + """Transform distance(ltrb) to box(xywh or xyxy).""" + lt, rb = torch.split(distance, 2, dim) + x1y1 = anchor_points - lt + x2y2 = anchor_points + rb + if xywh: + c_xy = (x1y1 + x2y2) / 2 + wh = x2y2 - x1y1 + return torch.cat((c_xy, wh), dim) # xywh bbox + return torch.cat((x1y1, x2y2), dim) # xyxy bbox + + +def bbox2dist(anchor_points, bbox, reg_max): + """Transform bbox(xyxy) to dist(ltrb).""" + x1y1, x2y2 = torch.split(bbox, 2, -1) + return torch.cat((anchor_points - x1y1, x2y2 - anchor_points), -1).clamp(0, reg_max - 0.01) # dist (lt, rb) diff --git a/utils/segment/tal/assigner.py b/utils/segment/tal/assigner.py new file mode 100644 index 0000000000000000000000000000000000000000..598b3575f83a0ec45449910bc2c5fde18dbaa054 --- /dev/null +++ b/utils/segment/tal/assigner.py @@ -0,0 +1,180 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from utils.metrics import bbox_iou + + +def select_candidates_in_gts(xy_centers, gt_bboxes, eps=1e-9): + """select the positive anchor center in 
gt + + Args: + xy_centers (Tensor): shape(h*w, 4) + gt_bboxes (Tensor): shape(b, n_boxes, 4) + Return: + (Tensor): shape(b, n_boxes, h*w) + """ + n_anchors = xy_centers.shape[0] + bs, n_boxes, _ = gt_bboxes.shape + lt, rb = gt_bboxes.view(-1, 1, 4).chunk(2, 2) # left-top, right-bottom + bbox_deltas = torch.cat((xy_centers[None] - lt, rb - xy_centers[None]), dim=2).view(bs, n_boxes, n_anchors, -1) + # return (bbox_deltas.min(3)[0] > eps).to(gt_bboxes.dtype) + return bbox_deltas.amin(3).gt_(eps) + + +def select_highest_overlaps(mask_pos, overlaps, n_max_boxes): + """if an anchor box is assigned to multiple gts, + the one with the highest iou will be selected. + + Args: + mask_pos (Tensor): shape(b, n_max_boxes, h*w) + overlaps (Tensor): shape(b, n_max_boxes, h*w) + Return: + target_gt_idx (Tensor): shape(b, h*w) + fg_mask (Tensor): shape(b, h*w) + mask_pos (Tensor): shape(b, n_max_boxes, h*w) + """ + # (b, n_max_boxes, h*w) -> (b, h*w) + fg_mask = mask_pos.sum(-2) + if fg_mask.max() > 1: # one anchor is assigned to multiple gt_bboxes + mask_multi_gts = (fg_mask.unsqueeze(1) > 1).repeat([1, n_max_boxes, 1]) # (b, n_max_boxes, h*w) + max_overlaps_idx = overlaps.argmax(1) # (b, h*w) + is_max_overlaps = F.one_hot(max_overlaps_idx, n_max_boxes) # (b, h*w, n_max_boxes) + is_max_overlaps = is_max_overlaps.permute(0, 2, 1).to(overlaps.dtype) # (b, n_max_boxes, h*w) + mask_pos = torch.where(mask_multi_gts, is_max_overlaps, mask_pos) # (b, n_max_boxes, h*w) + fg_mask = mask_pos.sum(-2) + # find each grid serve which gt(index) + target_gt_idx = mask_pos.argmax(-2) # (b, h*w) + return target_gt_idx, fg_mask, mask_pos + + +class TaskAlignedAssigner(nn.Module): + def __init__(self, topk=13, num_classes=80, alpha=1.0, beta=6.0, eps=1e-9): + super().__init__() + self.topk = topk + self.num_classes = num_classes + self.bg_idx = num_classes + self.alpha = alpha + self.beta = beta + self.eps = eps + + @torch.no_grad() + def forward(self, pd_scores, pd_bboxes, anc_points, gt_labels, gt_bboxes, mask_gt): + """This code referenced to + https://github.com/Nioolek/PPYOLOE_pytorch/blob/master/ppyoloe/assigner/tal_assigner.py + + Args: + pd_scores (Tensor): shape(bs, num_total_anchors, num_classes) + pd_bboxes (Tensor): shape(bs, num_total_anchors, 4) + anc_points (Tensor): shape(num_total_anchors, 2) + gt_labels (Tensor): shape(bs, n_max_boxes, 1) + gt_bboxes (Tensor): shape(bs, n_max_boxes, 4) + mask_gt (Tensor): shape(bs, n_max_boxes, 1) + Returns: + target_labels (Tensor): shape(bs, num_total_anchors) + target_bboxes (Tensor): shape(bs, num_total_anchors, 4) + target_scores (Tensor): shape(bs, num_total_anchors, num_classes) + fg_mask (Tensor): shape(bs, num_total_anchors) + """ + self.bs = pd_scores.size(0) + self.n_max_boxes = gt_bboxes.size(1) + + if self.n_max_boxes == 0: + device = gt_bboxes.device + return (torch.full_like(pd_scores[..., 0], self.bg_idx).to(device), + torch.zeros_like(pd_bboxes).to(device), + torch.zeros_like(pd_scores).to(device), + torch.zeros_like(pd_scores[..., 0]).to(device), + torch.zeros_like(pd_scores[..., 0]).to(device)) + + mask_pos, align_metric, overlaps = self.get_pos_mask(pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points, + mask_gt) + + target_gt_idx, fg_mask, mask_pos = select_highest_overlaps(mask_pos, overlaps, self.n_max_boxes) + + # assigned target + target_labels, target_bboxes, target_scores = self.get_targets(gt_labels, gt_bboxes, target_gt_idx, fg_mask) + + # normalize + align_metric *= mask_pos + pos_align_metrics = align_metric.amax(axis=-1, keepdim=True) # b, 
max_num_obj + pos_overlaps = (overlaps * mask_pos).amax(axis=-1, keepdim=True) # b, max_num_obj + norm_align_metric = (align_metric * pos_overlaps / (pos_align_metrics + self.eps)).amax(-2).unsqueeze(-1) + target_scores = target_scores * norm_align_metric + + return target_labels, target_bboxes, target_scores, fg_mask.bool(), target_gt_idx + + def get_pos_mask(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points, mask_gt): + + # get anchor_align metric, (b, max_num_obj, h*w) + align_metric, overlaps = self.get_box_metrics(pd_scores, pd_bboxes, gt_labels, gt_bboxes) + # get in_gts mask, (b, max_num_obj, h*w) + mask_in_gts = select_candidates_in_gts(anc_points, gt_bboxes) + # get topk_metric mask, (b, max_num_obj, h*w) + mask_topk = self.select_topk_candidates(align_metric * mask_in_gts, + topk_mask=mask_gt.repeat([1, 1, self.topk]).bool()) + # merge all mask to a final mask, (b, max_num_obj, h*w) + mask_pos = mask_topk * mask_in_gts * mask_gt + + return mask_pos, align_metric, overlaps + + def get_box_metrics(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes): + + gt_labels = gt_labels.to(torch.long) # b, max_num_obj, 1 + ind = torch.zeros([2, self.bs, self.n_max_boxes], dtype=torch.long) # 2, b, max_num_obj + ind[0] = torch.arange(end=self.bs).view(-1, 1).repeat(1, self.n_max_boxes) # b, max_num_obj + ind[1] = gt_labels.squeeze(-1) # b, max_num_obj + # get the scores of each grid for each gt cls + bbox_scores = pd_scores[ind[0], :, ind[1]] # b, max_num_obj, h*w + + overlaps = bbox_iou(gt_bboxes.unsqueeze(2), pd_bboxes.unsqueeze(1), xywh=False, CIoU=True).squeeze(3).clamp(0) + align_metric = bbox_scores.pow(self.alpha) * (overlaps).pow(self.beta) + return align_metric, overlaps + + def select_topk_candidates(self, metrics, largest=True, topk_mask=None): + """ + Args: + metrics: (b, max_num_obj, h*w). + topk_mask: (b, max_num_obj, topk) or None + """ + + num_anchors = metrics.shape[-1] # h*w + # (b, max_num_obj, topk) + topk_metrics, topk_idxs = torch.topk(metrics, self.topk, dim=-1, largest=largest) + if topk_mask is None: + topk_mask = (topk_metrics.max(-1, keepdim=True) > self.eps).tile([1, 1, self.topk]) + # (b, max_num_obj, topk) + topk_idxs = torch.where(topk_mask, topk_idxs, 0) + # (b, max_num_obj, topk, h*w) -> (b, max_num_obj, h*w) + is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(-2) + # filter invalid bboxes + # assigned topk should be unique, this is for dealing with empty labels + # since empty labels will generate index `0` through `F.one_hot` + # NOTE: but what if the topk_idxs include `0`? 
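+ # torch.where above rewrites every masked-out top-k slot to index 0, so after the one-hot
+ # sum anchor 0 collects one spurious count per discarded slot; zeroing counts > 1 below
+ # strips those phantom hits, at the price of also dropping anchor 0 for a row whose
+ # genuine selection collides with masked slots (the corner case the NOTE above flags).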
+ is_in_topk = torch.where(is_in_topk > 1, 0, is_in_topk) + return is_in_topk.to(metrics.dtype) + + def get_targets(self, gt_labels, gt_bboxes, target_gt_idx, fg_mask): + """ + Args: + gt_labels: (b, max_num_obj, 1) + gt_bboxes: (b, max_num_obj, 4) + target_gt_idx: (b, h*w) + fg_mask: (b, h*w) + """ + + # assigned target labels, (b, 1) + batch_ind = torch.arange(end=self.bs, dtype=torch.int64, device=gt_labels.device)[..., None] + target_gt_idx = target_gt_idx + batch_ind * self.n_max_boxes # (b, h*w) + target_labels = gt_labels.long().flatten()[target_gt_idx] # (b, h*w) + + # assigned target boxes, (b, max_num_obj, 4) -> (b, h*w) + target_bboxes = gt_bboxes.view(-1, 4)[target_gt_idx] + + # assigned target scores + target_labels.clamp(0) + target_scores = F.one_hot(target_labels, self.num_classes) # (b, h*w, 80) + fg_scores_mask = fg_mask[:, :, None].repeat(1, 1, self.num_classes) # (b, h*w, 80) + target_scores = torch.where(fg_scores_mask > 0, target_scores, 0) + + return target_labels, target_bboxes, target_scores diff --git a/utils/tal/__init__.py b/utils/tal/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..84952a8167bc2975913a6def6b4f027d566552a9 --- /dev/null +++ b/utils/tal/__init__.py @@ -0,0 +1 @@ +# init \ No newline at end of file diff --git a/utils/tal/__pycache__/__init__.cpython-310.pyc b/utils/tal/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9796c817bb992010b163fc9464d86c0e4d1571d Binary files /dev/null and b/utils/tal/__pycache__/__init__.cpython-310.pyc differ diff --git a/utils/tal/__pycache__/__init__.cpython-311.pyc b/utils/tal/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..474e43e5d248b994ee3beea72f0aaec76dc86100 Binary files /dev/null and b/utils/tal/__pycache__/__init__.cpython-311.pyc differ diff --git a/utils/tal/__pycache__/anchor_generator.cpython-310.pyc b/utils/tal/__pycache__/anchor_generator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..626585dce75e32eb1aa0de158fa4e97bad8d6cd4 Binary files /dev/null and b/utils/tal/__pycache__/anchor_generator.cpython-310.pyc differ diff --git a/utils/tal/__pycache__/anchor_generator.cpython-311.pyc b/utils/tal/__pycache__/anchor_generator.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47047fb5a1bd791ac2e95aa8128458e738c61c9b Binary files /dev/null and b/utils/tal/__pycache__/anchor_generator.cpython-311.pyc differ diff --git a/utils/tal/__pycache__/assigner.cpython-310.pyc b/utils/tal/__pycache__/assigner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6ef1746f9758e0b1752146a7bfcf2c85f68dc49 Binary files /dev/null and b/utils/tal/__pycache__/assigner.cpython-310.pyc differ diff --git a/utils/tal/anchor_generator.py b/utils/tal/anchor_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..0de163651e21225445097f90e05a6c6d8ff10092 --- /dev/null +++ b/utils/tal/anchor_generator.py @@ -0,0 +1,38 @@ +import torch + +from utils.general import check_version + +TORCH_1_10 = check_version(torch.__version__, '1.10.0') + + +def make_anchors(feats, strides, grid_cell_offset=0.5): + """Generate anchors from features.""" + anchor_points, stride_tensor = [], [] + assert feats is not None + dtype, device = feats[0].dtype, feats[0].device + for i, stride in enumerate(strides): + _, _, h, w = feats[i].shape + sx = torch.arange(end=w, device=device, 
dtype=dtype) + grid_cell_offset # shift x + sy = torch.arange(end=h, device=device, dtype=dtype) + grid_cell_offset # shift y + sy, sx = torch.meshgrid(sy, sx, indexing='ij') if TORCH_1_10 else torch.meshgrid(sy, sx) + anchor_points.append(torch.stack((sx, sy), -1).view(-1, 2)) + stride_tensor.append(torch.full((h * w, 1), stride, dtype=dtype, device=device)) + return torch.cat(anchor_points), torch.cat(stride_tensor) + + +def dist2bbox(distance, anchor_points, xywh=True, dim=-1): + """Transform distance(ltrb) to box(xywh or xyxy).""" + lt, rb = torch.split(distance, 2, dim) + x1y1 = anchor_points - lt + x2y2 = anchor_points + rb + if xywh: + c_xy = (x1y1 + x2y2) / 2 + wh = x2y2 - x1y1 + return torch.cat((c_xy, wh), dim) # xywh bbox + return torch.cat((x1y1, x2y2), dim) # xyxy bbox + + +def bbox2dist(anchor_points, bbox, reg_max): + """Transform bbox(xyxy) to dist(ltrb).""" + x1y1, x2y2 = torch.split(bbox, 2, -1) + return torch.cat((anchor_points - x1y1, x2y2 - anchor_points), -1).clamp(0, reg_max - 0.01) # dist (lt, rb) diff --git a/utils/tal/assigner.py b/utils/tal/assigner.py new file mode 100644 index 0000000000000000000000000000000000000000..ac4bdb8c83837d3bee11ae7996e34f1e87ba3b65 --- /dev/null +++ b/utils/tal/assigner.py @@ -0,0 +1,179 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from utils.metrics import bbox_iou + + +def select_candidates_in_gts(xy_centers, gt_bboxes, eps=1e-9): + """select the positive anchor center in gt + + Args: + xy_centers (Tensor): shape(h*w, 4) + gt_bboxes (Tensor): shape(b, n_boxes, 4) + Return: + (Tensor): shape(b, n_boxes, h*w) + """ + n_anchors = xy_centers.shape[0] + bs, n_boxes, _ = gt_bboxes.shape + lt, rb = gt_bboxes.view(-1, 1, 4).chunk(2, 2) # left-top, right-bottom + bbox_deltas = torch.cat((xy_centers[None] - lt, rb - xy_centers[None]), dim=2).view(bs, n_boxes, n_anchors, -1) + # return (bbox_deltas.min(3)[0] > eps).to(gt_bboxes.dtype) + return bbox_deltas.amin(3).gt_(eps) + + +def select_highest_overlaps(mask_pos, overlaps, n_max_boxes): + """if an anchor box is assigned to multiple gts, + the one with the highest iou will be selected. 
+ + Args: + mask_pos (Tensor): shape(b, n_max_boxes, h*w) + overlaps (Tensor): shape(b, n_max_boxes, h*w) + Return: + target_gt_idx (Tensor): shape(b, h*w) + fg_mask (Tensor): shape(b, h*w) + mask_pos (Tensor): shape(b, n_max_boxes, h*w) + """ + # (b, n_max_boxes, h*w) -> (b, h*w) + fg_mask = mask_pos.sum(-2) + if fg_mask.max() > 1: # one anchor is assigned to multiple gt_bboxes + mask_multi_gts = (fg_mask.unsqueeze(1) > 1).repeat([1, n_max_boxes, 1]) # (b, n_max_boxes, h*w) + max_overlaps_idx = overlaps.argmax(1) # (b, h*w) + is_max_overlaps = F.one_hot(max_overlaps_idx, n_max_boxes) # (b, h*w, n_max_boxes) + is_max_overlaps = is_max_overlaps.permute(0, 2, 1).to(overlaps.dtype) # (b, n_max_boxes, h*w) + mask_pos = torch.where(mask_multi_gts, is_max_overlaps, mask_pos) # (b, n_max_boxes, h*w) + fg_mask = mask_pos.sum(-2) + # find each grid serve which gt(index) + target_gt_idx = mask_pos.argmax(-2) # (b, h*w) + return target_gt_idx, fg_mask, mask_pos + + +class TaskAlignedAssigner(nn.Module): + def __init__(self, topk=13, num_classes=80, alpha=1.0, beta=6.0, eps=1e-9): + super().__init__() + self.topk = topk + self.num_classes = num_classes + self.bg_idx = num_classes + self.alpha = alpha + self.beta = beta + self.eps = eps + + @torch.no_grad() + def forward(self, pd_scores, pd_bboxes, anc_points, gt_labels, gt_bboxes, mask_gt): + """This code referenced to + https://github.com/Nioolek/PPYOLOE_pytorch/blob/master/ppyoloe/assigner/tal_assigner.py + + Args: + pd_scores (Tensor): shape(bs, num_total_anchors, num_classes) + pd_bboxes (Tensor): shape(bs, num_total_anchors, 4) + anc_points (Tensor): shape(num_total_anchors, 2) + gt_labels (Tensor): shape(bs, n_max_boxes, 1) + gt_bboxes (Tensor): shape(bs, n_max_boxes, 4) + mask_gt (Tensor): shape(bs, n_max_boxes, 1) + Returns: + target_labels (Tensor): shape(bs, num_total_anchors) + target_bboxes (Tensor): shape(bs, num_total_anchors, 4) + target_scores (Tensor): shape(bs, num_total_anchors, num_classes) + fg_mask (Tensor): shape(bs, num_total_anchors) + """ + self.bs = pd_scores.size(0) + self.n_max_boxes = gt_bboxes.size(1) + + if self.n_max_boxes == 0: + device = gt_bboxes.device + return (torch.full_like(pd_scores[..., 0], self.bg_idx).to(device), + torch.zeros_like(pd_bboxes).to(device), + torch.zeros_like(pd_scores).to(device), + torch.zeros_like(pd_scores[..., 0]).to(device)) + + mask_pos, align_metric, overlaps = self.get_pos_mask(pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points, + mask_gt) + + target_gt_idx, fg_mask, mask_pos = select_highest_overlaps(mask_pos, overlaps, self.n_max_boxes) + + # assigned target + target_labels, target_bboxes, target_scores = self.get_targets(gt_labels, gt_bboxes, target_gt_idx, fg_mask) + + # normalize + align_metric *= mask_pos + pos_align_metrics = align_metric.amax(axis=-1, keepdim=True) # b, max_num_obj + pos_overlaps = (overlaps * mask_pos).amax(axis=-1, keepdim=True) # b, max_num_obj + norm_align_metric = (align_metric * pos_overlaps / (pos_align_metrics + self.eps)).amax(-2).unsqueeze(-1) + target_scores = target_scores * norm_align_metric + + return target_labels, target_bboxes, target_scores, fg_mask.bool() + + def get_pos_mask(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points, mask_gt): + + # get anchor_align metric, (b, max_num_obj, h*w) + align_metric, overlaps = self.get_box_metrics(pd_scores, pd_bboxes, gt_labels, gt_bboxes) + # get in_gts mask, (b, max_num_obj, h*w) + mask_in_gts = select_candidates_in_gts(anc_points, gt_bboxes) + # get topk_metric mask, (b, max_num_obj, 
h*w) + mask_topk = self.select_topk_candidates(align_metric * mask_in_gts, + topk_mask=mask_gt.repeat([1, 1, self.topk]).bool()) + # merge all mask to a final mask, (b, max_num_obj, h*w) + mask_pos = mask_topk * mask_in_gts * mask_gt + + return mask_pos, align_metric, overlaps + + def get_box_metrics(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes): + + gt_labels = gt_labels.to(torch.long) # b, max_num_obj, 1 + ind = torch.zeros([2, self.bs, self.n_max_boxes], dtype=torch.long) # 2, b, max_num_obj + ind[0] = torch.arange(end=self.bs).view(-1, 1).repeat(1, self.n_max_boxes) # b, max_num_obj + ind[1] = gt_labels.squeeze(-1) # b, max_num_obj + # get the scores of each grid for each gt cls + bbox_scores = pd_scores[ind[0], :, ind[1]] # b, max_num_obj, h*w + + overlaps = bbox_iou(gt_bboxes.unsqueeze(2), pd_bboxes.unsqueeze(1), xywh=False, CIoU=True).squeeze(3).clamp(0) + align_metric = bbox_scores.pow(self.alpha) * overlaps.pow(self.beta) + return align_metric, overlaps + + def select_topk_candidates(self, metrics, largest=True, topk_mask=None): + """ + Args: + metrics: (b, max_num_obj, h*w). + topk_mask: (b, max_num_obj, topk) or None + """ + + num_anchors = metrics.shape[-1] # h*w + # (b, max_num_obj, topk) + topk_metrics, topk_idxs = torch.topk(metrics, self.topk, dim=-1, largest=largest) + if topk_mask is None: + topk_mask = (topk_metrics.max(-1, keepdim=True) > self.eps).tile([1, 1, self.topk]) + # (b, max_num_obj, topk) + topk_idxs = torch.where(topk_mask, topk_idxs, 0) + # (b, max_num_obj, topk, h*w) -> (b, max_num_obj, h*w) + is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(-2) + # filter invalid bboxes + # assigned topk should be unique, this is for dealing with empty labels + # since empty labels will generate index `0` through `F.one_hot` + # NOTE: but what if the topk_idxs include `0`? 
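+        # in practice the fully masked (padded) rows collapse every entry to index 0, so anchor 0 gets a count of `topk` there; the `where` below zeroes any count > 1, removing those spurious hits while genuine (unique) top-k indices keep a count of 1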
+ is_in_topk = torch.where(is_in_topk > 1, 0, is_in_topk) + return is_in_topk.to(metrics.dtype) + + def get_targets(self, gt_labels, gt_bboxes, target_gt_idx, fg_mask): + """ + Args: + gt_labels: (b, max_num_obj, 1) + gt_bboxes: (b, max_num_obj, 4) + target_gt_idx: (b, h*w) + fg_mask: (b, h*w) + """ + + # assigned target labels, (b, 1) + batch_ind = torch.arange(end=self.bs, dtype=torch.int64, device=gt_labels.device)[..., None] + target_gt_idx = target_gt_idx + batch_ind * self.n_max_boxes # (b, h*w) + target_labels = gt_labels.long().flatten()[target_gt_idx] # (b, h*w) + + # assigned target boxes, (b, max_num_obj, 4) -> (b, h*w) + target_bboxes = gt_bboxes.view(-1, 4)[target_gt_idx] + + # assigned target scores + target_labels.clamp(0) + target_scores = F.one_hot(target_labels, self.num_classes) # (b, h*w, 80) + fg_scores_mask = fg_mask[:, :, None].repeat(1, 1, self.num_classes) # (b, h*w, 80) + target_scores = torch.where(fg_scores_mask > 0, target_scores, 0) + + return target_labels, target_bboxes, target_scores diff --git a/utils/torch_utils.py b/utils/torch_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3f1ce2d25251090aef94b4bbccab5ec78795ed98 --- /dev/null +++ b/utils/torch_utils.py @@ -0,0 +1,529 @@ +import math +import os +import platform +import subprocess +import time +import warnings +from contextlib import contextmanager +from copy import deepcopy +from pathlib import Path + +import torch +import torch.distributed as dist +import torch.nn as nn +import torch.nn.functional as F +from torch.nn.parallel import DistributedDataParallel as DDP + +from utils.general import LOGGER, check_version, colorstr, file_date, git_describe +from utils.lion import Lion + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) + +try: + import thop # for FLOPs computation +except ImportError: + thop = None + +# Suppress PyTorch warnings +warnings.filterwarnings('ignore', message='User provided device_type of \'cuda\', but CUDA is not available. Disabling') +warnings.filterwarnings('ignore', category=UserWarning) + + +def smart_inference_mode(torch_1_9=check_version(torch.__version__, '1.9.0')): + # Applies torch.inference_mode() decorator if torch>=1.9.0 else torch.no_grad() decorator + def decorate(fn): + return (torch.inference_mode if torch_1_9 else torch.no_grad)()(fn) + + return decorate + + +def smartCrossEntropyLoss(label_smoothing=0.0): + # Returns nn.CrossEntropyLoss with label smoothing enabled for torch>=1.10.0 + if check_version(torch.__version__, '1.10.0'): + return nn.CrossEntropyLoss(label_smoothing=label_smoothing) + if label_smoothing > 0: + LOGGER.warning(f'WARNING ⚠️ label smoothing {label_smoothing} requires torch>=1.10.0') + return nn.CrossEntropyLoss() + + +def smart_DDP(model): + # Model DDP creation with checks + assert not check_version(torch.__version__, '1.12.0', pinned=True), \ + 'torch==1.12.0 torchvision==0.13.0 DDP training is not supported due to a known issue. ' \ + 'Please upgrade or downgrade torch to use DDP. 
See https://github.com/ultralytics/yolov5/issues/8395' + if check_version(torch.__version__, '1.11.0'): + return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK, static_graph=True) + else: + return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK) + + +def reshape_classifier_output(model, n=1000): + # Update a TorchVision classification model to class count 'n' if required + from models.common import Classify + name, m = list((model.model if hasattr(model, 'model') else model).named_children())[-1] # last module + if isinstance(m, Classify): # YOLOv5 Classify() head + if m.linear.out_features != n: + m.linear = nn.Linear(m.linear.in_features, n) + elif isinstance(m, nn.Linear): # ResNet, EfficientNet + if m.out_features != n: + setattr(model, name, nn.Linear(m.in_features, n)) + elif isinstance(m, nn.Sequential): + types = [type(x) for x in m] + if nn.Linear in types: + i = types.index(nn.Linear) # nn.Linear index + if m[i].out_features != n: + m[i] = nn.Linear(m[i].in_features, n) + elif nn.Conv2d in types: + i = types.index(nn.Conv2d) # nn.Conv2d index + if m[i].out_channels != n: + m[i] = nn.Conv2d(m[i].in_channels, n, m[i].kernel_size, m[i].stride, bias=m[i].bias is not None) + + +@contextmanager +def torch_distributed_zero_first(local_rank: int): + # Decorator to make all processes in distributed training wait for each local_master to do something + if local_rank not in [-1, 0]: + dist.barrier(device_ids=[local_rank]) + yield + if local_rank == 0: + dist.barrier(device_ids=[0]) + + +def device_count(): + # Returns number of CUDA devices available. Safe version of torch.cuda.device_count(). Supports Linux and Windows + assert platform.system() in ('Linux', 'Windows'), 'device_count() only supported on Linux or Windows' + try: + cmd = 'nvidia-smi -L | wc -l' if platform.system() == 'Linux' else 'nvidia-smi -L | find /c /v ""' # Windows + return int(subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]) + except Exception: + return 0 + + +def select_device(device='', batch_size=0, newline=True): + # device = None or 'cpu' or 0 or '0' or '0,1,2,3' + s = f'YOLO 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} ' + device = str(device).strip().lower().replace('cuda:', '').replace('none', '') # to string, 'cuda:0' to '0' + cpu = device == 'cpu' + mps = device == 'mps' # Apple Metal Performance Shaders (MPS) + if cpu or mps: + os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False + elif device: # non-cpu device requested + os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available() + assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \ + f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)" + + if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available + devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 
0,1,6,7 + n = len(devices) # device count + if n > 1 and batch_size > 0: # check batch_size is divisible by device_count + assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' + space = ' ' * (len(s) + 1) + for i, d in enumerate(devices): + p = torch.cuda.get_device_properties(i) + s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\n" # bytes to MB + arg = 'cuda:0' + elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available(): # prefer MPS if available + s += 'MPS\n' + arg = 'mps' + else: # revert to CPU + s += 'CPU\n' + arg = 'cpu' + + if not newline: + s = s.rstrip() + LOGGER.info(s) + return torch.device(arg) + + +def time_sync(): + # PyTorch-accurate time + if torch.cuda.is_available(): + torch.cuda.synchronize() + return time.time() + + +def profile(input, ops, n=10, device=None): + """ YOLOv5 speed/memory/FLOPs profiler + Usage: + input = torch.randn(16, 3, 640, 640) + m1 = lambda x: x * torch.sigmoid(x) + m2 = nn.SiLU() + profile(input, [m1, m2], n=100) # profile over 100 iterations + """ + results = [] + if not isinstance(device, torch.device): + device = select_device(device) + print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}" + f"{'input':>24s}{'output':>24s}") + + for x in input if isinstance(input, list) else [input]: + x = x.to(device) + x.requires_grad = True + for m in ops if isinstance(ops, list) else [ops]: + m = m.to(device) if hasattr(m, 'to') else m # device + m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m + tf, tb, t = 0, 0, [0, 0, 0] # dt forward, backward + try: + flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs + except Exception: + flops = 0 + + try: + for _ in range(n): + t[0] = time_sync() + y = m(x) + t[1] = time_sync() + try: + _ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward() + t[2] = time_sync() + except Exception: # no backward method + # print(e) # for debug + t[2] = float('nan') + tf += (t[1] - t[0]) * 1000 / n # ms per op forward + tb += (t[2] - t[1]) * 1000 / n # ms per op backward + mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0 # (GB) + s_in, s_out = (tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' for x in (x, y)) # shapes + p = sum(x.numel() for x in m.parameters()) if isinstance(m, nn.Module) else 0 # parameters + print(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}') + results.append([p, flops, mem, tf, tb, s_in, s_out]) + except Exception as e: + print(e) + results.append(None) + torch.cuda.empty_cache() + return results + + +def is_parallel(model): + # Returns True if model is of type DP or DDP + return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) + + +def de_parallel(model): + # De-parallelize a model: returns single-GPU model if model is of type DP or DDP + return model.module if is_parallel(model) else model + + +def initialize_weights(model): + for m in model.modules(): + t = type(m) + if t is nn.Conv2d: + pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif t is nn.BatchNorm2d: + m.eps = 1e-3 + m.momentum = 0.03 + elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: + m.inplace = True + + +def find_modules(model, mclass=nn.Conv2d): + # Finds layer indices matching module class 'mclass' + return [i for i, m in 
enumerate(model.module_list) if isinstance(m, mclass)] + + +def sparsity(model): + # Return global model sparsity + a, b = 0, 0 + for p in model.parameters(): + a += p.numel() + b += (p == 0).sum() + return b / a + + +def prune(model, amount=0.3): + # Prune model to requested global sparsity + import torch.nn.utils.prune as prune + for name, m in model.named_modules(): + if isinstance(m, nn.Conv2d): + prune.l1_unstructured(m, name='weight', amount=amount) # prune + prune.remove(m, 'weight') # make permanent + LOGGER.info(f'Model pruned to {sparsity(model):.3g} global sparsity') + + +def fuse_conv_and_bn(conv, bn): + # Fuse Conv2d() and BatchNorm2d() layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ + fusedconv = nn.Conv2d(conv.in_channels, + conv.out_channels, + kernel_size=conv.kernel_size, + stride=conv.stride, + padding=conv.padding, + dilation=conv.dilation, + groups=conv.groups, + bias=True).requires_grad_(False).to(conv.weight.device) + + # Prepare filters + w_conv = conv.weight.clone().view(conv.out_channels, -1) + w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) + fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) + + # Prepare spatial bias + b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias + b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) + fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) + + return fusedconv + + +def model_info(model, verbose=False, imgsz=640): + # Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320] + n_p = sum(x.numel() for x in model.parameters()) # number parameters + n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients + if verbose: + print(f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}") + for i, (name, p) in enumerate(model.named_parameters()): + name = name.replace('module_list.', '') + print('%5g %40s %9s %12g %20s %10.3g %10.3g' % + (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) + + try: # FLOPs + p = next(model.parameters()) + stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 # max stride + im = torch.empty((1, p.shape[1], stride, stride), device=p.device) # input image in BCHW format + flops = thop.profile(deepcopy(model), inputs=(im,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs + imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz] # expand if int/float + fs = f', {flops * imgsz[0] / stride * imgsz[1] / stride:.1f} GFLOPs' # 640x640 GFLOPs + except Exception: + fs = '' + + name = Path(model.yaml_file).stem.replace('yolov5', 'YOLOv5') if hasattr(model, 'yaml_file') else 'Model' + LOGGER.info(f"{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") + + +def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) + # Scales img(bs,3,y,x) by ratio constrained to gs-multiple + if ratio == 1.0: + return img + h, w = img.shape[2:] + s = (int(h * ratio), int(w * ratio)) # new size + img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize + if not same_shape: # pad/crop img + h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w)) + return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean + + +def copy_attr(a, b, include=(), exclude=()): + # Copy attributes from b to a, options to only include 
[...] and to exclude [...] + for k, v in b.__dict__.items(): + if (len(include) and k not in include) or k.startswith('_') or k in exclude: + continue + else: + setattr(a, k, v) + + +def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, decay=1e-5): + # YOLOv5 3-param group optimizer: 0) weights with decay, 1) weights no decay, 2) biases no decay + g = [], [], [] # optimizer parameter groups + bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k) # normalization layers, i.e. BatchNorm2d() + #for v in model.modules(): + # for p_name, p in v.named_parameters(recurse=0): + # if p_name == 'bias': # bias (no decay) + # g[2].append(p) + # elif p_name == 'weight' and isinstance(v, bn): # weight (no decay) + # g[1].append(p) + # else: + # g[0].append(p) # weight (with decay) + + for v in model.modules(): + if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): # bias (no decay) + g[2].append(v.bias) + if isinstance(v, bn): # weight (no decay) + g[1].append(v.weight) + elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): # weight (with decay) + g[0].append(v.weight) + + if hasattr(v, 'im'): + if hasattr(v.im, 'implicit'): + g[1].append(v.im.implicit) + else: + for iv in v.im: + g[1].append(iv.implicit) + if hasattr(v, 'ia'): + if hasattr(v.ia, 'implicit'): + g[1].append(v.ia.implicit) + else: + for iv in v.ia: + g[1].append(iv.implicit) + + if hasattr(v, 'im2'): + if hasattr(v.im2, 'implicit'): + g[1].append(v.im2.implicit) + else: + for iv in v.im2: + g[1].append(iv.implicit) + if hasattr(v, 'ia2'): + if hasattr(v.ia2, 'implicit'): + g[1].append(v.ia2.implicit) + else: + for iv in v.ia2: + g[1].append(iv.implicit) + + if hasattr(v, 'im3'): + if hasattr(v.im3, 'implicit'): + g[1].append(v.im3.implicit) + else: + for iv in v.im3: + g[1].append(iv.implicit) + if hasattr(v, 'ia3'): + if hasattr(v.ia3, 'implicit'): + g[1].append(v.ia3.implicit) + else: + for iv in v.ia3: + g[1].append(iv.implicit) + + if hasattr(v, 'im4'): + if hasattr(v.im4, 'implicit'): + g[1].append(v.im4.implicit) + else: + for iv in v.im4: + g[1].append(iv.implicit) + if hasattr(v, 'ia4'): + if hasattr(v.ia4, 'implicit'): + g[1].append(v.ia4.implicit) + else: + for iv in v.ia4: + g[1].append(iv.implicit) + + if hasattr(v, 'im5'): + if hasattr(v.im5, 'implicit'): + g[1].append(v.im5.implicit) + else: + for iv in v.im5: + g[1].append(iv.implicit) + if hasattr(v, 'ia5'): + if hasattr(v.ia5, 'implicit'): + g[1].append(v.ia5.implicit) + else: + for iv in v.ia5: + g[1].append(iv.implicit) + + if hasattr(v, 'im6'): + if hasattr(v.im6, 'implicit'): + g[1].append(v.im6.implicit) + else: + for iv in v.im6: + g[1].append(iv.implicit) + if hasattr(v, 'ia6'): + if hasattr(v.ia6, 'implicit'): + g[1].append(v.ia6.implicit) + else: + for iv in v.ia6: + g[1].append(iv.implicit) + + if hasattr(v, 'im7'): + if hasattr(v.im7, 'implicit'): + g[1].append(v.im7.implicit) + else: + for iv in v.im7: + g[1].append(iv.implicit) + if hasattr(v, 'ia7'): + if hasattr(v.ia7, 'implicit'): + g[1].append(v.ia7.implicit) + else: + for iv in v.ia7: + g[1].append(iv.implicit) + + if name == 'Adam': + optimizer = torch.optim.Adam(g[2], lr=lr, betas=(momentum, 0.999)) # adjust beta1 to momentum + elif name == 'AdamW': + optimizer = torch.optim.AdamW(g[2], lr=lr, betas=(momentum, 0.999), weight_decay=0.0, amsgrad=True) + elif name == 'RMSProp': + optimizer = torch.optim.RMSprop(g[2], lr=lr, momentum=momentum) + elif name == 'SGD': + optimizer = torch.optim.SGD(g[2], lr=lr, momentum=momentum, nesterov=True) + elif name == 'LION': + 
optimizer = Lion(g[2], lr=lr, betas=(momentum, 0.99), weight_decay=0.0) + else: + raise NotImplementedError(f'Optimizer {name} not implemented.') + + optimizer.add_param_group({'params': g[0], 'weight_decay': decay}) # add g0 with weight_decay + optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0}) # add g1 (BatchNorm2d weights) + LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}) with parameter groups " + f"{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias") + return optimizer + + +def smart_hub_load(repo='ultralytics/yolov5', model='yolov5s', **kwargs): + # YOLOv5 torch.hub.load() wrapper with smart error/issue handling + if check_version(torch.__version__, '1.9.1'): + kwargs['skip_validation'] = True # validation causes GitHub API rate limit errors + if check_version(torch.__version__, '1.12.0'): + kwargs['trust_repo'] = True # argument required starting in torch 0.12 + try: + return torch.hub.load(repo, model, **kwargs) + except Exception: + return torch.hub.load(repo, model, force_reload=True, **kwargs) + + +def smart_resume(ckpt, optimizer, ema=None, weights='yolov5s.pt', epochs=300, resume=True): + # Resume training from a partially trained checkpoint + best_fitness = 0.0 + start_epoch = ckpt['epoch'] + 1 + if ckpt['optimizer'] is not None: + optimizer.load_state_dict(ckpt['optimizer']) # optimizer + best_fitness = ckpt['best_fitness'] + if ema and ckpt.get('ema'): + ema.ema.load_state_dict(ckpt['ema'].float().state_dict()) # EMA + ema.updates = ckpt['updates'] + if resume: + assert start_epoch > 0, f'{weights} training to {epochs} epochs is finished, nothing to resume.\n' \ + f"Start a new training without --resume, i.e. 'python train.py --weights {weights}'" + LOGGER.info(f'Resuming training from {weights} from epoch {start_epoch} to {epochs} total epochs') + if epochs < start_epoch: + LOGGER.info(f"{weights} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {epochs} more epochs.") + epochs += ckpt['epoch'] # finetune additional epochs + return best_fitness, start_epoch, epochs + + +class EarlyStopping: + # YOLOv5 simple early stopper + def __init__(self, patience=30): + self.best_fitness = 0.0 # i.e. mAP + self.best_epoch = 0 + self.patience = patience or float('inf') # epochs to wait after fitness stops improving to stop + self.possible_stop = False # possible stop may occur next epoch + + def __call__(self, epoch, fitness): + if fitness >= self.best_fitness: # >= 0 to allow for early zero-fitness stage of training + self.best_epoch = epoch + self.best_fitness = fitness + delta = epoch - self.best_epoch # epochs without improvement + self.possible_stop = delta >= (self.patience - 1) # possible stop may occur next epoch + stop = delta >= self.patience # stop training if patience exceeded + if stop: + LOGGER.info(f'Stopping training early as no improvement observed in last {self.patience} epochs. ' + f'Best results observed at epoch {self.best_epoch}, best model saved as best.pt.\n' + f'To update EarlyStopping(patience={self.patience}) pass a new patience value, ' + f'i.e. 
`python train.py --patience 300` or use `--patience 0` to disable EarlyStopping.') + return stop + + +class ModelEMA: + """ Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models + Keeps a moving average of everything in the model state_dict (parameters and buffers) + For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage + """ + + def __init__(self, model, decay=0.9999, tau=2000, updates=0): + # Create EMA + self.ema = deepcopy(de_parallel(model)).eval() # FP32 EMA + self.updates = updates # number of EMA updates + self.decay = lambda x: decay * (1 - math.exp(-x / tau)) # decay exponential ramp (to help early epochs) + for p in self.ema.parameters(): + p.requires_grad_(False) + + def update(self, model): + # Update EMA parameters + self.updates += 1 + d = self.decay(self.updates) + + msd = de_parallel(model).state_dict() # model state_dict + for k, v in self.ema.state_dict().items(): + if v.dtype.is_floating_point: # true for FP16 and FP32 + v *= d + v += (1 - d) * msd[k].detach() + # assert v.dtype == msd[k].dtype == torch.float32, f'{k}: EMA {v.dtype} and model {msd[k].dtype} must be FP32' + + def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): + # Update EMA attributes + copy_attr(self.ema, model, include, exclude) diff --git a/utils/triton.py b/utils/triton.py new file mode 100644 index 0000000000000000000000000000000000000000..bf09797cf02335e7bd6da7ac8c451979a6073f5d --- /dev/null +++ b/utils/triton.py @@ -0,0 +1,81 @@ +import typing +from urllib.parse import urlparse + +import torch + + +class TritonRemoteModel: + """ A wrapper over a model served by the Triton Inference Server. It can + be configured to communicate over GRPC or HTTP. It accepts Torch Tensors + as input and returns them as outputs. + """ + + def __init__(self, url: str): + """ + Keyword arguments: + url: Fully qualified address of the Triton server - for e.g. grpc://localhost:8000 + """ + + parsed_url = urlparse(url) + if parsed_url.scheme == "grpc": + from tritonclient.grpc import InferenceServerClient, InferInput + + self.client = InferenceServerClient(parsed_url.netloc) # Triton GRPC client + model_repository = self.client.get_model_repository_index() + self.model_name = model_repository.models[0].name + self.metadata = self.client.get_model_metadata(self.model_name, as_json=True) + + def create_input_placeholders() -> typing.List[InferInput]: + return [ + InferInput(i['name'], [int(s) for s in i["shape"]], i['datatype']) for i in self.metadata['inputs']] + + else: + from tritonclient.http import InferenceServerClient, InferInput + + self.client = InferenceServerClient(parsed_url.netloc) # Triton HTTP client + model_repository = self.client.get_model_repository_index() + self.model_name = model_repository[0]['name'] + self.metadata = self.client.get_model_metadata(self.model_name) + + def create_input_placeholders() -> typing.List[InferInput]: + return [ + InferInput(i['name'], [int(s) for s in i["shape"]], i['datatype']) for i in self.metadata['inputs']] + + self._create_input_placeholders_fn = create_input_placeholders + + @property + def runtime(self): + """Returns the model runtime""" + return self.metadata.get("backend", self.metadata.get("platform")) + + def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typing.Tuple[torch.Tensor, ...]]: + """ Invokes the model. Parameters can be provided via args or kwargs. + args, if provided, are assumed to match the order of inputs of the model. 
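+        Values are expected to be torch Tensors; they are moved to the CPU and converted to numpy before being sent to the server.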
+ kwargs are matched with the model input names. + """ + inputs = self._create_inputs(*args, **kwargs) + response = self.client.infer(model_name=self.model_name, inputs=inputs) + result = [] + for output in self.metadata['outputs']: + tensor = torch.as_tensor(response.as_numpy(output['name'])) + result.append(tensor) + return result[0] if len(result) == 1 else result + + def _create_inputs(self, *args, **kwargs): + args_len, kwargs_len = len(args), len(kwargs) + if not args_len and not kwargs_len: + raise RuntimeError("No inputs provided.") + if args_len and kwargs_len: + raise RuntimeError("Cannot specify args and kwargs at the same time") + + placeholders = self._create_input_placeholders_fn() + if args_len: + if args_len != len(placeholders): + raise RuntimeError(f"Expected {len(placeholders)} inputs, got {args_len}.") + for input, value in zip(placeholders, args): + input.set_data_from_numpy(value.cpu().numpy()) + else: + for input in placeholders: + value = kwargs[input.name] + input.set_data_from_numpy(value.cpu().numpy()) + return placeholders diff --git a/val.py b/val.py new file mode 100644 index 0000000000000000000000000000000000000000..496b941f9cdce30a573e3ec96ddb8e1b3c3b0dfe --- /dev/null +++ b/val.py @@ -0,0 +1,389 @@ +import argparse +import json +import os +import sys +from pathlib import Path + +import numpy as np +import torch +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[0] # YOLO root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from models.common import DetectMultiBackend +from utils.callbacks import Callbacks +from utils.dataloaders import create_dataloader +from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, check_requirements, + check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, + print_args, scale_boxes, xywh2xyxy, xyxy2xywh) +from utils.metrics import ConfusionMatrix, ap_per_class, box_iou +from utils.plots import output_to_target, plot_images, plot_val_study +from utils.torch_utils import select_device, smart_inference_mode + + +def save_one_txt(predn, save_conf, shape, file): + # Save one txt result + gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh + for *xyxy, conf, cls in predn.tolist(): + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format + with open(file, 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + +def save_one_json(predn, jdict, path, class_map): + # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} + image_id = int(path.stem) if path.stem.isnumeric() else path.stem + box = xyxy2xywh(predn[:, :4]) # xywh + box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner + for p, b in zip(predn.tolist(), box.tolist()): + jdict.append({ + 'image_id': image_id, + 'category_id': class_map[int(p[5])], + 'bbox': [round(x, 3) for x in b], + 'score': round(p[4], 5)}) + + +def process_batch(detections, labels, iouv): + """ + Return correct prediction matrix + Arguments: + detections (array[N, 6]), x1, y1, x2, y2, conf, class + labels (array[M, 5]), class, x1, y1, x2, y2 + Returns: + correct (array[N, 10]), for 10 IoU levels + """ + correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) + iou = box_iou(labels[:, 1:], detections[:, 
:4]) + correct_class = labels[:, 0:1] == detections[:, 5] + for i in range(len(iouv)): + x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + # matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + correct[matches[:, 1].astype(int), i] = True + return torch.tensor(correct, dtype=torch.bool, device=iouv.device) + + +@smart_inference_mode() +def run( + data, + weights=None, # model.pt path(s) + batch_size=32, # batch size + imgsz=640, # inference size (pixels) + conf_thres=0.001, # confidence threshold + iou_thres=0.7, # NMS IoU threshold + max_det=300, # maximum detections per image + task='val', # train, val, test, speed or study + device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu + workers=8, # max dataloader workers (per RANK in DDP mode) + single_cls=False, # treat as single-class dataset + augment=False, # augmented inference + verbose=False, # verbose output + save_txt=False, # save results to *.txt + save_hybrid=False, # save label+prediction hybrid results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_json=False, # save a COCO-JSON results file + project=ROOT / 'runs/val', # save to project/name + name='exp', # save to project/name + exist_ok=False, # existing project/name ok, do not increment + half=True, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + min_items=0, # Experimental + model=None, + dataloader=None, + save_dir=Path(''), + plots=True, + callbacks=Callbacks(), + compute_loss=None, +): + # Initialize/load model and set device + training = model is not None + if training: # called by train.py + device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model + half &= device.type != 'cpu' # half precision only supported on CUDA + model.half() if half else model.float() + else: # called directly + device = select_device(device, batch_size=batch_size) + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Load model + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine + imgsz = check_img_size(imgsz, s=stride) # check image size + half = model.fp16 # FP16 supported on limited backends with CUDA + if engine: + batch_size = model.batch_size + else: + device = model.device + if not (pt or jit): + batch_size = 1 # export.py models default to batch-size 1 + LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') + + # Data + data = check_dataset(data) # check + + # Configure + model.eval() + cuda = device.type != 'cpu' + #is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset + is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'val2017.txt') # COCO dataset + nc = 1 if single_cls else int(data['nc']) # number of classes + iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95 + niou = iouv.numel() + + # 
Dataloader + if not training: + if pt and not single_cls: # check --weights are trained on --data + ncm = model.model.nc + assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ + f'classes). Pass correct combination of --weights and --data that are trained together.' + model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup + pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks + task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images + dataloader = create_dataloader(data[task], + imgsz, + batch_size, + stride, + single_cls, + pad=pad, + rect=rect, + workers=workers, + min_items=opt.min_items, + prefix=colorstr(f'{task}: '))[0] + + seen = 0 + confusion_matrix = ConfusionMatrix(nc=nc) + names = model.names if hasattr(model, 'names') else model.module.names # get class names + if isinstance(names, (list, tuple)): # old format + names = dict(enumerate(names)) + class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) + s = ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'P', 'R', 'mAP50', 'mAP50-95') + tp, fp, p, r, f1, mp, mr, map50, ap50, map = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 + dt = Profile(), Profile(), Profile() # profiling times + loss = torch.zeros(3, device=device) + jdict, stats, ap, ap_class = [], [], [], [] + callbacks.run('on_val_start') + pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar + for batch_i, (im, targets, paths, shapes) in enumerate(pbar): + callbacks.run('on_val_batch_start') + with dt[0]: + if cuda: + im = im.to(device, non_blocking=True) + targets = targets.to(device) + im = im.half() if half else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + nb, _, height, width = im.shape # batch size, channels, height, width + + # Inference + with dt[1]: + preds, train_out = model(im) if compute_loss else (model(im, augment=augment), None) + + # Loss + if compute_loss: + loss += compute_loss(train_out, targets)[1] # box, obj, cls + + # NMS + targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels + lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling + with dt[2]: + preds = non_max_suppression(preds, + conf_thres, + iou_thres, + labels=lb, + multi_label=True, + agnostic=single_cls, + max_det=max_det) + + # Metrics + for si, pred in enumerate(preds): + labels = targets[targets[:, 0] == si, 1:] + nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions + path, shape = Path(paths[si]), shapes[si][0] + correct = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init + seen += 1 + + if npr == 0: + if nl: + stats.append((correct, *torch.zeros((2, 0), device=device), labels[:, 0])) + if plots: + confusion_matrix.process_batch(detections=None, labels=labels[:, 0]) + continue + + # Predictions + if single_cls: + pred[:, 5] = 0 + predn = pred.clone() + scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred + + # Evaluate + if nl: + tbox = xywh2xyxy(labels[:, 1:5]) # target boxes + scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels + labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels + correct = process_batch(predn, labelsn, iouv) + if plots: + confusion_matrix.process_batch(predn, labelsn) + stats.append((correct, pred[:, 4], pred[:, 5], labels[:, 0])) # (correct, conf, pcls, tcls) + + 
# Save/log + if save_txt: + save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') + if save_json: + save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary + callbacks.run('on_val_image_end', pred, predn, path, names, im[si]) + + # Plot images + if plots and batch_i < 3: + plot_images(im, targets, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names) # labels + plot_images(im, output_to_target(preds), paths, save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred + + callbacks.run('on_val_batch_end', batch_i, im, targets, paths, shapes, preds) + + # Compute metrics + stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy + if len(stats) and stats[0].any(): + tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names) + ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95 + mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean() + nt = np.bincount(stats[3].astype(int), minlength=nc) # number of targets per class + + # Print results + pf = '%22s' + '%11i' * 2 + '%11.3g' * 4 # print format + LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) + if nt.sum() == 0: + LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels') + + # Print results per class + if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): + for i, c in enumerate(ap_class): + LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) + + # Print speeds + t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image + if not training: + shape = (batch_size, 3, imgsz, imgsz) + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) + + # Plots + if plots: + confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) + callbacks.run('on_val_end', nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) + + # Save JSON + if save_json and len(jdict): + w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights + anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json + pred_json = str(save_dir / f"{w}_predictions.json") # predictions json + LOGGER.info(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') + with open(pred_json, 'w') as f: + json.dump(jdict, f) + + try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb + check_requirements('pycocotools') + from pycocotools.coco import COCO + from pycocotools.cocoeval import COCOeval + + anno = COCO(anno_json) # init annotations api + pred = anno.loadRes(pred_json) # init predictions api + eval = COCOeval(anno, pred, 'bbox') + if is_coco: + eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # image IDs to evaluate + eval.evaluate() + eval.accumulate() + eval.summarize() + map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5) + except Exception as e: + LOGGER.info(f'pycocotools unable to run: {e}') + + # Return results + model.float() # for training + if not training: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") + maps = np.zeros(nc) + map + for i, c in enumerate(ap_class): + maps[c] = ap[i] + return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default=ROOT / 'data/coco.yaml', help='dataset.yaml path') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolo.pt', help='model path(s)') + parser.add_argument('--batch-size', type=int, default=32, help='batch size') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.7, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image') + parser.add_argument('--task', default='val', help='train, val, test, speed or study') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--verbose', action='store_true', help='report mAP by class') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') + parser.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + parser.add_argument('--min-items', type=int, default=0, help='Experimental') + opt = parser.parse_args() + opt.data = check_yaml(opt.data) # check YAML + opt.save_json |= opt.data.endswith('coco.yaml') + opt.save_txt |= opt.save_hybrid + print_args(vars(opt)) + return opt + + +def main(opt): + #check_requirements(exclude=('tensorboard', 'thop')) + + if opt.task in ('train', 'val', 'test'): # run normally + if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 + LOGGER.info(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results') + if opt.save_hybrid: + LOGGER.info('WARNING ⚠️ --save-hybrid will return high mAP from hybrid labels, not from predictions alone') + run(**vars(opt)) + + else: + weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] + opt.half = torch.cuda.is_available() and opt.device != 'cpu' # FP16 for fastest results + if opt.task == 'speed': # speed benchmarks + # python val.py --task speed --data coco.yaml --batch 1 --weights yolo.pt... + opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False + for opt.weights in weights: + run(**vars(opt), plots=False) + + elif opt.task == 'study': # speed vs mAP benchmarks + # python val.py --task study --data coco.yaml --iou 0.7 --weights yolo.pt... 
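+            # study mode sweeps --imgsz from 256 to 1536 in steps of 128, re-runs validation at each size, and saves the resulting metric/speed rows to study_*.txt for plot_val_study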
+ for opt.weights in weights: + f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt' # filename to save to + x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis + for opt.imgsz in x: # img-size + LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...') + r, _, t = run(**vars(opt), plots=False) + y.append(r + t) # results and times + np.savetxt(f, y, fmt='%10.4g') # save + os.system('zip -r study.zip study_*.txt') + plot_val_study(x=x) # plot + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/val_dual.py b/val_dual.py new file mode 100644 index 0000000000000000000000000000000000000000..4f0af05bfca44a7031214f8041365477681dd1d6 --- /dev/null +++ b/val_dual.py @@ -0,0 +1,393 @@ +import argparse +import json +import os +import sys +from pathlib import Path + +import numpy as np +import torch +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[0] # YOLO root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from models.common import DetectMultiBackend +from utils.callbacks import Callbacks +from utils.dataloaders import create_dataloader +from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, check_requirements, + check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, + print_args, scale_boxes, xywh2xyxy, xyxy2xywh) +from utils.metrics import ConfusionMatrix, ap_per_class, box_iou +from utils.plots import output_to_target, plot_images, plot_val_study +from utils.torch_utils import select_device, smart_inference_mode + + +def save_one_txt(predn, save_conf, shape, file): + # Save one txt result + gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh + for *xyxy, conf, cls in predn.tolist(): + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format + with open(file, 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + +def save_one_json(predn, jdict, path, class_map): + # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} + image_id = int(path.stem) if path.stem.isnumeric() else path.stem + box = xyxy2xywh(predn[:, :4]) # xywh + box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner + for p, b in zip(predn.tolist(), box.tolist()): + jdict.append({ + 'image_id': image_id, + 'category_id': class_map[int(p[5])], + 'bbox': [round(x, 3) for x in b], + 'score': round(p[4], 5)}) + + +def process_batch(detections, labels, iouv): + """ + Return correct prediction matrix + Arguments: + detections (array[N, 6]), x1, y1, x2, y2, conf, class + labels (array[M, 5]), class, x1, y1, x2, y2 + Returns: + correct (array[N, 10]), for 10 IoU levels + """ + correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) + iou = box_iou(labels[:, 1:], detections[:, :4]) + correct_class = labels[:, 0:1] == detections[:, 5] + for i in range(len(iouv)): + x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + # matches = matches[matches[:, 2].argsort()[::-1]] + matches = 
matches[np.unique(matches[:, 0], return_index=True)[1]] + correct[matches[:, 1].astype(int), i] = True + return torch.tensor(correct, dtype=torch.bool, device=iouv.device) + + +@smart_inference_mode() +def run( + data, + weights=None, # model.pt path(s) + batch_size=32, # batch size + imgsz=640, # inference size (pixels) + conf_thres=0.001, # confidence threshold + iou_thres=0.7, # NMS IoU threshold + max_det=300, # maximum detections per image + task='val', # train, val, test, speed or study + device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu + workers=8, # max dataloader workers (per RANK in DDP mode) + single_cls=False, # treat as single-class dataset + augment=False, # augmented inference + verbose=False, # verbose output + save_txt=False, # save results to *.txt + save_hybrid=False, # save label+prediction hybrid results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_json=False, # save a COCO-JSON results file + project=ROOT / 'runs/val', # save to project/name + name='exp', # save to project/name + exist_ok=False, # existing project/name ok, do not increment + half=True, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + min_items=0, # Experimental + model=None, + dataloader=None, + save_dir=Path(''), + plots=True, + callbacks=Callbacks(), + compute_loss=None, +): + # Initialize/load model and set device + training = model is not None + if training: # called by train.py + device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model + half &= device.type != 'cpu' # half precision only supported on CUDA + model.half() if half else model.float() + else: # called directly + device = select_device(device, batch_size=batch_size) + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Load model + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine + imgsz = check_img_size(imgsz, s=stride) # check image size + half = model.fp16 # FP16 supported on limited backends with CUDA + if engine: + batch_size = model.batch_size + else: + device = model.device + if not (pt or jit): + batch_size = 1 # export.py models default to batch-size 1 + LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') + + # Data + data = check_dataset(data) # check + + # Configure + model.eval() + cuda = device.type != 'cpu' + #is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset + is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'val2017.txt') # COCO dataset + nc = 1 if single_cls else int(data['nc']) # number of classes + iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95 + niou = iouv.numel() + + # Dataloader + if not training: + if pt and not single_cls: # check --weights are trained on --data + ncm = model.model.nc + assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ + f'classes). Pass correct combination of --weights and --data that are trained together.' 
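+        # the warmup below runs a dummy forward pass so one-time backend initialization is not charged to the first timed batch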
+ model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup + pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks + task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images + dataloader = create_dataloader(data[task], + imgsz, + batch_size, + stride, + single_cls, + pad=pad, + rect=rect, + workers=workers, + min_items=opt.min_items, + prefix=colorstr(f'{task}: '))[0] + + seen = 0 + confusion_matrix = ConfusionMatrix(nc=nc) + names = model.names if hasattr(model, 'names') else model.module.names # get class names + if isinstance(names, (list, tuple)): # old format + names = dict(enumerate(names)) + class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) + s = ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'P', 'R', 'mAP50', 'mAP50-95') + tp, fp, p, r, f1, mp, mr, map50, ap50, map = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 + dt = Profile(), Profile(), Profile() # profiling times + loss = torch.zeros(3, device=device) + jdict, stats, ap, ap_class = [], [], [], [] + callbacks.run('on_val_start') + pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar + for batch_i, (im, targets, paths, shapes) in enumerate(pbar): + callbacks.run('on_val_batch_start') + with dt[0]: + if cuda: + im = im.to(device, non_blocking=True) + targets = targets.to(device) + im = im.half() if half else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + nb, _, height, width = im.shape # batch size, channels, height, width + + # Inference + with dt[1]: + preds, train_out = model(im) if compute_loss else (model(im, augment=augment), None) + + # Loss + if compute_loss: + preds = preds[1] + #train_out = train_out[1] + #loss += compute_loss(train_out, targets)[1] # box, obj, cls + else: + preds = preds[0][1] + + # NMS + targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels + lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling + with dt[2]: + preds = non_max_suppression(preds, + conf_thres, + iou_thres, + labels=lb, + multi_label=True, + agnostic=single_cls, + max_det=max_det) + + # Metrics + for si, pred in enumerate(preds): + labels = targets[targets[:, 0] == si, 1:] + nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions + path, shape = Path(paths[si]), shapes[si][0] + correct = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init + seen += 1 + + if npr == 0: + if nl: + stats.append((correct, *torch.zeros((2, 0), device=device), labels[:, 0])) + if plots: + confusion_matrix.process_batch(detections=None, labels=labels[:, 0]) + continue + + # Predictions + if single_cls: + pred[:, 5] = 0 + predn = pred.clone() + scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred + + # Evaluate + if nl: + tbox = xywh2xyxy(labels[:, 1:5]) # target boxes + scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels + labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels + correct = process_batch(predn, labelsn, iouv) + if plots: + confusion_matrix.process_batch(predn, labelsn) + stats.append((correct, pred[:, 4], pred[:, 5], labels[:, 0])) # (correct, conf, pcls, tcls) + + # Save/log + if save_txt: + save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') + if save_json: + save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary + 
callbacks.run('on_val_image_end', pred, predn, path, names, im[si]) + + # Plot images + if plots and batch_i < 3: + plot_images(im, targets, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names) # labels + plot_images(im, output_to_target(preds), paths, save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred + + callbacks.run('on_val_batch_end', batch_i, im, targets, paths, shapes, preds) + + # Compute metrics + stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy + if len(stats) and stats[0].any(): + tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names) + ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95 + mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean() + nt = np.bincount(stats[3].astype(int), minlength=nc) # number of targets per class + + # Print results + pf = '%22s' + '%11i' * 2 + '%11.3g' * 4 # print format + LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) + if nt.sum() == 0: + LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels') + + # Print results per class + if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): + for i, c in enumerate(ap_class): + LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) + + # Print speeds + t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image + if not training: + shape = (batch_size, 3, imgsz, imgsz) + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) + + # Plots + if plots: + confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) + callbacks.run('on_val_end', nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) + + # Save JSON + if save_json and len(jdict): + w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights + anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json + pred_json = str(save_dir / f"{w}_predictions.json") # predictions json + LOGGER.info(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') + with open(pred_json, 'w') as f: + json.dump(jdict, f) + + try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb + check_requirements('pycocotools') + from pycocotools.coco import COCO + from pycocotools.cocoeval import COCOeval + + anno = COCO(anno_json) # init annotations api + pred = anno.loadRes(pred_json) # init predictions api + eval = COCOeval(anno, pred, 'bbox') + if is_coco: + eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # image IDs to evaluate + eval.evaluate() + eval.accumulate() + eval.summarize() + map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5) + except Exception as e: + LOGGER.info(f'pycocotools unable to run: {e}') + + # Return results + model.float() # for training + if not training: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") + maps = np.zeros(nc) + map + for i, c in enumerate(ap_class): + maps[c] = ap[i] + return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default=ROOT / 'data/coco.yaml', help='dataset.yaml path') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolo.pt', help='model path(s)') + parser.add_argument('--batch-size', type=int, default=32, help='batch size') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.7, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image') + parser.add_argument('--task', default='val', help='train, val, test, speed or study') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--verbose', action='store_true', help='report mAP by class') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') + parser.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + parser.add_argument('--min-items', type=int, default=0, help='Experimental') + opt = parser.parse_args() + opt.data = check_yaml(opt.data) # check YAML + opt.save_json |= opt.data.endswith('coco.yaml') + opt.save_txt |= opt.save_hybrid + print_args(vars(opt)) + return opt + + +def main(opt): + #check_requirements(exclude=('tensorboard', 'thop')) + + if opt.task in ('train', 'val', 'test'): # run normally + if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 + LOGGER.info(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results') + if opt.save_hybrid: + LOGGER.info('WARNING ⚠️ --save-hybrid will return high mAP from hybrid labels, not from predictions alone') + run(**vars(opt)) + + else: + weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] + opt.half = torch.cuda.is_available() and opt.device != 'cpu' # FP16 for fastest results + if opt.task == 'speed': # speed benchmarks + # python val.py --task speed --data coco.yaml --batch 1 --weights yolo.pt... + opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False + for opt.weights in weights: + run(**vars(opt), plots=False) + + elif opt.task == 'study': # speed vs mAP benchmarks + # python val.py --task study --data coco.yaml --iou 0.7 --weights yolo.pt... 
+ for opt.weights in weights: + f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt' # filename to save to + x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis + for opt.imgsz in x: # img-size + LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...') + r, _, t = run(**vars(opt), plots=False) + y.append(r + t) # results and times + np.savetxt(f, y, fmt='%10.4g') # save + os.system('zip -r study.zip study_*.txt') + plot_val_study(x=x) # plot + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/val_triple.py b/val_triple.py new file mode 100644 index 0000000000000000000000000000000000000000..e8997a34ba8e3d542bbbe7bda9b0b3ef44f7e959 --- /dev/null +++ b/val_triple.py @@ -0,0 +1,391 @@ +import argparse +import json +import os +import sys +from pathlib import Path + +import numpy as np +import torch +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[0] # YOLO root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from models.common import DetectMultiBackend +from utils.callbacks import Callbacks +from utils.dataloaders import create_dataloader +from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, check_requirements, + check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, + print_args, scale_boxes, xywh2xyxy, xyxy2xywh) +from utils.metrics import ConfusionMatrix, ap_per_class, box_iou +from utils.plots import output_to_target, plot_images, plot_val_study +from utils.torch_utils import select_device, smart_inference_mode + + +def save_one_txt(predn, save_conf, shape, file): + # Save one txt result + gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh + for *xyxy, conf, cls in predn.tolist(): + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format + with open(file, 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + +def save_one_json(predn, jdict, path, class_map): + # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} + image_id = int(path.stem) if path.stem.isnumeric() else path.stem + box = xyxy2xywh(predn[:, :4]) # xywh + box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner + for p, b in zip(predn.tolist(), box.tolist()): + jdict.append({ + 'image_id': image_id, + 'category_id': class_map[int(p[5])], + 'bbox': [round(x, 3) for x in b], + 'score': round(p[4], 5)}) + + +def process_batch(detections, labels, iouv): + """ + Return correct prediction matrix + Arguments: + detections (array[N, 6]), x1, y1, x2, y2, conf, class + labels (array[M, 5]), class, x1, y1, x2, y2 + Returns: + correct (array[N, 10]), for 10 IoU levels + """ + correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) + iou = box_iou(labels[:, 1:], detections[:, :4]) + correct_class = labels[:, 0:1] == detections[:, 5] + for i in range(len(iouv)): + x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + # matches = matches[matches[:, 2].argsort()[::-1]] + 
matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + correct[matches[:, 1].astype(int), i] = True + return torch.tensor(correct, dtype=torch.bool, device=iouv.device) + + +@smart_inference_mode() +def run( + data, + weights=None, # model.pt path(s) + batch_size=32, # batch size + imgsz=640, # inference size (pixels) + conf_thres=0.001, # confidence threshold + iou_thres=0.7, # NMS IoU threshold + max_det=300, # maximum detections per image + task='val', # train, val, test, speed or study + device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu + workers=8, # max dataloader workers (per RANK in DDP mode) + single_cls=False, # treat as single-class dataset + augment=False, # augmented inference + verbose=False, # verbose output + save_txt=False, # save results to *.txt + save_hybrid=False, # save label+prediction hybrid results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_json=False, # save a COCO-JSON results file + project=ROOT / 'runs/val', # save to project/name + name='exp', # save to project/name + exist_ok=False, # existing project/name ok, do not increment + half=True, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + min_items=0, # Experimental + model=None, + dataloader=None, + save_dir=Path(''), + plots=True, + callbacks=Callbacks(), + compute_loss=None, +): + # Initialize/load model and set device + training = model is not None + if training: # called by train.py + device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model + half &= device.type != 'cpu' # half precision only supported on CUDA + model.half() if half else model.float() + else: # called directly + device = select_device(device, batch_size=batch_size) + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Load model + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine + imgsz = check_img_size(imgsz, s=stride) # check image size + half = model.fp16 # FP16 supported on limited backends with CUDA + if engine: + batch_size = model.batch_size + else: + device = model.device + if not (pt or jit): + batch_size = 1 # export.py models default to batch-size 1 + LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') + + # Data + data = check_dataset(data) # check + + # Configure + model.eval() + cuda = device.type != 'cpu' + #is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset + is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'val2017.txt') # COCO dataset + nc = 1 if single_cls else int(data['nc']) # number of classes + iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95 + niou = iouv.numel() + + # Dataloader + if not training: + if pt and not single_cls: # check --weights are trained on --data + ncm = model.model.nc + assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ + f'classes). Pass correct combination of --weights and --data that are trained together.' 
+ model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup + pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks + task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images + dataloader = create_dataloader(data[task], + imgsz, + batch_size, + stride, + single_cls, + pad=pad, + rect=rect, + workers=workers, + min_items=opt.min_items, + prefix=colorstr(f'{task}: '))[0] + + seen = 0 + confusion_matrix = ConfusionMatrix(nc=nc) + names = model.names if hasattr(model, 'names') else model.module.names # get class names + if isinstance(names, (list, tuple)): # old format + names = dict(enumerate(names)) + class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) + s = ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'P', 'R', 'mAP50', 'mAP50-95') + tp, fp, p, r, f1, mp, mr, map50, ap50, map = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 + dt = Profile(), Profile(), Profile() # profiling times + loss = torch.zeros(3, device=device) + jdict, stats, ap, ap_class = [], [], [], [] + callbacks.run('on_val_start') + pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar + for batch_i, (im, targets, paths, shapes) in enumerate(pbar): + callbacks.run('on_val_batch_start') + with dt[0]: + if cuda: + im = im.to(device, non_blocking=True) + targets = targets.to(device) + im = im.half() if half else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + nb, _, height, width = im.shape # batch size, channels, height, width + + # Inference + with dt[1]: + preds, train_out = model(im) if compute_loss else (model(im, augment=augment), None) + preds = preds[2] + train_out = train_out[2] + + # Loss + #if compute_loss: + # loss += compute_loss(train_out, targets)[2] # box, obj, cls + + # NMS + targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels + lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling + with dt[2]: + preds = non_max_suppression(preds, + conf_thres, + iou_thres, + labels=lb, + multi_label=True, + agnostic=single_cls, + max_det=max_det) + + # Metrics + for si, pred in enumerate(preds): + labels = targets[targets[:, 0] == si, 1:] + nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions + path, shape = Path(paths[si]), shapes[si][0] + correct = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init + seen += 1 + + if npr == 0: + if nl: + stats.append((correct, *torch.zeros((2, 0), device=device), labels[:, 0])) + if plots: + confusion_matrix.process_batch(detections=None, labels=labels[:, 0]) + continue + + # Predictions + if single_cls: + pred[:, 5] = 0 + predn = pred.clone() + scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred + + # Evaluate + if nl: + tbox = xywh2xyxy(labels[:, 1:5]) # target boxes + scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels + labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels + correct = process_batch(predn, labelsn, iouv) + if plots: + confusion_matrix.process_batch(predn, labelsn) + stats.append((correct, pred[:, 4], pred[:, 5], labels[:, 0])) # (correct, conf, pcls, tcls) + + # Save/log + if save_txt: + save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') + if save_json: + save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary + callbacks.run('on_val_image_end', pred, predn, path, names, 
im[si]) + + # Plot images + if plots and batch_i < 3: + plot_images(im, targets, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names) # labels + plot_images(im, output_to_target(preds), paths, save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred + + callbacks.run('on_val_batch_end', batch_i, im, targets, paths, shapes, preds) + + # Compute metrics + stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy + if len(stats) and stats[0].any(): + tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names) + ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95 + mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean() + nt = np.bincount(stats[3].astype(int), minlength=nc) # number of targets per class + + # Print results + pf = '%22s' + '%11i' * 2 + '%11.3g' * 4 # print format + LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) + if nt.sum() == 0: + LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels') + + # Print results per class + if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): + for i, c in enumerate(ap_class): + LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) + + # Print speeds + t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image + if not training: + shape = (batch_size, 3, imgsz, imgsz) + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) + + # Plots + if plots: + confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) + callbacks.run('on_val_end', nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) + + # Save JSON + if save_json and len(jdict): + w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights + anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json + pred_json = str(save_dir / f"{w}_predictions.json") # predictions json + LOGGER.info(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') + with open(pred_json, 'w') as f: + json.dump(jdict, f) + + try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb + check_requirements('pycocotools') + from pycocotools.coco import COCO + from pycocotools.cocoeval import COCOeval + + anno = COCO(anno_json) # init annotations api + pred = anno.loadRes(pred_json) # init predictions api + eval = COCOeval(anno, pred, 'bbox') + if is_coco: + eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # image IDs to evaluate + eval.evaluate() + eval.accumulate() + eval.summarize() + map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5) + except Exception as e: + LOGGER.info(f'pycocotools unable to run: {e}') + + # Return results + model.float() # for training + if not training: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") + maps = np.zeros(nc) + map + for i, c in enumerate(ap_class): + maps[c] = ap[i] + return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default=ROOT / 'data/coco.yaml', help='dataset.yaml path') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolo.pt', help='model path(s)') + parser.add_argument('--batch-size', type=int, default=32, help='batch size') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.7, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image') + parser.add_argument('--task', default='val', help='train, val, test, speed or study') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--verbose', action='store_true', help='report mAP by class') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') + parser.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + parser.add_argument('--min-items', type=int, default=0, help='Experimental') + opt = parser.parse_args() + opt.data = check_yaml(opt.data) # check YAML + opt.save_json |= opt.data.endswith('coco.yaml') + opt.save_txt |= opt.save_hybrid + print_args(vars(opt)) + return opt + + +def main(opt): + #check_requirements(exclude=('tensorboard', 'thop')) + + if opt.task in ('train', 'val', 'test'): # run normally + if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 + LOGGER.info(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results') + if opt.save_hybrid: + LOGGER.info('WARNING ⚠️ --save-hybrid will return high mAP from hybrid labels, not from predictions alone') + run(**vars(opt)) + + else: + weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] + opt.half = torch.cuda.is_available() and opt.device != 'cpu' # FP16 for fastest results + if opt.task == 'speed': # speed benchmarks + # python val.py --task speed --data coco.yaml --batch 1 --weights yolo.pt... + opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False + for opt.weights in weights: + run(**vars(opt), plots=False) + + elif opt.task == 'study': # speed vs mAP benchmarks + # python val.py --task study --data coco.yaml --iou 0.7 --weights yolo.pt... 
+            for opt.weights in weights:
+                f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt'  # filename to save to
+                x, y = list(range(256, 1536 + 128, 128)), []  # x axis (image sizes), y axis
+                for opt.imgsz in x:  # img-size
+                    LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...')
+                    r, _, t = run(**vars(opt), plots=False)
+                    y.append(r + t)  # results and times
+                np.savetxt(f, y, fmt='%10.4g')  # save
+            os.system('zip -r study.zip study_*.txt')
+            plot_val_study(x=x)  # plot
+
+
+if __name__ == "__main__":
+    opt = parse_opt()
+    main(opt)
diff --git a/yolov9/yolov9/app.py b/yolov9/yolov9/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..e34e88d9bdb7be5dbaf15e6a50283ae87499e83d
--- /dev/null
+++ b/yolov9/yolov9/app.py
@@ -0,0 +1,54 @@
+import streamlit as st
+import matplotlib.pyplot as plt
+import matplotlib.image as mpimg
+import subprocess
+import os
+
+def run_detection(uploaded_file):
+    # Save the uploaded file temporarily
+    with open("temp_image.jpg", "wb") as f:
+        f.write(uploaded_file.getbuffer())
+
+    # Run the detection command
+    command = [
+        "python", "detect_dual.py",
+        "--source", "temp_image.jpg",
+        "--img", "640",
+        "--device", "cpu",
+        "--weights", "models/detect/yolov9tr.pt",
+        "--name", "yolov9_c_640_detect",
+        "--exist-ok"
+    ]
+
+    subprocess.run(command, check=True)
+
+    # Find the output image
+    output_dir = "runs/detect/yolov9_c_640_detect"
+    output_image = os.path.join(output_dir, os.path.basename("temp_image.jpg"))
+
+    return output_image
+
+def main():
+    st.title("YOLOv9 Object Detection")
+
+    uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
+
+    if uploaded_file is not None:
+        st.image(uploaded_file, caption="Uploaded Image", use_column_width=True)
+
+        if st.button("Run Detection"):
+            with st.spinner("Running detection..."):
+                output_image = run_detection(uploaded_file)
+
+            # Display the output image
+            st.image(output_image, caption="Detection Result", use_column_width=True)
+
+            # Optional: Display the image using matplotlib
+            fig, ax = plt.subplots()
+            img = mpimg.imread(output_image)
+            ax.imshow(img)
+            ax.axis('off')
+            st.pyplot(fig)
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
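The Streamlit front end above drives detection by shelling out to `detect_dual.py` with relative paths, so it expects to be launched from a working directory that contains `detect_dual.py` and the `models/detect/yolov9tr.pt` weights it references. A minimal sketch for serving the demo locally, assuming those files sit in the current directory (adjust paths to your checkout):

```bash
# Sketch only: the layout of detect_dual.py and the weight file relative to app.py is an assumption
pip install streamlit matplotlib
streamlit run app.py   # serves the uploader UI, by default on http://localhost:8501
```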